repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 classes) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 classes)
---|---|---|---|---|---
spiffykahuna/TIJ4-code_idea | OutputVerifier.py | 10 | 7994 |
#!/usr/bin/python
"""
To do:
3) command-line argument (to test a single file)
- What about exceptions and aborts?
- If "..." is embedded anywhere in a line, that portion becomes a .*? regexp
---------------
Find files with
/* Output:
Run the programs and capture the output, compare with anticipated output.
/* Output: (80% match)
For files that vary from run to run
Complete punt:
/* Output: (Sample)
(More elaborate design in SimpleTest1.py)
"""
import os, re, glob, sys, string, codecs
from difflib import SequenceMatcher
argTag = '// {Args: '
targetOutput = re.compile("/* Output:(.*?)\n(.*)\n\*///:~", re.DOTALL)
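# Illustrative note (not part of the original script): this pattern is meant to
# capture the trailing comment block of a generated example file, e.g.
#   } /* Output: (80% match)
#   Hello
#   *///:~
# where group(1) would hold the modifier text such as " (80% match)" or " (Sample)"
# and group(2) the expected output lines used by compareResults() below.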
class SimpleTest:
def __init__(self, fileName, text, referencePath, reportFile):
self.fileName = fileName
self.normalOutput = self.fileName + "-output.txt"
self.errorOutput = self.fileName + "-erroroutput.txt"
self.text = text
self.referencePath = referencePath
self.reportFile = reportFile
self.package = ""
self.args = ""
self.runTest = True
self.insertOutput = True
self.EmbeddedComparisonOutput = False
self.comparisonFile = None
self.lines = self.text.split("\n")
for line in self.lines:
if "{RunByHand}" in line or \
line.startswith("import javax.swing.*;") or \
"c12:ZipCompress.java" in line or \
"/* (Execute to see output) *///:~" in line:
self.runTest = False
if line.startswith("package"):
self.package = line.split()[1][:-1] + "."
if line.startswith(argTag):
self.args = line[len(argTag):].strip()
assert self.args.rfind('}') != -1, "%s, %s" % (self.args, referencePath)
self.args = self.args[:self.args.rfind('}')]
if line.startswith("// {main:"):
self.fileName = line.split()[-1][:-1]
if line.startswith("// {Exec:"):
self.command = line.split(':', 1)[1].strip()[:-1]
if "/* Output:" in line:
self.EmbeddedComparisonOutput = True
if line.startswith("} /*"):
break # Out of for loop
#if "} ///:~" in line: # Extra space
# self.insertOutput = False
def run(self):
if not self.runTest: return
if not hasattr(self, "command"):
self.command = "java " + self.package + self.fileName + " " + self.args
# Capture standard output into a local file.
self.command = self.command + " > " + self.normalOutput
print self.command
os.system(self.command)
if os.stat(self.normalOutput).st_size:
return self.compareResults(self.normalOutput)
# Capture error output into a local file.
# The '2>' requires cygwin under Windows, or *nix:
self.command = self.command + " 2> " + self.errorOutput
print self.command
os.system(self.command)
return self.compareResults(self.errorOutput)
def compareResults(self, fileName):
# Read output file that was just generated:
results = makePrintable(file(fileName).read())
results = results.replace('\t', ' ')
results = results.strip()
file("Generated.txt",'w').write(results)
# Strip off trailing spaces on each line:
results = "\n".join([line.rstrip() for line in results.split("\n")])
controlSample = self.getControlSample()
ratio = 1.0
if controlSample:
controlOutput = controlSample.group(2).rstrip()
if "\n..." in controlOutput:
controlLines = controlOutput.split("\n")[:-1]
resultLines = results.split("\n")[:len(controlLines)]
controlOutput = "\n".join(controlLines)
results = "\n".join(resultLines)
file("controlOutput.txt",'w').write(controlOutput)
modifier = controlSample.group(1)
if "match" in modifier:
ratio = float(re.findall("\d+", modifier)[0]) / 100
print "Looking for", ratio, "match"
if "Sample" in modifier:
ratio = 0.0
actualRatio = SequenceMatcher(None, controlOutput, results).ratio()
if actualRatio < ratio:
self.reportFile.write("mismatch in " + self.referencePath + "\n")
self.reportFile.write("Actual ratio " + str(actualRatio) + "\n")
self.reportFile.write("expected:\n")
self.reportFile.write(controlOutput + "\n")
self.reportFile.write("----------actual:----------\n")
self.reportFile.write(results + "\n")
file(self.fileName + "-control.txt", 'w').write(controlOutput)
file(self.fileName + "-results.txt", 'w').write(results)
self.reportFile.write("---------------------------\n")
os.system("cmp " + self.fileName + "-control.txt "
+ self.fileName + "-results.txt"
+ " > cmp-out.txt")
self.reportFile.write(file("cmp-out.txt").read())
self.reportFile.write("=" * 40 + "\n")
else:
pass #!!! No control sample, create initial one here
def appendOutput(self):
if self.insertOutput:
# Rewrite the tail of the source file if the result is nonzero
self.lines[-2] = '}'
self.lines[-1] = "/* Output:"
for tline in file(self.fileName + "-output.txt"):
self.lines.append(tline.rstrip())
self.lines.append("*///:~")
self.lines.append("")
file(self.fileName + ".java", 'w').write("\n".join(self.lines))
def getControlSample(self):
"""Finds the control sample, returns an re group
First element is the arguments, second is the actual data"""
if self.EmbeddedComparisonOutput:
self.sourceOutput = targetOutput.search(self.text)
else:
return None
return self.sourceOutput
def makePrintable(s):
for c in s:
if c not in string.printable: return _makePrintable(s)
return s
def _makePrintable(s):
result = ''
for c in s:
if c not in string.printable: result += ' '
else: result += c
return result
class ReportFile:
def __init__(self, filePath):
self.filePath = filePath
self.file = None
def write(self, line):
if not self.file:
self.file = file(self.filePath, 'w')
self.file.write(line)
print line
def close(self):
if self.file:
self.file.close()
if __name__ == "__main__":
if len(sys.argv) > 1:
javaSource = sys.argv[1]
if javaSource.endswith("."): javaSource = javaSource[:-1]
if not javaSource.endswith(".java"): javaSource += ".java"
os.system("javac " + javaSource)
SimpleTest(javaSource.split('.')[0], file(javaSource).read(), javaSource, sys.stdout).run()
sys.exit()
start = os.getcwd()
reportFile = ReportFile(start + os.sep + "OutputErrors.txt")
for root, dirs, files in os.walk('.'):
print root
os.chdir(root)
for f in [name.split('.')[0] for name in files if name.endswith(".java")]:
text = file(f + ".java").read()
# Only perform verification if there is an output tag:
if text.find("/* Output:") != -1:
referencePath = os.path.join(root, f + ".java")
SimpleTest(f, text, referencePath, reportFile).run()
os.chdir(start)
reportFile.close()
if reportFile.file:
print "Errors in OutputErrors.txt"
| mit
pk-sam/crosswalk-test-suite | webapi/tct-widgetpolicy-w3c-tests/inst.wgt.py | 3 | 6691 |
#!/usr/bin/env python
import os
import shutil
import glob
import time
import sys
import subprocess
import string
from optparse import OptionParser, make_option
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PKG_NAME = os.path.basename(SCRIPT_DIR)
PARAMETERS = None
#XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/5000/dbus/user_bus_socket"
SRC_DIR = ""
PKG_SRC_DIR = ""
def doCMD(cmd):
# No need to handle timeouts in this short script; let the calling tool do it
print "-->> \"%s\"" % cmd
output = []
cmd_return_code = 1
cmd_proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
while True:
output_line = cmd_proc.stdout.readline().strip("\r\n")
cmd_return_code = cmd_proc.poll()
if output_line == '' and cmd_return_code != None:
break
sys.stdout.write("%s\n" % output_line)
sys.stdout.flush()
output.append(output_line)
return (cmd_return_code, output)
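# Illustrative usage (assumed, not from the original file): doCMD streams a shell
# command's combined stdout/stderr line by line and returns both the exit code and
# the captured lines, e.g.
#   (return_code, output_lines) = doCMD("sdb devices")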
def updateCMD(cmd=None):
if "pkgcmd" in cmd:
cmd = "su - %s -c '%s;%s'" % (PARAMETERS.user, XW_ENV, cmd)
return cmd
def getUSERID():
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell id -u %s" % (
PARAMETERS.device, PARAMETERS.user)
else:
cmd = "ssh %s \"id -u %s\"" % (
PARAMETERS.device, PARAMETERS.user )
return doCMD(cmd)
def getPKGID(pkg_name=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
else:
cmd = "ssh %s \"%s\"" % (
PARAMETERS.device, updateCMD('pkgcmd -l'))
(return_code, output) = doCMD(cmd)
if return_code != 0:
return None
test_pkg_id = None
for line in output:
if line.find("[" + pkg_name + "]") != -1:
pkgidIndex = line.split().index("pkgid")
test_pkg_id = line.split()[pkgidIndex+1].strip("[]")
break
return test_pkg_id
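# Illustrative note (an assumption based on the parsing above): getPKGID expects
# `pkgcmd -l` to emit lines containing the package name in brackets followed by a
# "pkgid" token, e.g. "... [tct-widgetpolicy-w3c-tests] ... pkgid [abc123xyz] ...",
# in which case it would return "abc123xyz".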
def doRemoteCMD(cmd=None):
if PARAMETERS.mode == "SDB":
cmd = "sdb -s %s shell %s" % (PARAMETERS.device, updateCMD(cmd))
else:
cmd = "ssh %s \"%s\"" % (PARAMETERS.device, updateCMD(cmd))
return doCMD(cmd)
def doRemoteCopy(src=None, dest=None):
if PARAMETERS.mode == "SDB":
cmd_prefix = "sdb -s %s push" % PARAMETERS.device
cmd = "%s %s %s" % (cmd_prefix, src, dest)
else:
cmd = "scp -r %s %s:/%s" % (src, PARAMETERS.device, dest)
(return_code, output) = doCMD(cmd)
doRemoteCMD("sync")
if return_code != 0:
return True
else:
return False
def uninstPKGs():
action_status = True
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith(".wgt"):
pkg_id = getPKGID(os.path.basename(os.path.splitext(file)[0]))
if not pkg_id:
action_status = False
continue
(return_code, output) = doRemoteCMD(
"pkgcmd -u -t wgt -q -n %s" % pkg_id)
for line in output:
if "Failure" in line:
action_status = False
break
(return_code, output) = doRemoteCMD(
"rm -rf %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
return action_status
def instPKGs():
action_status = True
(return_code, output) = doRemoteCMD(
"mkdir -p %s" % PKG_SRC_DIR)
if return_code != 0:
action_status = False
for root, dirs, files in os.walk(SCRIPT_DIR):
for file in files:
if file.endswith("%s.wgt" % PKG_NAME):
if not doRemoteCopy(os.path.join(root, file), "%s/%s" % (SRC_DIR, file)):
action_status = False
(return_code, output) = doRemoteCMD(
"pkgcmd -i -t wgt -q -p %s/%s" % (SRC_DIR, file))
doRemoteCMD("rm -rf %s/%s" % (SRC_DIR, file))
for line in output:
if "Failure" in line:
action_status = False
break
for item in glob.glob("%s/*" % SCRIPT_DIR):
if item.endswith(".wgt"):
continue
elif item.endswith("inst.py"):
continue
else:
item_name = os.path.basename(item)
if not doRemoteCopy(item, "%s/%s" % (PKG_SRC_DIR, item_name)):
action_status = False
return action_status
def main():
try:
usage = "usage: inst.py -i"
opts_parser = OptionParser(usage=usage)
opts_parser.add_option(
"-m", dest="mode", action="store", help="Specify mode")
opts_parser.add_option(
"-s", dest="device", action="store", help="Specify device")
opts_parser.add_option(
"-i", dest="binstpkg", action="store_true", help="Install package")
opts_parser.add_option(
"-u", dest="buninstpkg", action="store_true", help="Uninstall package")
opts_parser.add_option(
"-a", dest="user", action="store", help="User name")
global PARAMETERS
(PARAMETERS, args) = opts_parser.parse_args()
except Exception, e:
print "Got wrong option: %s, exit ..." % e
sys.exit(1)
if not PARAMETERS.user:
PARAMETERS.user = "app"
global SRC_DIR, PKG_SRC_DIR
SRC_DIR = "/home/%s/content" % PARAMETERS.user
PKG_SRC_DIR = "%s/tct/opt/%s" % (SRC_DIR, PKG_NAME)
if not PARAMETERS.mode:
PARAMETERS.mode = "SDB"
if PARAMETERS.mode == "SDB":
if not PARAMETERS.device:
(return_code, output) = doCMD("sdb devices")
for line in output:
if str.find(line, "\tdevice") != -1:
PARAMETERS.device = line.split("\t")[0]
break
else:
PARAMETERS.mode = "SSH"
if not PARAMETERS.device:
print "No device provided"
sys.exit(1)
user_info = getUSERID()
re_code = user_info[0]
if re_code == 0 :
global XW_ENV
userid = user_info[1][0]
XW_ENV = "export DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/%s/dbus/user_bus_socket"%str(userid)
else:
print "[Error] cmd commands error : %s"%str(user_info[1])
sys.exit(1)
if PARAMETERS.binstpkg and PARAMETERS.buninstpkg:
print "-i and -u are conflict"
sys.exit(1)
if PARAMETERS.buninstpkg:
if not uninstPKGs():
sys.exit(1)
else:
if not instPKGs():
sys.exit(1)
if __name__ == "__main__":
main()
sys.exit(0)
| bsd-3-clause
vicnet/weboob | modules/audioaddict/module.py | 1 | 12892 |
# -*- coding: utf-8 -*-
# Copyright(C) 2013 Pierre Mazière
#
# This file is part of a weboob module.
#
# This weboob module is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This weboob module is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this weboob module. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from weboob.capabilities.radio import CapRadio, Radio
from weboob.capabilities.audiostream import BaseAudioStream
from weboob.tools.capabilities.streaminfo import StreamInfo
from weboob.capabilities.collection import CapCollection, Collection
from weboob.tools.backend import Module, BackendConfig
from weboob.tools.value import Value
from weboob.browser.browsers import APIBrowser
import time
__all__ = ['AudioAddictModule']
#
# WARNING
#
# AudioAddict playlists do not seem to be appreciated by mplayer
# VLC plays them successfully, therefore I advise setting the media_player
# option to another player in the ~/.config/weboob/radioob config file:
# [ROOT]
# media_player = your_non_mplayer_player
class AudioAddictModule(Module, CapRadio, CapCollection):
NAME = 'audioaddict'
MAINTAINER = u'Pierre Mazière'
EMAIL = '[email protected]'
VERSION = '1.6'
DESCRIPTION = u'Internet radios powered by audioaddict.com services'
LICENSE = 'AGPLv3+'
BROWSER = APIBrowser
# Data extracted from http://tobiass.eu/api-doc.html
NETWORKS = {
'DI': {
'desc': 'Digitally Imported addictive electronic music',
'domain': 'listen.di.fm',
'streams': { # 'android_low': {'rate': 40, 'fmt': 'aac'},
# 'android': {'rate': 64, 'fmt': 'aac'},
# 'android_high': {'rate': 96, 'fmt': 'aac'},
'android_premium_low': {'rate': 40, 'fmt': 'aac'},
'android_premium_medium': {'rate': 64, 'fmt': 'aac'},
'android_premium': {'rate': 128, 'fmt': 'aac'},
'android_premium_high': {'rate': 256, 'fmt': 'aac'},
# 'public1': {'rate': 64, 'fmt': 'aac'},
# 'public2': {'rate': 40, 'fmt': 'aac'},
# 'public3': {'rate': 96, 'fmt': 'mp3'},
'premium_low': {'rate': 40, 'fmt': 'aac'},
'premium_medium': {'rate': 64, 'fmt': 'aac'},
'premium': {'rate': 128, 'fmt': 'aac'},
'premium_high': {'rate': 256, 'fmt': 'mp3'}
}
},
'RadioTunes': {
'desc': 'Radio Tunes',
'domain': 'listen.radiotunes.com',
'streams': { # 'appleapp_low': {'rate': 40, 'fmt': 'aac'},
# 'appleapp': {'rate': 64, 'fmt': 'aac'},
'appleapp_high': {'rate': 96, 'fmt': 'mp3'},
'appleapp_premium_medium': {'rate': 64, 'fmt': 'aac'},
'appleapp_premium': {'rate': 128, 'fmt': 'aac'},
'appleapp_premium_high': {'rate': 256, 'fmt': 'mp3'},
# 'public1': {'rate': 40, 'fmt': 'aac'},
# 'public5': {'rate': 40, 'fmt': 'wma'},
# 'public3': {'rate': 96, 'fmt': 'mp3'},
'premium_low': {'rate': 40, 'fmt': 'aac'},
'premium_medium': {'rate': 64, 'fmt': 'aac'},
'premium': {'rate': 128, 'fmt': 'aac'},
'premium_high': {'rate': 256, 'fmt': 'mp3'}
}
},
'JazzRadio': {
'desc': 'Jazz Radio',
'domain': 'listen.jazzradio.com',
'streams': { # 'appleapp_low': {'rate': 40, 'fmt': 'aac'},
# 'appleapp': {'rate': 64, 'fmt': 'aac'},
'appleapp_premium_medium': {'rate': 64, 'fmt': 'aac'},
'appleapp_premium': {'rate': 128, 'fmt': 'aac'},
'appleapp_premium_high': {'rate': 256, 'fmt': 'mp3'},
# 'public1': {'rate': 40, 'fmt': 'aac'},
# 'public3': {'rate': 64, 'fmt': 'mp3'},
'premium_low': {'rate': 40, 'fmt': 'aac'},
'premium_medium': {'rate': 64, 'fmt': 'aac'},
'premium': {'rate': 128, 'fmt': 'aac'},
'premium_high': {'rate': 256, 'fmt': 'mp3'}
}
},
'RockRadio': {
'desc': 'Rock Radio',
'domain': 'listen.rockradio.com',
'streams': { # 'android_low': {'rate': 40, 'fmt': 'aac'},
# 'android': {'rate': 64, 'fmt': 'aac'},
'android_premium_medium': {'rate': 64, 'fmt': 'aac'},
'android_premium': {'rate': 128, 'fmt': 'aac'},
'android_premium_high': {'rate': 256, 'fmt': 'mp3'},
# 'public1': {'rate': 96, 'fmt': 'mp3'}
}
},
'ClassicalRadio': {
'desc': 'Classical Radio',
'domain': 'listen.classicalradio.com',
'streams': { # 'android_low': {'rate': 40, 'fmt': 'aac'},
# 'android': {'rate': 64, 'fmt': 'aac'},
'android_premium_medium': {'rate': 64, 'fmt': 'aac'},
'android_premium': {'rate': 128, 'fmt': 'aac'},
'android_premium_high': {'rate': 256, 'fmt': 'mp3'},
# 'public1': {'rate': 96, 'fmt': 'mp3'}
}
},
}
CONFIG = BackendConfig(Value('networks',
label='Selected Networks [%s](space separated)' %
' '.join(NETWORKS.keys()), default=''),
Value('quality', label='Radio streaming quality',
choices={'h': 'high', 'l': 'low'},
default='h')
)
def __init__(self, *a, **kw):
super(AudioAddictModule, self).__init__(*a, **kw)
if 'FrescaRadio' in self.config['networks'].get():
raise self.ConfigError('FrescaRadio does not exist anymore')
self.RADIOS = {}
self.HISTORY = {}
def _get_tracks_history(self, network):
self._fetch_radio_list(network)
domain = self.NETWORKS[network]['domain']
url = 'http://api.audioaddict.com/v1/%s/track_history' %\
(domain[domain.find('.') + 1:domain.rfind('.')])
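# Illustrative example (not in the original module): for the 'DI' network the
# domain is 'listen.di.fm', so the slice above yields 'di' and the request URL
# becomes http://api.audioaddict.com/v1/di/track_history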
self.HISTORY[network] = self.browser.request(url)
return self.HISTORY
def create_default_browser(self):
return self.create_browser()
def _get_stream_name(self, network, quality):
streamName = 'public3'
for name in self.NETWORKS[network]['streams'].keys():
if name.startswith('public') and \
self.NETWORKS[network]['streams'][name]['rate'] >= 64:
if quality == 'h':
streamName = name
break
else:
if quality == 'l':
streamName = name
break
return streamName
def _fetch_radio_list(self, network=None):
quality = self.config['quality'].get()
for selectedNetwork in self.config['networks'].get().split():
if network is None or network == selectedNetwork:
streamName = self._get_stream_name(selectedNetwork, quality)
if not self.RADIOS:
self.RADIOS = {}
if selectedNetwork not in self.RADIOS:
document = self.browser.request('http://%s/%s' %
(self.NETWORKS[selectedNetwork]['domain'],
streamName))
self.RADIOS[selectedNetwork] = {}
for info in document:
radio = info['key']
self.RADIOS[selectedNetwork][radio] = {}
self.RADIOS[selectedNetwork][radio]['id'] = info['id']
self.RADIOS[selectedNetwork][radio]['name'] = info['name']
self.RADIOS[selectedNetwork][radio]['playlist'] = info['playlist']
return self.RADIOS
def iter_radios_search(self, pattern):
self._fetch_radio_list()
pattern = pattern.lower()
for network in self.config['networks'].get().split():
for radio in self.RADIOS[network]:
radio_dict = self.RADIOS[network][radio]
if pattern in radio_dict['name'].lower():
yield self.get_radio(radio+"."+network)
def iter_resources(self, objs, split_path):
self._fetch_radio_list()
if Radio in objs:
for network in self.config['networks'].get().split():
if split_path == [network]:
for radio in self.RADIOS[network]:
yield self.get_radio(radio+"."+network)
return
for network in self.config['networks'].get().split():
yield Collection([network], self.NETWORKS[network]['desc'])
def get_current(self, network, radio):
channel = {}
if network not in self.HISTORY:
self._get_tracks_history(network)
channel = self.HISTORY[network].get(str(self.RADIOS[network][radio]['id']))
else:
now = time.time()
channel = self.HISTORY[network].get(str(self.RADIOS[network][radio]['id']))
if channel is None:
return 'Unknown', 'Unknown'
if (channel.get('started') + channel.get('duration')) < now:
self._get_tracks_history(network)
channel = self.HISTORY[network].get(str(self.RADIOS[network][radio]['id']))
artist = u'' + (channel.get('artist', '') or 'Unknown')
title = u''+(channel.get('title', '') or 'Unknown')
if artist == 'Unknown':
track = u'' + (channel.get('track', '') or 'Unknown')
if track != 'Unknown':
artist = track[:track.find(' - ')]
return artist, title
def get_radio(self, radio):
if not isinstance(radio, Radio):
radio = Radio(radio)
radioName, network = radio.id.split('.', 1)
self._fetch_radio_list(network)
if radioName not in self.RADIOS[network]:
return None
radio_dict = self.RADIOS[network][radioName]
radio.title = radio_dict['name']
radio.description = radio_dict['name']
artist, title = self.get_current(network, radioName)
current = StreamInfo(0)
current.who = artist
current.what = title
radio.current = current
radio.streams = []
defaultname = self._get_stream_name(network, self.config['quality'].get())
stream = BaseAudioStream(0)
stream.bitrate = self.NETWORKS[network]['streams'][defaultname]['rate']
stream.format = self.NETWORKS[network]['streams'][defaultname]['fmt']
stream.title = u'%s %skbps' % (stream.format, stream.bitrate)
stream.url = 'http://%s/%s/%s.pls' %\
(self.NETWORKS[network]['domain'], defaultname, radioName)
radio.streams.append(stream)
i = 1
for name in self.NETWORKS[network]['streams'].keys():
if name == defaultname:
continue
stream = BaseAudioStream(i)
stream.bitrate = self.NETWORKS[network]['streams'][name]['rate']
stream.format = self.NETWORKS[network]['streams'][name]['fmt']
stream.title = u'%s %skbps' % (stream.format, stream.bitrate)
stream.url = 'http://%s/%s/%s.pls' % \
(self.NETWORKS[network]['domain'], name, radioName)
radio.streams.append(stream)
i = i + 1
return radio
def fill_radio(self, radio, fields):
if 'current' in fields:
radioName, network = radio.id.split('.', 1)
radio.current = StreamInfo(0)
radio.current.who, radio.current.what = self.get_current(network, radioName)
return radio
OBJECTS = {Radio: fill_radio}
| lgpl-3.0
liubq919/locust | locust/runners.py | 30 | 16947 |
# coding=UTF-8
import socket
import traceback
import warnings
import random
import logging
from time import time
from hashlib import md5
import gevent
from gevent import GreenletExit
from gevent.pool import Group
import events
from stats import global_stats
from rpc import rpc, Message
logger = logging.getLogger(__name__)
# global locust runner singleton
locust_runner = None
STATE_INIT, STATE_HATCHING, STATE_RUNNING, STATE_STOPPED = ["ready", "hatching", "running", "stopped"]
SLAVE_REPORT_INTERVAL = 3.0
class LocustRunner(object):
def __init__(self, locust_classes, options):
self.locust_classes = locust_classes
self.hatch_rate = options.hatch_rate
self.num_clients = options.num_clients
self.num_requests = options.num_requests
self.host = options.host
self.locusts = Group()
self.state = STATE_INIT
self.hatching_greenlet = None
self.exceptions = {}
self.stats = global_stats
# register listener that resets stats when hatching is complete
def on_hatch_complete(user_count):
self.state = STATE_RUNNING
logger.info("Resetting stats\n")
self.stats.reset_all()
events.hatch_complete += on_hatch_complete
@property
def request_stats(self):
return self.stats.entries
@property
def errors(self):
return self.stats.errors
@property
def user_count(self):
return len(self.locusts)
def weight_locusts(self, amount, stop_timeout = None):
"""
Distributes the amount of locusts for each WebLocust-class according to its weight
and returns a list "bucket" with the weighted locusts
"""
bucket = []
weight_sum = sum((locust.weight for locust in self.locust_classes if locust.task_set))
for locust in self.locust_classes:
if not locust.task_set:
warnings.warn("Notice: Found Locust class (%s) got no task_set. Skipping..." % locust.__name__)
continue
if self.host is not None:
locust.host = self.host
if stop_timeout is not None:
locust.stop_timeout = stop_timeout
# create locusts depending on weight
percent = locust.weight / float(weight_sum)
num_locusts = int(round(amount * percent))
bucket.extend([locust for x in xrange(0, num_locusts)])
return bucket
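# Illustrative example (hypothetical classes, not part of this module): with two
# Locust classes WebUser(weight=3) and ApiUser(weight=1) and amount=4, the bucket
# would contain roughly [WebUser, WebUser, WebUser, ApiUser], i.e. each class
# repeated in proportion to its weight.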
def spawn_locusts(self, spawn_count=None, stop_timeout=None, wait=False):
if spawn_count is None:
spawn_count = self.num_clients
if self.num_requests is not None:
self.stats.max_requests = self.num_requests
bucket = self.weight_locusts(spawn_count, stop_timeout)
spawn_count = len(bucket)
if self.state == STATE_INIT or self.state == STATE_STOPPED:
self.state = STATE_HATCHING
self.num_clients = spawn_count
else:
self.num_clients += spawn_count
logger.info("Hatching and swarming %i clients at the rate %g clients/s..." % (spawn_count, self.hatch_rate))
occurence_count = dict([(l.__name__, 0) for l in self.locust_classes])
def hatch():
sleep_time = 1.0 / self.hatch_rate
while True:
if not bucket:
logger.info("All locusts hatched: %s" % ", ".join(["%s: %d" % (name, count) for name, count in occurence_count.iteritems()]))
events.hatch_complete.fire(user_count=self.num_clients)
return
locust = bucket.pop(random.randint(0, len(bucket)-1))
occurence_count[locust.__name__] += 1
def start_locust(_):
try:
locust().run()
except GreenletExit:
pass
new_locust = self.locusts.spawn(start_locust, locust)
if len(self.locusts) % 10 == 0:
logger.debug("%i locusts hatched" % len(self.locusts))
gevent.sleep(sleep_time)
hatch()
if wait:
self.locusts.join()
logger.info("All locusts dead\n")
def kill_locusts(self, kill_count):
"""
Kill a kill_count of weighted locusts from the Group() object in self.locusts
"""
bucket = self.weight_locusts(kill_count)
kill_count = len(bucket)
self.num_clients -= kill_count
logger.info("Killing %i locusts" % kill_count)
dying = []
for g in self.locusts:
for l in bucket:
if l == g.args[0]:
dying.append(g)
bucket.remove(l)
break
for g in dying:
self.locusts.killone(g)
events.hatch_complete.fire(user_count=self.num_clients)
def start_hatching(self, locust_count=None, hatch_rate=None, wait=False):
if self.state != STATE_RUNNING and self.state != STATE_HATCHING:
self.stats.clear_all()
self.stats.start_time = time()
self.exceptions = {}
events.locust_start_hatching.fire()
# Dynamically changing the locust count
if self.state != STATE_INIT and self.state != STATE_STOPPED:
self.state = STATE_HATCHING
if self.num_clients > locust_count:
# Kill some locusts
kill_count = self.num_clients - locust_count
self.kill_locusts(kill_count)
elif self.num_clients < locust_count:
# Spawn some locusts
if hatch_rate:
self.hatch_rate = hatch_rate
spawn_count = locust_count - self.num_clients
self.spawn_locusts(spawn_count=spawn_count)
else:
events.hatch_complete.fire(user_count=self.num_clients)
else:
if hatch_rate:
self.hatch_rate = hatch_rate
if locust_count is not None:
self.spawn_locusts(locust_count, wait=wait)
else:
self.spawn_locusts(wait=wait)
def stop(self):
# if we are currently hatching locusts we need to kill the hatching greenlet first
if self.hatching_greenlet and not self.hatching_greenlet.ready():
self.hatching_greenlet.kill(block=True)
self.locusts.kill(block=True)
self.state = STATE_STOPPED
events.locust_stop_hatching.fire()
def log_exception(self, node_id, msg, formatted_tb):
key = hash(formatted_tb)
row = self.exceptions.setdefault(key, {"count": 0, "msg": msg, "traceback": formatted_tb, "nodes": set()})
row["count"] += 1
row["nodes"].add(node_id)
self.exceptions[key] = row
class LocalLocustRunner(LocustRunner):
def __init__(self, locust_classes, options):
super(LocalLocustRunner, self).__init__(locust_classes, options)
# register listener that logs the exception for the local runner
def on_locust_error(locust_instance, exception, tb):
formatted_tb = "".join(traceback.format_tb(tb))
self.log_exception("local", str(exception), formatted_tb)
events.locust_error += on_locust_error
def start_hatching(self, locust_count=None, hatch_rate=None, wait=False):
self.hatching_greenlet = gevent.spawn(lambda: super(LocalLocustRunner, self).start_hatching(locust_count, hatch_rate, wait=wait))
self.greenlet = self.hatching_greenlet
class DistributedLocustRunner(LocustRunner):
def __init__(self, locust_classes, options):
super(DistributedLocustRunner, self).__init__(locust_classes, options)
self.master_host = options.master_host
self.master_port = options.master_port
self.master_bind_host = options.master_bind_host
self.master_bind_port = options.master_bind_port
def noop(self, *args, **kwargs):
""" Used to link() greenlets to in order to be compatible with gevent 1.0 """
pass
class SlaveNode(object):
def __init__(self, id, state=STATE_INIT):
self.id = id
self.state = state
self.user_count = 0
class MasterLocustRunner(DistributedLocustRunner):
def __init__(self, *args, **kwargs):
super(MasterLocustRunner, self).__init__(*args, **kwargs)
class SlaveNodesDict(dict):
def get_by_state(self, state):
return [c for c in self.itervalues() if c.state == state]
@property
def ready(self):
return self.get_by_state(STATE_INIT)
@property
def hatching(self):
return self.get_by_state(STATE_HATCHING)
@property
def running(self):
return self.get_by_state(STATE_RUNNING)
self.clients = SlaveNodesDict()
self.server = rpc.Server(self.master_bind_host, self.master_bind_port)
self.greenlet = Group()
self.greenlet.spawn(self.client_listener).link_exception(callback=self.noop)
# listener that gathers info on how many locust users each slave has spawned
def on_slave_report(client_id, data):
if client_id not in self.clients:
logger.info("Discarded report from unrecognized slave %s", client_id)
return
self.clients[client_id].user_count = data["user_count"]
events.slave_report += on_slave_report
# register listener that sends quit message to slave nodes
def on_quitting():
self.quit()
events.quitting += on_quitting
@property
def user_count(self):
return sum([c.user_count for c in self.clients.itervalues()])
def start_hatching(self, locust_count, hatch_rate):
num_slaves = len(self.clients.ready) + len(self.clients.running)
if not num_slaves:
logger.warning("You are running in distributed mode but have no slave servers connected. "
"Please connect slaves prior to swarming.")
return
self.num_clients = locust_count
slave_num_clients = locust_count / (num_slaves or 1)
slave_hatch_rate = float(hatch_rate) / (num_slaves or 1)
remaining = locust_count % num_slaves
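# Illustrative example (not in the original code): with locust_count=7 and 3 slaves,
# each slave is asked for 7 // 3 = 2 clients, and the remainder of 1 is handed out
# one extra client at a time in the loop below.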
logger.info("Sending hatch jobs to %d ready clients", num_slaves)
if self.state != STATE_RUNNING and self.state != STATE_HATCHING:
self.stats.clear_all()
self.exceptions = {}
events.master_start_hatching.fire()
for client in self.clients.itervalues():
data = {
"hatch_rate":slave_hatch_rate,
"num_clients":slave_num_clients,
"num_requests": self.num_requests,
"host":self.host,
"stop_timeout":None
}
if remaining > 0:
data["num_clients"] += 1
remaining -= 1
self.server.send(Message("hatch", data, None))
self.stats.start_time = time()
self.state = STATE_HATCHING
def stop(self):
for client in self.clients.hatching + self.clients.running:
self.server.send(Message("stop", None, None))
events.master_stop_hatching.fire()
def quit(self):
for client in self.clients.itervalues():
self.server.send(Message("quit", None, None))
self.greenlet.kill(block=True)
def client_listener(self):
while True:
msg = self.server.recv()
if msg.type == "client_ready":
id = msg.node_id
self.clients[id] = SlaveNode(id)
logger.info("Client %r reported as ready. Currently %i clients ready to swarm." % (id, len(self.clients.ready)))
## emit a warning if the slave's clock seem to be out of sync with our clock
#if abs(time() - msg.data["time"]) > 5.0:
# warnings.warn("The slave node's clock seem to be out of sync. For the statistics to be correct the different locust servers need to have synchronized clocks.")
elif msg.type == "client_stopped":
del self.clients[msg.node_id]
if len(self.clients.hatching + self.clients.running) == 0:
self.state = STATE_STOPPED
logger.info("Removing %s client from running clients" % (msg.node_id))
elif msg.type == "stats":
events.slave_report.fire(client_id=msg.node_id, data=msg.data)
elif msg.type == "hatching":
self.clients[msg.node_id].state = STATE_HATCHING
elif msg.type == "hatch_complete":
self.clients[msg.node_id].state = STATE_RUNNING
self.clients[msg.node_id].user_count = msg.data["count"]
if len(self.clients.hatching) == 0:
count = sum(c.user_count for c in self.clients.itervalues())
events.hatch_complete.fire(user_count=count)
elif msg.type == "quit":
if msg.node_id in self.clients:
del self.clients[msg.node_id]
logger.info("Client %r quit. Currently %i clients connected." % (msg.node_id, len(self.clients.ready)))
elif msg.type == "exception":
self.log_exception(msg.node_id, msg.data["msg"], msg.data["traceback"])
@property
def slave_count(self):
return len(self.clients.ready) + len(self.clients.hatching) + len(self.clients.running)
class SlaveLocustRunner(DistributedLocustRunner):
def __init__(self, *args, **kwargs):
super(SlaveLocustRunner, self).__init__(*args, **kwargs)
self.client_id = socket.gethostname() + "_" + md5(str(time() + random.randint(0,10000))).hexdigest()
self.client = rpc.Client(self.master_host, self.master_port)
self.greenlet = Group()
self.greenlet.spawn(self.worker).link_exception(callback=self.noop)
self.client.send(Message("client_ready", None, self.client_id))
self.greenlet.spawn(self.stats_reporter).link_exception(callback=self.noop)
# register listener for when all locust users have hatched, and report it to the master node
def on_hatch_complete(user_count):
self.client.send(Message("hatch_complete", {"count":user_count}, self.client_id))
events.hatch_complete += on_hatch_complete
# register listener that adds the current number of spawned locusts to the report that is sent to the master node
def on_report_to_master(client_id, data):
data["user_count"] = self.user_count
events.report_to_master += on_report_to_master
# register listener that sends quit message to master
def on_quitting():
self.client.send(Message("quit", None, self.client_id))
events.quitting += on_quitting
# register listener that sends locust exceptions to master
def on_locust_error(locust_instance, exception, tb):
formatted_tb = "".join(traceback.format_tb(tb))
self.client.send(Message("exception", {"msg" : str(exception), "traceback" : formatted_tb}, self.client_id))
events.locust_error += on_locust_error
def worker(self):
while True:
msg = self.client.recv()
if msg.type == "hatch":
self.client.send(Message("hatching", None, self.client_id))
job = msg.data
self.hatch_rate = job["hatch_rate"]
#self.num_clients = job["num_clients"]
self.num_requests = job["num_requests"]
self.host = job["host"]
self.hatching_greenlet = gevent.spawn(lambda: self.start_hatching(locust_count=job["num_clients"], hatch_rate=job["hatch_rate"]))
elif msg.type == "stop":
self.stop()
self.client.send(Message("client_stopped", None, self.client_id))
self.client.send(Message("client_ready", None, self.client_id))
elif msg.type == "quit":
logger.info("Got quit message from master, shutting down...")
self.stop()
self.greenlet.kill(block=True)
def stats_reporter(self):
while True:
data = {}
events.report_to_master.fire(client_id=self.client_id, data=data)
try:
self.client.send(Message("stats", data, self.client_id))
except:
logger.error("Connection lost to master server. Aborting...")
break
gevent.sleep(SLAVE_REPORT_INTERVAL)
| mit
aronsky/home-assistant | homeassistant/components/light/isy994.py | 5 | 1776 |
"""
Support for ISY994 lights.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/light.isy994/
"""
import logging
from typing import Callable
from homeassistant.components.light import (
Light, SUPPORT_BRIGHTNESS, DOMAIN)
from homeassistant.components.isy994 import ISY994_NODES, ISYDevice
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config: ConfigType,
add_entities: Callable[[list], None], discovery_info=None):
"""Set up the ISY994 light platform."""
devices = []
for node in hass.data[ISY994_NODES][DOMAIN]:
devices.append(ISYLightDevice(node))
add_entities(devices)
class ISYLightDevice(ISYDevice, Light):
"""Representation of an ISY994 light device."""
@property
def is_on(self) -> bool:
"""Get whether the ISY994 light is on."""
if self.is_unknown():
return False
return self.value != 0
@property
def brightness(self) -> float:
"""Get the brightness of the ISY994 light."""
return None if self.is_unknown() else self.value
def turn_off(self, **kwargs) -> None:
"""Send the turn off command to the ISY994 light device."""
if not self._node.off():
_LOGGER.debug("Unable to turn off light")
# pylint: disable=arguments-differ
def turn_on(self, brightness=None, **kwargs) -> None:
"""Send the turn on command to the ISY994 light device."""
if not self._node.on(val=brightness):
_LOGGER.debug("Unable to turn on light")
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
| apache-2.0
savoirfairelinux/sflvault | server/sflvault/model/custom_types.py | 2 | 1414 |
# -=- encoding: utf-8 -=-
#
# SFLvault - Secure networked password store and credentials manager.
#
# Copyright (C) 2008-2009 Savoir-faire Linux inc.
#
# Author: Alexandre Bourget <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
from sqlalchemy import types
class JSONEncodedDict(types.TypeDecorator):
"""Represents an mutable structure as a json-encoded string.
Usage::
JSONEncodedDict(255)
"""
impl = types.Text
def process_bind_param(self, value, dialect):
if value is not None:
value = json.dumps(value)
return value
def process_result_value(self, value, dialect):
return json.loads(value) if value else {}
def copy_value(self, value):
return json.loads(json.dumps(value))
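# Illustrative usage (a sketch, not from this file): declaring a column that stores
# a dict as JSON text, assuming a SQLAlchemy table definition elsewhere:
#   from sqlalchemy import Column
#   metadata_col = Column(JSONEncodedDict(255), nullable=True)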
| gpl-3.0
slightlymadphoenix/activityPointsApp | activitypoints/lib/python3.5/site-packages/django/contrib/postgres/search.py | 86 | 8710 |
from django.db.models import Field, FloatField
from django.db.models.expressions import CombinedExpression, Func, Value
from django.db.models.functions import Coalesce
from django.db.models.lookups import Lookup
class SearchVectorExact(Lookup):
lookup_name = 'exact'
def process_rhs(self, qn, connection):
if not hasattr(self.rhs, 'resolve_expression'):
config = getattr(self.lhs, 'config', None)
self.rhs = SearchQuery(self.rhs, config=config)
rhs, rhs_params = super(SearchVectorExact, self).process_rhs(qn, connection)
return rhs, rhs_params
def as_sql(self, qn, connection):
lhs, lhs_params = self.process_lhs(qn, connection)
rhs, rhs_params = self.process_rhs(qn, connection)
params = lhs_params + rhs_params
return '%s @@ %s = true' % (lhs, rhs), params
class SearchVectorField(Field):
def db_type(self, connection):
return 'tsvector'
class SearchQueryField(Field):
def db_type(self, connection):
return 'tsquery'
class SearchVectorCombinable(object):
ADD = '||'
def _combine(self, other, connector, reversed, node=None):
if not isinstance(other, SearchVectorCombinable) or not self.config == other.config:
raise TypeError('SearchVector can only be combined with other SearchVectors')
if reversed:
return CombinedSearchVector(other, connector, self, self.config)
return CombinedSearchVector(self, connector, other, self.config)
class SearchVector(SearchVectorCombinable, Func):
function = 'to_tsvector'
arg_joiner = " || ' ' || "
_output_field = SearchVectorField()
config = None
def __init__(self, *expressions, **extra):
super(SearchVector, self).__init__(*expressions, **extra)
self.source_expressions = [
Coalesce(expression, Value('')) for expression in self.source_expressions
]
self.config = self.extra.get('config', self.config)
weight = self.extra.get('weight')
if weight is not None and not hasattr(weight, 'resolve_expression'):
weight = Value(weight)
self.weight = weight
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
resolved = super(SearchVector, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
if self.config:
if not hasattr(self.config, 'resolve_expression'):
resolved.config = Value(self.config).resolve_expression(query, allow_joins, reuse, summarize, for_save)
else:
resolved.config = self.config.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return resolved
def as_sql(self, compiler, connection, function=None, template=None):
config_params = []
if template is None:
if self.config:
config_sql, config_params = compiler.compile(self.config)
template = "%(function)s({}::regconfig, %(expressions)s)".format(config_sql.replace('%', '%%'))
else:
template = self.template
sql, params = super(SearchVector, self).as_sql(compiler, connection, function=function, template=template)
extra_params = []
if self.weight:
weight_sql, extra_params = compiler.compile(self.weight)
sql = 'setweight({}, {})'.format(sql, weight_sql)
return sql, config_params + params + extra_params
class CombinedSearchVector(SearchVectorCombinable, CombinedExpression):
def __init__(self, lhs, connector, rhs, config, output_field=None):
self.config = config
super(CombinedSearchVector, self).__init__(lhs, connector, rhs, output_field)
class SearchQueryCombinable(object):
BITAND = '&&'
BITOR = '||'
def _combine(self, other, connector, reversed, node=None):
if not isinstance(other, SearchQueryCombinable):
raise TypeError(
'SearchQuery can only be combined with other SearchQuerys, '
'got {}.'.format(type(other))
)
if not self.config == other.config:
raise TypeError("SearchQuery configs don't match.")
if reversed:
return CombinedSearchQuery(other, connector, self, self.config)
return CombinedSearchQuery(self, connector, other, self.config)
# On Combinable, these are not implemented to reduce confusion with Q. In
# this case we are actually (ab)using them to do logical combination so
# it's consistent with other usage in Django.
def __or__(self, other):
return self._combine(other, self.BITOR, False)
def __ror__(self, other):
return self._combine(other, self.BITOR, True)
def __and__(self, other):
return self._combine(other, self.BITAND, False)
def __rand__(self, other):
return self._combine(other, self.BITAND, True)
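# Illustrative usage (a sketch; the model and its annotated vector column are
# hypothetical):
#   query = SearchQuery('django') & ~SearchQuery('python')
#   Entry.objects.annotate(search=SearchVector('body')).filter(search=query)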
class SearchQuery(SearchQueryCombinable, Value):
invert = False
_output_field = SearchQueryField()
config = None
def __init__(self, value, output_field=None, **extra):
self.config = extra.pop('config', self.config)
self.invert = extra.pop('invert', self.invert)
super(SearchQuery, self).__init__(value, output_field=output_field)
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
resolved = super(SearchQuery, self).resolve_expression(query, allow_joins, reuse, summarize, for_save)
if self.config:
if not hasattr(self.config, 'resolve_expression'):
resolved.config = Value(self.config).resolve_expression(query, allow_joins, reuse, summarize, for_save)
else:
resolved.config = self.config.resolve_expression(query, allow_joins, reuse, summarize, for_save)
return resolved
def as_sql(self, compiler, connection):
params = [self.value]
if self.config:
config_sql, config_params = compiler.compile(self.config)
template = 'plainto_tsquery({}::regconfig, %s)'.format(config_sql)
params = config_params + [self.value]
else:
template = 'plainto_tsquery(%s)'
if self.invert:
template = '!!({})'.format(template)
return template, params
def _combine(self, other, connector, reversed, node=None):
combined = super(SearchQuery, self)._combine(other, connector, reversed, node)
combined.output_field = SearchQueryField()
return combined
def __invert__(self):
extra = {
'invert': not self.invert,
'config': self.config,
}
return type(self)(self.value, **extra)
class CombinedSearchQuery(SearchQueryCombinable, CombinedExpression):
def __init__(self, lhs, connector, rhs, config, output_field=None):
self.config = config
super(CombinedSearchQuery, self).__init__(lhs, connector, rhs, output_field)
class SearchRank(Func):
function = 'ts_rank'
_output_field = FloatField()
def __init__(self, vector, query, **extra):
if not hasattr(vector, 'resolve_expression'):
vector = SearchVector(vector)
if not hasattr(query, 'resolve_expression'):
query = SearchQuery(query)
weights = extra.get('weights')
if weights is not None and not hasattr(weights, 'resolve_expression'):
weights = Value(weights)
self.weights = weights
super(SearchRank, self).__init__(vector, query, **extra)
def as_sql(self, compiler, connection, function=None, template=None):
extra_params = []
extra_context = {}
if template is None and self.extra.get('weights'):
if self.weights:
template = '%(function)s(%(weights)s, %(expressions)s)'
weight_sql, extra_params = compiler.compile(self.weights)
extra_context['weights'] = weight_sql
sql, params = super(SearchRank, self).as_sql(
compiler, connection,
function=function, template=template, **extra_context
)
return sql, extra_params + params
SearchVectorField.register_lookup(SearchVectorExact)
class TrigramBase(Func):
def __init__(self, expression, string, **extra):
if not hasattr(string, 'resolve_expression'):
string = Value(string)
super(TrigramBase, self).__init__(expression, string, output_field=FloatField(), **extra)
class TrigramSimilarity(TrigramBase):
function = 'SIMILARITY'
class TrigramDistance(TrigramBase):
function = ''
arg_joiner = ' <-> '
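# Illustrative usage (a sketch; Entry and its 'body' field are hypothetical):
#   from django.contrib.postgres.search import SearchVector, SearchQuery, SearchRank
#   Entry.objects.annotate(
#       rank=SearchRank(SearchVector('body'), SearchQuery('cheese'))
#   ).order_by('-rank')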
| mit
ColinIanKing/autotest | cli/label.py | 6 | 7943 |
#
# Copyright 2008 Google Inc. All Rights Reserved.
"""
The label module contains the objects and methods used to
manage labels in Autotest.
The valid actions are:
add: adds label(s), or hosts to a LABEL
remove: deletes label(s), or hosts from a LABEL
list: lists label(s)
The common options are:
--blist / -B: file containing a list of LABELs
See topic_common.py for a High Level Design and Algorithm.
"""
import os, sys
from autotest.cli import topic_common, action_common
class label(topic_common.atest):
"""Label class
atest label [create|delete|list|add|remove] <options>"""
usage_action = '[create|delete|list|add|remove]'
topic = msg_topic = 'label'
msg_items = '<labels>'
def __init__(self):
"""Add to the parser the options common to all the
label actions"""
super(label, self).__init__()
self.parser.add_option('-B', '--blist',
help='File listing the labels',
type='string',
default=None,
metavar='LABEL_FLIST')
self.topic_parse_info = topic_common.item_parse_info(
attribute_name='labels',
filename_option='blist',
use_leftover=True)
def get_items(self):
return self.labels
class label_help(label):
"""Just here to get the atest logic working.
Usage is set by its parent"""
pass
class label_list(action_common.atest_list, label):
"""atest label list [--platform] [--all] [--atomicgroup]
[--valid-only] [--machine <machine>]
[--blist <file>] [<labels>]"""
def __init__(self):
super(label_list, self).__init__()
self.parser.add_option('-t', '--platform-only',
help='Display only platform labels',
action='store_true')
self.parser.add_option('-d', '--valid-only',
help='Display only valid labels',
action='store_true')
self.parser.add_option('-a', '--all',
help=('Display both normal & '
'platform labels'),
action='store_true')
self.parser.add_option('--atomicgroup',
help=('Display only atomic group labels '
'along with the atomic group name.'),
action='store_true')
self.parser.add_option('-m', '--machine',
help='List LABELs of MACHINE',
type='string',
metavar='MACHINE')
def parse(self):
host_info = topic_common.item_parse_info(attribute_name='hosts',
inline_option='machine')
(options, leftover) = super(label_list, self).parse([host_info])
exclusives = [options.all, options.platform_only, options.atomicgroup]
if exclusives.count(True) > 1:
self.invalid_syntax('Only specify one of --all, '
'--platform, --atomicgroup')
if len(self.hosts) > 1:
self.invalid_syntax(('Only one machine name allowed. '
'''Use '%s host list %s' '''
'instead.') %
(sys.argv[0], ','.join(self.hosts)))
self.all = options.all
self.atomicgroup = options.atomicgroup
self.platform_only = options.platform_only
self.valid_only = options.valid_only
return (options, leftover)
def execute(self):
filters = {}
check_results = {}
if self.hosts:
filters['host__hostname__in'] = self.hosts
check_results['host__hostname__in'] = None
if self.labels:
filters['name__in'] = self.labels
check_results['name__in'] = 'name'
return super(label_list, self).execute(op='get_labels',
filters=filters,
check_results=check_results)
def output(self, results):
if self.valid_only:
results = [label for label in results
if not label['invalid']]
if self.platform_only:
results = [label for label in results
if label['platform']]
keys = ['name', 'invalid']
elif self.atomicgroup:
results = [label for label in results
if label['atomic_group']]
keys = ['name', 'atomic_group.name', 'invalid']
elif not self.all:
results = [label for label in results
if not label['platform']]
keys = ['name', 'only_if_needed', 'invalid']
else:
keys = ['name', 'platform', 'only_if_needed', 'invalid']
super(label_list, self).output(results, keys)
class label_create(action_common.atest_create, label):
"""atest label create <labels>|--blist <file> --platform"""
def __init__(self):
super(label_create, self).__init__()
self.parser.add_option('-t', '--platform',
help='To create this label as a platform',
default=False,
action='store_true')
self.parser.add_option('-o', '--only_if_needed',
help='To mark the label as "only use if needed"',
default=False,
action='store_true')
def parse(self):
(options, leftover) = super(label_create,
self).parse(req_items='labels')
self.data_item_key = 'name'
self.data['platform'] = options.platform
self.data['only_if_needed'] = options.only_if_needed
return (options, leftover)
class label_delete(action_common.atest_delete, label):
"""atest label delete <labels>|--blist <file>"""
pass
class label_add_or_remove(label):
def __init__(self):
super(label_add_or_remove, self).__init__()
lower_words = tuple(word.lower() for word in self.usage_words)
self.parser.add_option('-m', '--machine',
help=('%s MACHINE(s) %s the LABEL' %
self.usage_words),
type='string',
metavar='MACHINE')
self.parser.add_option('-M', '--mlist',
help='File containing machines to %s %s '
'the LABEL' % lower_words,
type='string',
metavar='MACHINE_FLIST')
def parse(self):
host_info = topic_common.item_parse_info(attribute_name='hosts',
inline_option='machine',
filename_option='mlist')
(options, leftover) = super(label_add_or_remove,
self).parse([host_info],
req_items='labels')
if not getattr(self, 'hosts', None):
self.invalid_syntax('%s %s requires at least one host' %
(self.msg_topic,
self.usage_action))
return (options, leftover)
class label_add(action_common.atest_add, label_add_or_remove):
"""atest label add <labels>|--blist <file>
--platform [--machine <machine>] [--mlist <file>]"""
pass
class label_remove(action_common.atest_remove, label_add_or_remove):
"""atest label remove <labels>|--blist <file>
[--machine <machine>] [--mlist <file>]"""
pass
| gpl-2.0
leiqzhang/litevirt-api | LitevirtAPI/network.py | 1 | 1068 |
#!/usr/bin/python
class EthernetDevice(object):
"""Representative of an ethernet device. """
def __init__(self,
iface = None,
vendor = None,
driver = None,
macaddr = None,
path = None):
self._iface = iface
self._vendor = vendor
self._driver = driver
self._path = path
self._macaddr = macaddr
def link_status(self):
pass
def flash_light(self):
pass
def interface(self):
pass
def vendor(self):
pass
def driver(self):
pass
def path(self):
pass
def macaddr(self):
pass
class EthernetManager(object):
def __init__(self):
self._devs = self.enum_devs()
def enumerate(self):
pass
def get_devs(self):
pass
def find_devs(self,
iface = None,
vendor = None,
driver = None,
macaddr = None,
path = None):
pass
| gpl-3.0
thisisshi/cloud-custodian | tools/c7n_terraform/c7n_terraform/parser.py | 2 | 14812 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from collections.abc import Iterable
import json
import logging
import os
from pathlib import Path
import re
import hcl2
TF_JSON_SUFFIX = ".tf.json"
TF_HCL_SUFFIX = ".tf"
class Block(dict):
__slots__ = ()
def __getattr__(self, k):
return self[k]
class VariableResolver:
log = logging.getLogger("c7n_terraform.hcl.variable")
def __init__(self, resolver, value_map=None):
self.resolver = resolver
self.value_map = value_map or {}
def resolve(self):
for var in self.resolver.iter_blocks(tf_kind="variable"):
source, value = self.resolve_value(var)
serialized = Block(var)
del serialized["source"]["lines"]
del serialized["data"]
for block, expr_path, expr in self.get_references(var):
binding = {
"expr_path": expr_path,
"source": source,
"expr": expr,
"var": serialized,
"value": value,
}
block.setdefault("bindings", []).append(binding)
# binding = dict(binding)
# binding.pop('var')
# binding['data_path'] = block.data_path
# var.setdefault('bindings', []).append(binding)
self._set(block, block.bindings[-1])
def resolve_value(self, var):
if var.name in self.value_map:
value = self.value_map[var.name]
source = "map"
elif var.env_value:
value = var.env_value
source = "env"
else:
value = var.default
source = "default"
return source, value
def get_references(self, var):
regex = self.get_regex(var)
for block in self.resolver.iter_blocks(path_parent=var.path.parent):
for ref in self._search(regex, block.data):
yield (block, *ref)
def _set(self, block, binding):
parent = self._traverse(block["data"], binding["expr_path"][:-1])
part = binding["expr_path"][-1]
regex = self.get_regex(binding["var"])
literal = bool(re.match(r"^" + regex.pattern + "$", binding["expr"]))
parent[part] = (
binding["value"]
if literal
else regex.sub(re.escape(str(binding["value"])), binding["expr"])
)
def _traverse(self, data, path):
cur = data
for p in path:
if isinstance(data, dict):
cur = cur[p]
elif isinstance(data, list):
cur = cur[p]
else:
return cur
return cur
def _search(self, regex, block, path=()):
path = path is None and [] or path
tblock = type(block)
if tblock is dict:
for k, v in block.items():
kpath = list(path)
kpath.append(k)
for ref in self._search(regex, v, kpath):
yield ref
elif tblock is list:
for idx, v in enumerate(block):
kpath = list(path)
kpath.append(idx)
for ref in self._search(regex, v, kpath):
yield ref
elif tblock is str:
if regex.findall(block):
yield path, block
def get_regex(self, var):
regex = r"((?:\$\{)?"
if var.type == "variable":
regex += "var[.]" + re.escape(var.name) + r"(?:\})?)"
if var.type == "local":
regex += "locals[.]" + re.escape(var.name) + r"(?\})?)"
return re.compile(regex)
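# Illustrative note (not part of the original parser): for a variable named "region"
# the compiled pattern matches both the bare and the wrapped interpolation forms,
# e.g. the substrings "var.region" and "${var.region}" inside an expression.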
def iterable(obj):
return isinstance(obj, Iterable)
class HclLocator:
log = logging.getLogger("c7n_terraform.hcl.locator")
def __init__(self):
self.file_cache = {}
self.line_cache = {}
def resolve_source(self, path, data_key):
if path not in self.file_cache:
self._get_lines(path)
position = self._block_header_position(path, data_key)
assert position
return position
def _block_header_position(self, path, data_key):
start_line, end_line = 0, 0
key_set = set(data_key)
for cache_idx, (idx, line) in enumerate(self.line_cache[path]):
tokens = [t.replace('"', "") for t in line.split()]
if key_set.issubset(tokens):
start_line = idx
end_line = self._get_end_line(
start_line, cache_idx, self.line_cache[path]
)
break
if not (start_line and end_line):
return None
return {
"start": start_line,
"end": end_line,
"lines": self.file_cache[path][start_line - 1:end_line - 1],
}
def _get_end_line(self, start_line, cache_idx, lines):
end_line = start_line
idx = 1
s, e = "{", "}"
if s not in lines[cache_idx][1]:
s, e = "(", ")"
for lineno, l in lines[cache_idx + 1:]:
if s in l:
idx += 1
if e in l:
idx -= 1
if idx == 0:
return lineno
return end_line
def _get_lines(self, path):
with open(path) as fh:
self.file_cache[path] = [(idx + 1, l) for idx, l in enumerate(fh)]
lines = []
for idx, line in self.file_cache[path]:
line = line.strip()
if not line:
continue
lines.append((idx, line))
self.line_cache[path] = lines
class TerraformVisitor:
log = logging.getLogger("c7n_terraform.hcl.visitor")
def __init__(self, data, root_path):
self.data = data
self.root_path = root_path
self.hcl_locator = HclLocator()
self.blocks = ()
def iter_blocks(self, path_parent=None, tf_kind=None, name=None):
for b in self.blocks:
if path_parent and b.path.parent != path_parent:
continue
if tf_kind and b.type != tf_kind:
continue
if name:
continue
yield b
def visit(self):
# first pass get annotated blocks
blocks = []
visitor_map = {
"data": self.visit_data,
"variable": self.visit_variable,
"terraform": self.visit_terraform,
"module": self.visit_module,
"output": self.visit_output,
"locals": self.visit_local,
"provider": self.visit_provider,
"resource": self.visit_resource,
}
for path, path_data in self.data.items():
for data_type, data_instances in path_data.items():
for instance in data_instances:
block = visitor_map[data_type](path, instance)
if block is None:
continue
elif isinstance(block, dict):
blocks.append(block)
elif iterable(block):
blocks.extend(block)
self.blocks = blocks
# second pass resolve variables
# yield resolved blocks
def dump(self, path, sort="type"):
import json
import operator
class PathEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Path):
return str(o)
return super().default(o)
blocks = []
for b in self.blocks:
b = dict(b)
b["path"] = str(b["path"])
del b["source"]
blocks.append(b)
with open(path, "w") as fh:
print("dump %d blocks path %s" % (len(blocks), path))
json.dump(
sorted(blocks, key=operator.itemgetter(sort)),
cls=PathEncoder,
indent=2,
fp=fh,
)
def visit_data(self, path, data_block):
provider_type = next(iter(data_block))
for name, resource in data_block[provider_type].items():
data_path = ["data", provider_type, name]
yield self._block(path, data_block, data_path=data_path)
def visit_resource(self, path, data_block):
provider_type = next(iter(data_block))
for name, resource in data_block[provider_type].items():
data_path = ["resource", provider_type, name]
yield Block(
type="resource",
provider_type=provider_type,
name=name,
path=path,
data_path=data_path,
data=resource,
source=self.hcl_locator.resolve_source(path, data_path),
)
def visit_variable(self, path, data_block):
name = next(iter(data_block))
default = data_block[name].get("default")
if default:
default = default[0]
data_path = ["variable", name]
block = Block(
type="variable",
name=name,
path=path,
data_path=data_path,
data=data_block,
source=self.hcl_locator.resolve_source(path, data_path),
value_type=(
"type" in data_block[name]
and data_block[name].get(
"type",
)[0]
or infer_type(default)
),
default=default,
env_value=os.environ.get("TF_VAR_%s" % name),
)
return block
def visit_provider(self, path, data_block):
self.log.debug("provider %s", data_block)
provider = next(iter(data_block))
alias = data_block[provider].get("alias", None)
if alias:
            alias = alias[0]
data_path = ["provider", provider]
return Block(
type="provider",
name=alias or provider,
path=path,
data_path=data_path,
data=data_block,
source=self.hcl_locator.resolve_source(path, data_path),
)
def visit_local(self, path, data_block):
self.log.debug("local %s", data_block)
data_path = ["local", next(iter(data_block))]
source = self.hcl_locator.resolve_source(path, data_path[1:])
return self._block(path, data_block, type="local", source=source)
def visit_module(self, path, data_block):
self.log.debug("module %s", data_block)
return self._block(path, data_block, type="module")
def visit_terraform(self, path, data_block):
self.log.debug("terraform %s", data_block)
data_path = ["terraform", next(iter(data_block))]
source = self.hcl_locator.resolve_source(path, data_path[:1])
return self._block(
path, data_block, data_path=["terraform"], type="terraform", source=source
)
def visit_output(self, path, data_block):
self.log.debug("output %s", data_block)
return self._block(path, data_block, type="output")
def _block(self, path, data_block, type=None, data_path=None, source=True, **kw):
if data_path:
type = data_path[0]
name = data_path[-1]
else:
name = next(iter(data_block))
data_path = [type, name]
if isinstance(source, bool):
source = self.hcl_locator.resolve_source(path, data_path)
return Block(
type=type,
name=name,
path=path,
data_path=data_path,
data=data_block,
source=source,
**kw,
)
TypeMap = {
str: "string",
bool: "bool",
float: "number",
int: "number",
set: "set",
list: "list",
dict: "map",
tuple: "tuple",
"string": str,
"bool": bool,
"number": [float, int],
"set": set,
"list": list,
"map": dict,
"tuple": tuple,
}
def infer_type(value, default="unknown"):
return TypeMap.get(type(value), default)
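# Illustrative examples (added for documentation only):
#   infer_type("us-east-1")  -> "string"
#   infer_type(3)            -> "number"
#   infer_type({"a": 1})     -> "map"
#   infer_type(object())     -> "unknown"  (the default fallback)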
class Parser:
log = logging.getLogger("c7n_terraform.hcl.parser")
_parser_map = {
TF_HCL_SUFFIX: "_parse_hcl_file",
TF_JSON_SUFFIX: "_parse_json_file",
}
def __init__(self):
self.seen_dirs = set()
self.errors = {}
self.tf_resources = {}
def _parse_hcl_file(self, tf_file):
with open(tf_file) as fp:
return self._parse_tf_data(hcl2.load(fp))
def _parse_json_file(self, tf_file):
with open(tf_file) as fp:
return self._parse_tf_json_data(json.load(fp))
def _parse_tf_json_data(self, data):
def larkify(instance):
"""Emulate output performed during hcl2.load for JSON loaded data"""
if isinstance(instance, list):
return [larkify(el) if isinstance(el, dict) else el for el in instance]
if isinstance(instance, dict):
return {k: larkify(v) for k, v in instance.items()}
return [instance]
output = {}
for block in data:
output[block] = [
{resource: larkify(instance)} for resource, instance in data.get(block, {}).items()
]
return output
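    # Illustrative note (an assumption, not part of the original source):
    # larkify wraps scalar leaves in single-element lists so that JSON input
    # such as {"resource": {"aws_s3_bucket": {"logs": {"acl": "private"}}}}
    # ends up shaped like hcl2.load output, i.e. with leaves like {"acl": ["private"]}.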
def _parse_tf_data(self, data):
for resource_type in data.get("resource", ()):
for instance_name, instance in resource_type.items():
# hcl2 parser injects dynamic for computation
for block in instance.pop("dynamic", ()):
for field, value in block.items():
instance[field] = value
return data
def _resolve_modules(self, path, tf_data):
for m in tf_data.get("module", ()):
for module in m.values():
mpath = (path / module["source"][0]).resolve()
yield mpath
def parse_module(self, path, rglob=False):
directory = Path(path)
modules = set()
for pattern in ("*%s" % TF_HCL_SUFFIX, "*%s" % TF_JSON_SUFFIX):
file_iter = rglob and directory.rglob or directory.glob
for f in file_iter(pattern):
self.seen_dirs.add(f.parent)
try:
file_parser = getattr(self, self._parser_map.get(pattern.replace("*", "")))
self.tf_resources[f] = tf_data = file_parser(f)
modules.update(self._resolve_modules(f.parent, tf_data))
except Exception as e:
self.log.info(f"error parsing {f}", exc_info=e)
self.errors[str(f)] = e
for m in modules:
if m not in self.seen_dirs:
self.parse_module(m)
return self.tf_resources
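# Illustrative usage (an assumption, not part of the original source):
#   parser = Parser()
#   resources = parser.parse_module("/path/to/terraform/root", rglob=True)
#   # `resources` maps each parsed Terraform file path to its parsed data;
#   # directories referenced by module "source" attributes are parsed recursively.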
|
apache-2.0
|
ygol/odoo
|
setup/package.py
|
180
|
22070
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import optparse
import os
import pexpect
import shutil
import signal
import subprocess
import tempfile
import time
import xmlrpclib
from contextlib import contextmanager
from glob import glob
from os.path import abspath, dirname, join
from sys import stdout
from tempfile import NamedTemporaryFile
#----------------------------------------------------------
# Utils
#----------------------------------------------------------
execfile(join(dirname(__file__), '..', 'openerp', 'release.py'))
version = version.split('-')[0]
timestamp = time.strftime("%Y%m%d", time.gmtime())
GPGPASSPHRASE = os.getenv('GPGPASSPHRASE')
GPGID = os.getenv('GPGID')
PUBLISH_DIRS = {
'debian': 'deb',
'redhat': 'rpm',
'tarball': 'src',
'windows': 'exe',
}
ADDONS_NOT_TO_PUBLISH = [
'web_analytics'
]
def mkdir(d):
if not os.path.isdir(d):
os.makedirs(d)
def system(l, chdir=None):
print l
if chdir:
cwd = os.getcwd()
os.chdir(chdir)
if isinstance(l, list):
rc = os.spawnvp(os.P_WAIT, l[0], l)
elif isinstance(l, str):
tmp = ['sh', '-c', l]
rc = os.spawnvp(os.P_WAIT, tmp[0], tmp)
if chdir:
os.chdir(cwd)
return rc
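# Illustrative usage (not from the original source): system() accepts either an
# argument list or a shell string, e.g.
#   system(['mkdir', '-p', '/tmp/odoo-build'])          # spawned directly
#   system('ls -l *.tar.gz', chdir='/tmp/odoo-build')   # run through sh -c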
def _rpc_count_modules(addr='http://127.0.0.1', port=8069, dbname='mycompany'):
time.sleep(5)
modules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(
dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'installed')]
)
if modules and len(modules) > 1:
time.sleep(1)
toinstallmodules = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(
dbname, 1, 'admin', 'ir.module.module', 'search', [('state', '=', 'to install')]
)
if toinstallmodules:
print("Package test: FAILED. Not able to install dependencies of base.")
raise Exception("Installation of package failed")
else:
print("Package test: successfuly installed %s modules" % len(modules))
else:
print("Package test: FAILED. Not able to install base.")
raise Exception("Installation of package failed")
def publish(o, type, extensions):
def _publish(o, release):
arch = ''
filename = release.split(os.path.sep)[-1]
release_dir = PUBLISH_DIRS[type]
release_path = join(o.pub, release_dir, filename)
system('mkdir -p %s' % join(o.pub, release_dir))
shutil.move(join(o.build_dir, release), release_path)
# Latest/symlink handler
release_abspath = abspath(release_path)
latest_abspath = release_abspath.replace(timestamp, 'latest')
if os.path.islink(latest_abspath):
os.unlink(latest_abspath)
os.symlink(release_abspath, latest_abspath)
return release_path
published = []
for extension in extensions:
release = glob("%s/odoo_*.%s" % (o.build_dir, extension))[0]
published.append(_publish(o, release))
return published
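# Illustrative note (an assumption): for a tarball build, publish() moves e.g.
# <build_dir>/odoo_8.0.<timestamp>.tar.gz into <pub>/src/ and refreshes the
# "latest" symlink by replacing the timestamp in the filename with "latest".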
class OdooDocker(object):
def __init__(self):
self.log_file = NamedTemporaryFile(mode='w+b', prefix="bash", suffix=".txt", delete=False)
self.port = 8069 # TODO sle: reliable way to get a free port?
self.prompt_re = '[root@nightly-tests] # '
self.timeout = 600
def system(self, command):
self.docker.sendline(command)
self.docker.expect_exact(self.prompt_re)
def start(self, docker_image, build_dir, pub_dir):
self.build_dir = build_dir
self.pub_dir = pub_dir
self.docker = pexpect.spawn(
'docker run -v %s:/opt/release -p 127.0.0.1:%s:8069'
' -t -i %s /bin/bash --noediting' % (self.build_dir, self.port, docker_image),
timeout=self.timeout,
searchwindowsize=len(self.prompt_re) + 1,
)
time.sleep(2) # let the bash start
self.docker.logfile_read = self.log_file
self.id = subprocess.check_output('docker ps -l -q', shell=True)
def end(self):
try:
_rpc_count_modules(port=str(self.port))
except Exception, e:
print('Exception during docker execution: %s:' % str(e))
print('Error during docker execution: printing the bash output:')
with open(self.log_file.name) as f:
print '\n'.join(f.readlines())
raise
finally:
self.docker.close()
system('docker rm -f %s' % self.id)
self.log_file.close()
os.remove(self.log_file.name)
@contextmanager
def docker(docker_image, build_dir, pub_dir):
_docker = OdooDocker()
try:
_docker.start(docker_image, build_dir, pub_dir)
try:
yield _docker
except Exception, e:
raise
finally:
_docker.end()
class KVM(object):
def __init__(self, o, image, ssh_key='', login='openerp'):
self.o = o
self.image = image
self.ssh_key = ssh_key
self.login = login
def timeout(self,signum,frame):
print "vm timeout kill",self.pid
os.kill(self.pid,15)
def start(self):
l="kvm -net nic,model=rtl8139 -net user,hostfwd=tcp:127.0.0.1:10022-:22,hostfwd=tcp:127.0.0.1:18069-:8069,hostfwd=tcp:127.0.0.1:15432-:5432 -drive".split(" ")
#l.append('file=%s,if=virtio,index=0,boot=on,snapshot=on'%self.image)
l.append('file=%s,snapshot=on'%self.image)
#l.extend(['-vnc','127.0.0.1:1'])
l.append('-nographic')
print " ".join(l)
self.pid=os.spawnvp(os.P_NOWAIT, l[0], l)
time.sleep(10)
signal.alarm(2400)
signal.signal(signal.SIGALRM, self.timeout)
try:
self.run()
finally:
signal.signal(signal.SIGALRM, signal.SIG_DFL)
os.kill(self.pid,15)
time.sleep(10)
def ssh(self,cmd):
l=['ssh','-o','UserKnownHostsFile=/dev/null','-o','StrictHostKeyChecking=no','-p','10022','-i',self.ssh_key,'%[email protected]'%self.login,cmd]
system(l)
def rsync(self,args,options='--delete --exclude .bzrignore'):
cmd ='rsync -rt -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -p 10022 -i %s" %s %s' % (self.ssh_key, options, args)
system(cmd)
def run(self):
pass
class KVMWinBuildExe(KVM):
def run(self):
with open(join(self.o.build_dir, 'setup/win32/Makefile.version'), 'w') as f:
f.write("VERSION=%s\n" % self.o.version_full)
with open(join(self.o.build_dir, 'setup/win32/Makefile.python'), 'w') as f:
f.write("PYTHON_VERSION=%s\n" % self.o.vm_winxp_python_version.replace('.', ''))
self.ssh("mkdir -p build")
self.rsync('%s/ %[email protected]:build/server/' % (self.o.build_dir, self.login))
self.ssh("cd build/server/setup/win32;time make allinone;")
self.rsync('%[email protected]:build/server/setup/win32/release/ %s/' % (self.login, self.o.build_dir), '')
print "KVMWinBuildExe.run(): done"
class KVMWinTestExe(KVM):
def run(self):
# Cannot use o.version_full when the version is not correctly parsed
# (for instance, containing *rc* or *dev*)
setuppath = glob("%s/openerp-server-setup-*.exe" % self.o.build_dir)[0]
setupfile = setuppath.split('/')[-1]
setupversion = setupfile.split('openerp-server-setup-')[1].split('.exe')[0]
self.rsync('"%s" %[email protected]:' % (setuppath, self.login))
self.ssh("TEMP=/tmp ./%s /S" % setupfile)
self.ssh('PGPASSWORD=openpgpwd /cygdrive/c/"Program Files"/"Odoo %s"/PostgreSQL/bin/createdb.exe -e -U openpg mycompany' % setupversion)
self.ssh('/cygdrive/c/"Program Files"/"Odoo %s"/server/openerp-server.exe -d mycompany -i base --stop-after-init' % setupversion)
self.ssh('net start odoo-server-8.0')
_rpc_count_modules(port=18069)
#----------------------------------------------------------
# Stage: building
#----------------------------------------------------------
def _prepare_build_dir(o, win32=False):
cmd = ['rsync', '-a', '--exclude', '.git', '--exclude', '*.pyc', '--exclude', '*.pyo']
if not win32:
cmd += ['--exclude', 'setup/win32']
system(cmd + ['%s/' % o.odoo_dir, o.build_dir])
try:
for addon_path in glob(join(o.build_dir, 'addons/*')):
if addon_path.split(os.path.sep)[-1] not in ADDONS_NOT_TO_PUBLISH:
shutil.move(addon_path, join(o.build_dir, 'openerp/addons'))
except shutil.Error:
# Thrown when the add-on is already in openerp/addons (if _prepare_build_dir
# has already been called once)
pass
def build_tgz(o):
system(['python2', 'setup.py', 'sdist', '--quiet', '--formats=gztar,zip'], o.build_dir)
system(['mv', glob('%s/dist/odoo-*.tar.gz' % o.build_dir)[0], '%s/odoo_%s.%s.tar.gz' % (o.build_dir, version, timestamp)])
system(['mv', glob('%s/dist/odoo-*.zip' % o.build_dir)[0], '%s/odoo_%s.%s.zip' % (o.build_dir, version, timestamp)])
def build_deb(o):
# Append timestamp to version for the .dsc to refer the right .tar.gz
cmd=['sed', '-i', '1s/^.*$/odoo (%s.%s) stable; urgency=low/'%(version,timestamp), 'debian/changelog']
subprocess.call(cmd, cwd=o.build_dir)
deb = pexpect.spawn('dpkg-buildpackage -rfakeroot -k%s' % GPGID, cwd=o.build_dir)
deb.logfile = stdout
if GPGPASSPHRASE:
deb.expect_exact('Enter passphrase: ', timeout=1200)
deb.send(GPGPASSPHRASE + '\r\n')
deb.expect_exact('Enter passphrase: ')
deb.send(GPGPASSPHRASE + '\r\n')
deb.expect(pexpect.EOF, timeout=1200)
system(['mv', glob('%s/../odoo_*.deb' % o.build_dir)[0], '%s' % o.build_dir])
system(['mv', glob('%s/../odoo_*.dsc' % o.build_dir)[0], '%s' % o.build_dir])
system(['mv', glob('%s/../odoo_*_amd64.changes' % o.build_dir)[0], '%s' % o.build_dir])
system(['mv', glob('%s/../odoo_*.tar.gz' % o.build_dir)[0], '%s' % o.build_dir])
def build_rpm(o):
system(['python2', 'setup.py', '--quiet', 'bdist_rpm'], o.build_dir)
system(['mv', glob('%s/dist/odoo-*.noarch.rpm' % o.build_dir)[0], '%s/odoo_%s.%s.noarch.rpm' % (o.build_dir, version, timestamp)])
def build_exe(o):
KVMWinBuildExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start()
system(['cp', glob('%s/openerp*.exe' % o.build_dir)[0], '%s/odoo_%s.%s.exe' % (o.build_dir, version, timestamp)])
#----------------------------------------------------------
# Stage: testing
#----------------------------------------------------------
def _prepare_testing(o):
if not o.no_tarball:
subprocess.call(["mkdir", "docker_src"], cwd=o.build_dir)
subprocess.call(["cp", "package.dfsrc", os.path.join(o.build_dir, "docker_src", "Dockerfile")],
cwd=os.path.join(o.odoo_dir, "setup"))
# Use rsync to copy requirements.txt in order to keep original permissions
subprocess.call(["rsync", "-a", "requirements.txt", os.path.join(o.build_dir, "docker_src")],
cwd=os.path.join(o.odoo_dir))
subprocess.call(["docker", "build", "-t", "odoo-%s-src-nightly-tests" % version, "."],
cwd=os.path.join(o.build_dir, "docker_src"))
if not o.no_debian:
subprocess.call(["mkdir", "docker_debian"], cwd=o.build_dir)
subprocess.call(["cp", "package.dfdebian", os.path.join(o.build_dir, "docker_debian", "Dockerfile")],
cwd=os.path.join(o.odoo_dir, "setup"))
# Use rsync to copy requirements.txt in order to keep original permissions
subprocess.call(["rsync", "-a", "requirements.txt", os.path.join(o.build_dir, "docker_debian")],
cwd=os.path.join(o.odoo_dir))
subprocess.call(["docker", "build", "-t", "odoo-%s-debian-nightly-tests" % version, "."],
cwd=os.path.join(o.build_dir, "docker_debian"))
if not o.no_rpm:
subprocess.call(["mkdir", "docker_centos"], cwd=o.build_dir)
subprocess.call(["cp", "package.dfcentos", os.path.join(o.build_dir, "docker_centos", "Dockerfile")],
cwd=os.path.join(o.odoo_dir, "setup"))
subprocess.call(["docker", "build", "-t", "odoo-%s-centos-nightly-tests" % version, "."],
cwd=os.path.join(o.build_dir, "docker_centos"))
def test_tgz(o):
with docker('odoo-%s-src-nightly-tests' % version, o.build_dir, o.pub) as wheezy:
wheezy.release = '*.tar.gz'
wheezy.system("service postgresql start")
wheezy.system('pip install /opt/release/%s' % wheezy.release)
wheezy.system("useradd --system --no-create-home odoo")
wheezy.system('su postgres -s /bin/bash -c "createuser -s odoo"')
wheezy.system('su postgres -s /bin/bash -c "createdb mycompany"')
wheezy.system('mkdir /var/lib/odoo')
wheezy.system('chown odoo:odoo /var/lib/odoo')
wheezy.system('su odoo -s /bin/bash -c "odoo.py --addons-path=/usr/local/lib/python2.7/dist-packages/openerp/addons -d mycompany -i base --stop-after-init"')
wheezy.system('su odoo -s /bin/bash -c "odoo.py --addons-path=/usr/local/lib/python2.7/dist-packages/openerp/addons -d mycompany &"')
def test_deb(o):
with docker('odoo-%s-debian-nightly-tests' % version, o.build_dir, o.pub) as wheezy:
wheezy.release = '*.deb'
wheezy.system("service postgresql start")
wheezy.system('su postgres -s /bin/bash -c "createdb mycompany"')
wheezy.system('/usr/bin/dpkg -i /opt/release/%s' % wheezy.release)
wheezy.system('/usr/bin/apt-get install -f -y')
wheezy.system('su odoo -s /bin/bash -c "odoo.py -c /etc/odoo/openerp-server.conf -d mycompany -i base --stop-after-init"')
wheezy.system('su odoo -s /bin/bash -c "odoo.py -c /etc/odoo/openerp-server.conf -d mycompany &"')
def test_rpm(o):
with docker('odoo-%s-centos-nightly-tests' % version, o.build_dir, o.pub) as centos7:
centos7.release = '*.noarch.rpm'
# Start postgresql
centos7.system('su postgres -c "/usr/bin/pg_ctl -D /var/lib/postgres/data start"')
centos7.system('sleep 5')
centos7.system('su postgres -c "createdb mycompany"')
# Odoo install
centos7.system('yum install -d 0 -e 0 /opt/release/%s -y' % centos7.release)
centos7.system('su odoo -s /bin/bash -c "openerp-server -c /etc/odoo/openerp-server.conf -d mycompany -i base --stop-after-init"')
centos7.system('su odoo -s /bin/bash -c "openerp-server -c /etc/odoo/openerp-server.conf -d mycompany &"')
def test_exe(o):
KVMWinTestExe(o, o.vm_winxp_image, o.vm_winxp_ssh_key, o.vm_winxp_login).start()
#---------------------------------------------------------
# Generates Packages, Sources and Release files of debian package
#---------------------------------------------------------
def gen_deb_package(o, published_files):
# Executes command to produce file_name in path, and moves it to o.pub/deb
def _gen_file(o, (command, file_name), path):
cur_tmp_file_path = os.path.join(path, file_name)
with open(cur_tmp_file_path, 'w') as out:
subprocess.call(command, stdout=out, cwd=path)
system(['cp', cur_tmp_file_path, os.path.join(o.pub, 'deb', file_name)])
# Copy files to a temp directory (required because the working directory must contain only the
# files of the last release)
temp_path = tempfile.mkdtemp(suffix='debPackages')
for pub_file_path in published_files:
system(['cp', pub_file_path, temp_path])
commands = [
(['dpkg-scanpackages', '.'], "Packages"), # Generate Packages file
(['dpkg-scansources', '.'], "Sources"), # Generate Sources file
(['apt-ftparchive', 'release', '.'], "Release") # Generate Release file
]
# Generate files
for command in commands:
_gen_file(o, command, temp_path)
# Remove temp directory
shutil.rmtree(temp_path)
# Generate Release.gpg (= signed Release)
# Options -abs: -a (Create ASCII armored output), -b (Make a detach signature), -s (Make a signature)
subprocess.call(['gpg', '--default-key', GPGID, '--passphrase', GPGPASSPHRASE, '--yes', '-abs', '--no-tty', '-o', 'Release.gpg', 'Release'], cwd=os.path.join(o.pub, 'deb'))
#---------------------------------------------------------
# Generates an RPM repo
#---------------------------------------------------------
def gen_rpm_repo(o, file_name):
# Sign the RPM
rpmsign = pexpect.spawn('/bin/bash', ['-c', 'rpm --resign %s' % file_name], cwd=os.path.join(o.pub, 'rpm'))
rpmsign.expect_exact('Enter pass phrase: ')
rpmsign.send(GPGPASSPHRASE + '\r\n')
rpmsign.expect(pexpect.EOF)
# Removes the old repodata
subprocess.call(['rm', '-rf', os.path.join(o.pub, 'rpm', 'repodata')])
# Copy files to a temp directory (required because the working directory must contain only the
# files of the last release)
temp_path = tempfile.mkdtemp(suffix='rpmPackages')
subprocess.call(['cp', file_name, temp_path])
subprocess.call(['createrepo', temp_path]) # creates a repodata folder in temp_path
subprocess.call(['cp', '-r', os.path.join(temp_path, "repodata"), os.path.join(o.pub, 'rpm')])
# Remove temp directory
shutil.rmtree(temp_path)
#----------------------------------------------------------
# Options and Main
#----------------------------------------------------------
def options():
op = optparse.OptionParser()
root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
build_dir = "%s-%s" % (root, timestamp)
op.add_option("-b", "--build-dir", default=build_dir, help="build directory (%default)", metavar="DIR")
op.add_option("-p", "--pub", default=None, help="pub directory (%default)", metavar="DIR")
op.add_option("", "--no-testing", action="store_true", help="don't test the builded packages")
op.add_option("-v", "--version", default='8.0', help="version (%default)")
op.add_option("", "--no-debian", action="store_true", help="don't build the debian package")
op.add_option("", "--no-rpm", action="store_true", help="don't build the rpm package")
op.add_option("", "--no-tarball", action="store_true", help="don't build the tarball")
op.add_option("", "--no-windows", action="store_true", help="don't build the windows package")
# Windows VM
op.add_option("", "--vm-winxp-image", default='/home/odoo/vm/winxp27/winxp27.vdi', help="%default")
op.add_option("", "--vm-winxp-ssh-key", default='/home/odoo/vm/winxp27/id_rsa', help="%default")
op.add_option("", "--vm-winxp-login", default='Naresh', help="Windows login (%default)")
op.add_option("", "--vm-winxp-python-version", default='2.7', help="Windows Python version installed in the VM (default: %default)")
(o, args) = op.parse_args()
# derive other options
o.odoo_dir = root
o.pkg = join(o.build_dir, 'pkg')
o.version_full = '%s-%s' % (o.version, timestamp)
o.work = join(o.build_dir, 'openerp-%s' % o.version_full)
o.work_addons = join(o.work, 'openerp', 'addons')
return o
def main():
o = options()
_prepare_build_dir(o)
if not o.no_testing:
_prepare_testing(o)
try:
if not o.no_tarball:
build_tgz(o)
try:
if not o.no_testing:
test_tgz(o)
published_files = publish(o, 'tarball', ['tar.gz', 'zip'])
except Exception, e:
print("Won't publish the tgz release.\n Exception: %s" % str(e))
if not o.no_debian:
build_deb(o)
try:
if not o.no_testing:
test_deb(o)
published_files = publish(o, 'debian', ['deb', 'dsc', 'changes', 'tar.gz'])
gen_deb_package(o, published_files)
except Exception, e:
print("Won't publish the deb release.\n Exception: %s" % str(e))
if not o.no_rpm:
build_rpm(o)
try:
if not o.no_testing:
test_rpm(o)
published_files = publish(o, 'redhat', ['noarch.rpm'])
gen_rpm_repo(o, published_files[0])
except Exception, e:
print("Won't publish the rpm release.\n Exception: %s" % str(e))
if not o.no_windows:
_prepare_build_dir(o, win32=True)
build_exe(o)
try:
if not o.no_testing:
test_exe(o)
published_files = publish(o, 'windows', ['exe'])
except Exception, e:
print("Won't publish the exe release.\n Exception: %s" % str(e))
except:
pass
finally:
shutil.rmtree(o.build_dir)
print('Build dir %s removed' % o.build_dir)
if not o.no_testing:
system("docker rm -f `docker ps -a | awk '{print $1 }'` 2>>/dev/null")
print('Remaining dockers removed')
if __name__ == '__main__':
main()
|
agpl-3.0
|
akash1808/nova
|
nova/virt/xenapi/image/vdi_through_dev.py
|
80
|
3667
|
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import tarfile
import eventlet
from eventlet import greenio
from nova.image import glance
from nova import utils
from nova.virt.xenapi import vm_utils
class VdiThroughDevStore(object):
"""Deal with virtual disks by attaching them to the OS domU.
At the moment it supports upload to Glance, and the upload format is a raw
disk inside a tgz.
"""
def upload_image(self, context, session, instance, image_id, vdi_uuids):
command = UploadToGlanceAsRawTgz(
context, session, instance, image_id, vdi_uuids)
return command.upload_image()
def download_image(self, context, session, instance, image_id):
# TODO(matelakat) Move through-dev image download functionality to this
# method.
raise NotImplementedError()
class UploadToGlanceAsRawTgz(object):
def __init__(self, context, session, instance, image_id, vdi_uuids):
self.context = context
self.image_id = image_id
self.session = session
self.vdi_uuids = vdi_uuids
def _get_virtual_size(self):
return self.session.call_xenapi(
'VDI.get_virtual_size', self._get_vdi_ref())
def _get_vdi_ref(self):
return self.session.call_xenapi('VDI.get_by_uuid', self.vdi_uuids[0])
def _perform_upload(self, devpath):
readfile, writefile = self._create_pipe()
size = self._get_virtual_size()
producer = TarGzProducer(devpath, writefile, size, 'disk.raw')
consumer = glance.UpdateGlanceImage(
self.context, self.image_id, producer.get_metadata(), readfile)
pool = eventlet.GreenPool()
pool.spawn(producer.start)
pool.spawn(consumer.start)
pool.waitall()
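    # Illustrative note (not part of the original source): the upload is a
    # producer/consumer pair joined by an in-process pipe; TarGzProducer streams
    # the attached device into a gzipped tar entry named "disk.raw" while
    # glance.UpdateGlanceImage reads the other end and uploads it to Glance.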
def _create_pipe(self):
rpipe, wpipe = os.pipe()
rfile = greenio.GreenPipe(rpipe, 'rb', 0)
wfile = greenio.GreenPipe(wpipe, 'wb', 0)
return rfile, wfile
def upload_image(self):
vdi_ref = self._get_vdi_ref()
with vm_utils.vdi_attached_here(self.session, vdi_ref,
read_only=True) as dev:
devpath = utils.make_dev_path(dev)
with utils.temporary_chown(devpath):
self._perform_upload(devpath)
class TarGzProducer(object):
def __init__(self, devpath, writefile, size, fname):
self.fpath = devpath
self.output = writefile
self.size = size
self.fname = fname
def get_metadata(self):
return {
'disk_format': 'raw',
'container_format': 'tgz'
}
def start(self):
with contextlib.closing(self.output):
tinfo = tarfile.TarInfo(name=self.fname)
tinfo.size = int(self.size)
with tarfile.open(fileobj=self.output, mode='w|gz') as tfile:
with self._open_file(self.fpath, 'rb') as input_file:
tfile.addfile(tinfo, fileobj=input_file)
def _open_file(self, *args):
return open(*args)
|
apache-2.0
|
GetSomeBlocks/Score_Soccer
|
resources/src/mythbox/ui/uisettings.py
|
5
|
13970
|
#
# MythBox for XBMC - http://mythbox.googlecode.com
# Copyright (C) 2011 [email protected]
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
import logging
import os
import xbmcgui
import mythbox.msg as m
from mythbox.settings import MythSettings, SettingsException
from mythbox.ui.toolkit import window_busy, BaseWindow, enterNumeric, enterText, Action
from mythbox.util import catchall, timed, safe_str
from mythbox.advanced import AdvancedSettings
log = logging.getLogger('mythbox.ui')
class SettingValidator(object):
def __init__(self, errorMessage):
self.errorMessage = errorMessage
def validate(self, value):
"""
@raise SettingsException: On validation failure
"""
raise SettingsException("%s : %s" % (self.errorMessage, value))
def isValid(self, value):
try:
self.validate(value)
return True
except:
return False
class ExternalizedSettingValidator(SettingValidator):
def __init__(self, validatorMethod, arg1=None):
self.validatorMethod = validatorMethod
self.arg1 = arg1
def validate(self, value):
"""
@raise SettingsException: On validation failure
"""
try:
if not self.arg1:
self.validatorMethod(value)
else:
self.validatorMethod(value, self.arg1)
except Exception, ex:
raise SettingsException(str(ex))
class Setting(object):
"""
Binds MythSettings, validation mechanism, ui rendering, and xbmc controls together
to simplify input, update, validation, and ui presentation of settings.
    @todo Convert to use window properties instead of widget.get()/set()
"""
def __init__(self, store, key, type, validator, widget):
"""
@param store: MythSettings backing store for settings that gets persisted
@param key: string index into MythSettings get(...) set(...) methods
@param type: class of preferred native type (int, bool, str). Used to determine input method: numeric or string
        @param validator: Validator class that encapsulates the validation run when the user enters a value.
            If the value is not valid, it should raise a SettingsException with a useful error message.
@param widget: xbmc.Control* type to set value for presentation - ControlButton, ControlCheckBox
"""
self.store = store
self.key = key
self.type = type
self.widget = widget
if validator is None:
self.validator = None
else:
self.validator = validator.validate
def readInput(self):
ok = False
if self.type == str:
ok, value = enterText(control=self.widget, validator=self.validator)
elif self.type in (int, Seconds,):
ok, value = enterNumeric(control=self.widget, validator=self.validator, current=self.store.get(self.key))
elif self.type == NegativeSeconds:
ok, value = enterNumeric(control=self.widget, validator=self.validator, current= str(int(self.store.get(self.key)) * -1))
if value != '0':
value = '-' + value
elif self.type == bool and type(self.widget) == xbmcgui.ControlRadioButton:
ok, value = True, ['False', 'True'][self.widget.isSelected()]
else:
log.warn('readinput() not activated for type %s and widget %s' % (self.type, type(self.widget)))
if ok:
self.store.put(self.key, value)
self.render() # re-render since enterNumeric(...) doesn't handle special cases like Seconds
def render(self):
value = self.store.get(self.key)
if type(self.widget) == xbmcgui.ControlButton:
if self.type == str:
self.widget.setLabel(label=self.widget.getLabel(), label2=value)
elif self.type == int:
self.widget.setLabel(label=self.widget.getLabel(), label2=str(value))
elif self.type == Seconds:
self.widget.setLabel(label=self.widget.getLabel(), label2='%s seconds' % value)
elif self.type == NegativeSeconds:
self.widget.setLabel(label=self.widget.getLabel(), label2='%s seconds' % str(int(value) * -1))
else:
raise Exception('Dont know how to handle type %s in render()' % self.type)
elif type(self.widget) == xbmcgui.ControlRadioButton:
if self.type == bool:
self.widget.setSelected(self.store.get(self.key) in ('True', 'true', '1'))
else:
raise Exception('Dont know how to handle type %s in render()' % self.type)
else:
raise Exception('Unknown widget in render(): %s' % type(self.widget))
class Seconds(object):
def __init__(self, min, max):
self.min = min
self.max = max
def validate(self, value):
try:
s = int(value)
            if s < self.min or s > self.max:
raise SettingsException('out of bounds')
except Exception, e:
raise SettingsException(e.message)
class NegativeSeconds(object):
pass
class SettingsWindow(BaseWindow):
def __init__(self, *args, **kwargs):
BaseWindow.__init__(self, *args, **kwargs)
[setattr(self,k,v) for k,v in kwargs.iteritems() if k in ('settings','translator','platform','fanArt','cachesByName',)]
self.settingsMap = {} # key = controlId, value = Setting
self.t = self.translator.get
self.advanced = AdvancedSettings(platform=self.platform)
log.debug('Advanced settings:\n %s' % self.advanced)
def register(self, setting):
self.settingsMap[setting.widget.getId()] = setting
@timed
def onInit(self):
if not self.win:
log.debug('onInit')
self.win = xbmcgui.Window(xbmcgui.getCurrentWindowId())
# Nav Buttons
self.testSettingsButton = self.getControl(253)
self.clearCacheButton = self.getControl(405)
self.streamingEnabledRadioButton = self.getControl(208)
self.recordingsButton = self.getControl(205)
# MythTV Settings
if hasattr(self.settings, 'master') and self.settings.master:
self.setWindowProperty('MasterBackendHostname', '%s / %s' % (self.settings.master.hostname, self.settings.master.ipAddress))
self.setWindowProperty('MasterBackendPort', str(self.settings.master.port))
self.register(Setting(self.settings, 'streaming_enabled', bool, None, self.getControl(208)))
self.register(Setting(self.settings, 'paths_recordedprefix', str, ExternalizedSettingValidator(MythSettings.verifyRecordingDirs), self.getControl(205)))
self.register(Setting(self.settings, 'confirm_on_delete', bool, None, self.getControl(206)))
self.register(Setting(self.settings, 'aggressive_caching', bool, None, self.getControl(207)))
# MySQL Settings
self.register(Setting(self.settings, 'mysql_host', str, ExternalizedSettingValidator(MythSettings.verifyMySQLHost), self.getControl(301)))
self.register(Setting(self.settings, 'mysql_port', int, ExternalizedSettingValidator(MythSettings.verifyMySQLPort), self.getControl(302)))
self.register(Setting(self.settings, 'mysql_database', str, ExternalizedSettingValidator(MythSettings.verifyMySQLDatabase), self.getControl(303)))
self.register(Setting(self.settings, 'mysql_user', str, ExternalizedSettingValidator(MythSettings.verifyMySQLUser), self.getControl(304)))
self.register(Setting(self.settings, 'mysql_password', str, None, self.getControl(305)))
# Fanart Settings
self.register(Setting(self.settings, 'fanart_tvdb', bool, None, self.getControl(401)))
self.register(Setting(self.settings, 'fanart_tvrage', bool, None, self.getControl(406)))
self.register(Setting(self.settings, 'fanart_tmdb', bool, None, self.getControl(402)))
self.register(Setting(self.settings, 'fanart_imdb', bool, None, self.getControl(403)))
self.register(Setting(self.settings, 'fanart_google', bool, None, self.getControl(404)))
# Advanced Settings
self.register(Setting(self.settings, 'logging_enabled', bool, None, self.getControl(502)))
self.register(Setting(self.settings, 'feeds_twitter', str, None, self.getControl(503)))
self.setWindowProperty('debugLogLocation', self.translator.get(m.DEBUG_LOG_LOCATION) % self.platform.getDebugLog())
# Playback settings
self.advanced.get = self.advanced.getSetting
self.advanced.put = self.advanced.setSetting
self.register(Setting(self.advanced, 'video/timeseekforward', Seconds, None, self.getControl(602)))
self.register(Setting(self.advanced, 'video/timeseekbackward', NegativeSeconds, None, self.getControl(603)))
self.register(Setting(self.advanced, 'video/timeseekforwardbig', Seconds, None, self.getControl(604)))
self.register(Setting(self.advanced, 'video/timeseekbackwardbig', NegativeSeconds, None, self.getControl(605)))
self.render()
@catchall
@window_busy
def onClick(self, controlId):
log.debug('onClick %s ' % controlId)
source = self.getControl(controlId)
mappedSetting = self.settingsMap.get(controlId)
if mappedSetting:
mappedSetting.readInput()
if mappedSetting.store == self.advanced:
self.advanced.put('video/usetimeseeking', 'true') # required for seek values to take effect
self.advanced.save()
log.debug(self.advanced)
else:
if self.streamingEnabledRadioButton == source:
self.renderStreaming()
self.settings.save()
elif self.testSettingsButton == source: self.testSettings()
elif self.clearCacheButton == source: self.clearCache()
else: log.debug("nothing done onClick")
log.debug('=================================\n%s' % self.settings)
def onFocus(self, controlId):
pass
@catchall
def onAction(self, action):
if action.getId() in (Action.PREVIOUS_MENU, Action.PARENT_DIR):
self.close()
def renderStreaming(self):
# special mutual exclusion for handling of streaming enabled
self.recordingsButton.setEnabled(not self.streamingEnabledRadioButton.isSelected())
@window_busy
def render(self):
for setting in self.settingsMap.values():
log.debug('Rendering %s' % safe_str(setting.key))
setting.render()
self.renderStreaming()
import default
about = "[B]%s[/B]\n\n%s\n\n%s\n\n%s\n\n\n\nMythBox would not be possible without the\nfollowing opensource software and services" % (default.__scriptname__, default.__author__, default.__url__, self.platform.addonVersion())
opensource = """
[B]Software[/B]
BiDict
BeautifulSoup
Decorator
Eclipse
ElementTree
FeedParser
GNU/Linux
HTMLTestRunner
IMDBPy
Mockito
MythTV
MySQL-Connector-Python
ODict
PyDev for Eclipse
Python
Python-Twitter
SimpleJSON
TheMovieDb Python API
TVDB Python API
TVRage Python API
Twisted
XBMC
[B]Services[/B]
Google Image Search
Google Code Project Hosting
Internet Movie Database
The Movie Database
TVDB
TVRage
Twitter
"""
self.setWindowProperty('AboutText', about)
self.setWindowProperty('OpensourceText', opensource)
self.setWindowProperty('ReadmeText', '%s\n%s' % (
open(os.path.join(self.platform.getScriptDir(), 'README'), 'r').read(),
open(os.path.join(self.platform.getScriptDir(), 'FAQ'), 'r').read()))
@window_busy
def testSettings(self):
try:
self.settings.verify()
self.setWindowProperty('MasterBackendHostname', '%s / %s' % (self.settings.master.hostname, self.settings.master.ipAddress))
self.setWindowProperty('MasterBackendPort', str(self.settings.master.port))
xbmcgui.Dialog().ok(self.t(m.INFO), u'', self.t(m.SETTINGS_OK))
except SettingsException, ex:
self.settings.master = None
self.setWindowProperty('MasterBackendHostname', '')
self.setWindowProperty('MasterBackendPort', '')
xbmcgui.Dialog().ok(self.t(m.ERROR), u'', str(ex))
@window_busy
def clearCache(self):
for fileCache in self.cachesByName.values():
fileCache.clear()
self.fanArt.clear()
xbmcgui.Dialog().ok(self.t(m.INFO), u'', self.t(m.CACHES_CLEARED))
|
mit
|
connormanning/arbiter
|
third/gtest-1.7.0/test/gtest_list_tests_unittest.py
|
1898
|
6515
|
#!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's --gtest_list_tests flag.
A user can ask Google Test to list all tests by specifying the
--gtest_list_tests flag. This script tests such functionality
by invoking gtest_list_tests_unittest_ (a program written with
Google Test) with the command line flags.
"""
__author__ = '[email protected] (Patrick Hanna)'
import gtest_test_utils
import re
# Constants.
# The command line flag for enabling/disabling listing all tests.
LIST_TESTS_FLAG = 'gtest_list_tests'
# Path to the gtest_list_tests_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_')
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests
EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
Abc\.
Xyz
Def
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
TypedTest/1\. # TypeParam = int\s*\*
TestA
TestB
TypedTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\.
TestA
TestB
My/TypeParamTest/1\. # TypeParam = int\s*\*
TestA
TestB
My/TypeParamTest/2\. # TypeParam = .*MyArray<bool,\s*42>
TestA
TestB
MyInstantiation/ValueParamTest\.
TestA/0 # GetParam\(\) = one line
TestA/1 # GetParam\(\) = two\\nlines
TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
TestB/0 # GetParam\(\) = one line
TestB/1 # GetParam\(\) = two\\nlines
TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\.
""")
# The expected output when running gtest_list_tests_unittest_ with
# --gtest_list_tests and --gtest_filter=Foo*.
EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\.
Test1
Foo\.
Bar1
Bar2
DISABLED_Bar3
FooBar\.
Baz
FooTest\.
Test1
DISABLED_Test2
Test3
""")
# Utilities.
def Run(args):
"""Runs gtest_list_tests_unittest_ and returns the list of tests printed."""
return gtest_test_utils.Subprocess([EXE_PATH] + args,
capture_stderr=False).output
# The unit test.
class GTestListTestsUnitTest(gtest_test_utils.TestCase):
"""Tests using the --gtest_list_tests flag to list all tests."""
def RunAndVerify(self, flag_value, expected_output_re, other_flag):
"""Runs gtest_list_tests_unittest_ and verifies that it prints
the correct tests.
Args:
flag_value: value of the --gtest_list_tests flag;
None if the flag should not be present.
expected_output_re: regular expression that matches the expected
output after running command;
other_flag: a different flag to be passed to command
along with gtest_list_tests;
None if the flag should not be present.
"""
if flag_value is None:
flag = ''
flag_expression = 'not set'
elif flag_value == '0':
flag = '--%s=0' % LIST_TESTS_FLAG
flag_expression = '0'
else:
flag = '--%s' % LIST_TESTS_FLAG
flag_expression = '1'
args = [flag]
if other_flag is not None:
args += [other_flag]
output = Run(args)
if expected_output_re:
self.assert_(
expected_output_re.match(output),
('when %s is %s, the output of "%s" is "%s",\n'
'which does not match regex "%s"' %
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output,
expected_output_re.pattern)))
else:
self.assert_(
not EXPECTED_OUTPUT_NO_FILTER_RE.match(output),
('when %s is %s, the output of "%s" is "%s"'%
(LIST_TESTS_FLAG, flag_expression, ' '.join(args), output)))
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(flag_value=None,
expected_output_re=None,
other_flag=None)
def testFlag(self):
"""Tests using the --gtest_list_tests flag."""
self.RunAndVerify(flag_value='0',
expected_output_re=None,
other_flag=None)
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag=None)
def testOverrideNonFilterFlags(self):
"""Tests that --gtest_list_tests overrides the non-filter flags."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE,
other_flag='--gtest_break_on_failure')
def testWithFilterFlags(self):
"""Tests that --gtest_list_tests takes into account the
--gtest_filter flag."""
self.RunAndVerify(flag_value='1',
expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE,
other_flag='--gtest_filter=Foo*')
if __name__ == '__main__':
gtest_test_utils.Main()
|
mit
|
Simran-B/arangodb
|
3rdParty/V8-4.3.61/third_party/python_26/Lib/test/test_htmlparser.py
|
56
|
10661
|
"""Tests for HTMLParser.py."""
import HTMLParser
import pprint
import unittest
from test import test_support
class EventCollector(HTMLParser.HTMLParser):
def __init__(self):
self.events = []
self.append = self.events.append
HTMLParser.HTMLParser.__init__(self)
def get_events(self):
# Normalize the list of events so that buffer artefacts don't
# separate runs of contiguous characters.
L = []
prevtype = None
for event in self.events:
type = event[0]
if type == prevtype == "data":
L[-1] = ("data", L[-1][1] + event[1])
else:
L.append(event)
prevtype = type
self.events = L
return L
# structure markup
def handle_starttag(self, tag, attrs):
self.append(("starttag", tag, attrs))
def handle_startendtag(self, tag, attrs):
self.append(("startendtag", tag, attrs))
def handle_endtag(self, tag):
self.append(("endtag", tag))
# all other markup
def handle_comment(self, data):
self.append(("comment", data))
def handle_charref(self, data):
self.append(("charref", data))
def handle_data(self, data):
self.append(("data", data))
def handle_decl(self, data):
self.append(("decl", data))
def handle_entityref(self, data):
self.append(("entityref", data))
def handle_pi(self, data):
self.append(("pi", data))
def unknown_decl(self, decl):
self.append(("unknown decl", decl))
class EventCollectorExtra(EventCollector):
def handle_starttag(self, tag, attrs):
EventCollector.handle_starttag(self, tag, attrs)
self.append(("starttag_text", self.get_starttag_text()))
class TestCaseBase(unittest.TestCase):
def _run_check(self, source, expected_events, collector=EventCollector):
parser = collector()
for s in source:
parser.feed(s)
parser.close()
events = parser.get_events()
if events != expected_events:
self.fail("received events did not match expected events\n"
"Expected:\n" + pprint.pformat(expected_events) +
"\nReceived:\n" + pprint.pformat(events))
def _run_check_extra(self, source, events):
self._run_check(source, events, EventCollectorExtra)
def _parse_error(self, source):
def parse(source=source):
parser = HTMLParser.HTMLParser()
parser.feed(source)
parser.close()
self.assertRaises(HTMLParser.HTMLParseError, parse)
class HTMLParserTestCase(TestCaseBase):
def test_processing_instruction_only(self):
self._run_check("<?processing instruction>", [
("pi", "processing instruction"),
])
self._run_check("<?processing instruction ?>", [
("pi", "processing instruction ?"),
])
def test_simple_html(self):
self._run_check("""
<!DOCTYPE html PUBLIC 'foo'>
<HTML>&entity;&#32;
<!--comment1a
-></foo><bar><<?pi?></foo<bar
comment1b-->
<Img sRc='Bar' isMAP>sample
text
&#x201C;
<!--comment2a-- --comment2b--><!>
</Html>
""", [
("data", "\n"),
("decl", "DOCTYPE html PUBLIC 'foo'"),
("data", "\n"),
("starttag", "html", []),
("entityref", "entity"),
("charref", "32"),
("data", "\n"),
("comment", "comment1a\n-></foo><bar><<?pi?></foo<bar\ncomment1b"),
("data", "\n"),
("starttag", "img", [("src", "Bar"), ("ismap", None)]),
("data", "sample\ntext\n"),
("charref", "x201C"),
("data", "\n"),
("comment", "comment2a-- --comment2b"),
("data", "\n"),
("endtag", "html"),
("data", "\n"),
])
def test_unclosed_entityref(self):
self._run_check("&entityref foo", [
("entityref", "entityref"),
("data", " foo"),
])
def test_doctype_decl(self):
inside = """\
DOCTYPE html [
<!ELEMENT html - O EMPTY>
<!ATTLIST html
version CDATA #IMPLIED
profile CDATA 'DublinCore'>
<!NOTATION datatype SYSTEM 'http://xml.python.org/notations/python-module'>
<!ENTITY myEntity 'internal parsed entity'>
<!ENTITY anEntity SYSTEM 'http://xml.python.org/entities/something.xml'>
<!ENTITY % paramEntity 'name|name|name'>
%paramEntity;
<!-- comment -->
]"""
self._run_check("<!%s>" % inside, [
("decl", inside),
])
def test_bad_nesting(self):
# Strangely, this *is* supposed to test that overlapping
# elements are allowed. HTMLParser is more geared toward
        # lexing the input than parsing the structure.
self._run_check("<a><b></a></b>", [
("starttag", "a", []),
("starttag", "b", []),
("endtag", "a"),
("endtag", "b"),
])
def test_bare_ampersands(self):
self._run_check("this text & contains & ampersands &", [
("data", "this text & contains & ampersands &"),
])
def test_bare_pointy_brackets(self):
self._run_check("this < text > contains < bare>pointy< brackets", [
("data", "this < text > contains < bare>pointy< brackets"),
])
def test_attr_syntax(self):
output = [
("starttag", "a", [("b", "v"), ("c", "v"), ("d", "v"), ("e", None)])
]
self._run_check("""<a b='v' c="v" d=v e>""", output)
self._run_check("""<a b = 'v' c = "v" d = v e>""", output)
self._run_check("""<a\nb\n=\n'v'\nc\n=\n"v"\nd\n=\nv\ne>""", output)
self._run_check("""<a\tb\t=\t'v'\tc\t=\t"v"\td\t=\tv\te>""", output)
def test_attr_values(self):
self._run_check("""<a b='xxx\n\txxx' c="yyy\t\nyyy" d='\txyz\n'>""",
[("starttag", "a", [("b", "xxx\n\txxx"),
("c", "yyy\t\nyyy"),
("d", "\txyz\n")])
])
self._run_check("""<a b='' c="">""", [
("starttag", "a", [("b", ""), ("c", "")]),
])
# Regression test for SF patch #669683.
self._run_check("<e a=rgb(1,2,3)>", [
("starttag", "e", [("a", "rgb(1,2,3)")]),
])
# Regression test for SF bug #921657.
self._run_check("<a href=mailto:[email protected]>", [
("starttag", "a", [("href", "mailto:[email protected]")]),
])
def test_attr_entity_replacement(self):
self._run_check("""<a b='&><"''>""", [
("starttag", "a", [("b", "&><\"'")]),
])
def test_attr_funky_names(self):
self._run_check("""<a a.b='v' c:d=v e-f=v>""", [
("starttag", "a", [("a.b", "v"), ("c:d", "v"), ("e-f", "v")]),
])
def test_illegal_declarations(self):
self._parse_error('<!spacer type="block" height="25">')
def test_starttag_end_boundary(self):
self._run_check("""<a b='<'>""", [("starttag", "a", [("b", "<")])])
self._run_check("""<a b='>'>""", [("starttag", "a", [("b", ">")])])
def test_buffer_artefacts(self):
output = [("starttag", "a", [("b", "<")])]
self._run_check(["<a b='<'>"], output)
self._run_check(["<a ", "b='<'>"], output)
self._run_check(["<a b", "='<'>"], output)
self._run_check(["<a b=", "'<'>"], output)
self._run_check(["<a b='<", "'>"], output)
self._run_check(["<a b='<'", ">"], output)
output = [("starttag", "a", [("b", ">")])]
self._run_check(["<a b='>'>"], output)
self._run_check(["<a ", "b='>'>"], output)
self._run_check(["<a b", "='>'>"], output)
self._run_check(["<a b=", "'>'>"], output)
self._run_check(["<a b='>", "'>"], output)
self._run_check(["<a b='>'", ">"], output)
output = [("comment", "abc")]
self._run_check(["", "<!--abc-->"], output)
self._run_check(["<", "!--abc-->"], output)
self._run_check(["<!", "--abc-->"], output)
self._run_check(["<!-", "-abc-->"], output)
self._run_check(["<!--", "abc-->"], output)
self._run_check(["<!--a", "bc-->"], output)
self._run_check(["<!--ab", "c-->"], output)
self._run_check(["<!--abc", "-->"], output)
self._run_check(["<!--abc-", "->"], output)
self._run_check(["<!--abc--", ">"], output)
self._run_check(["<!--abc-->", ""], output)
def test_starttag_junk_chars(self):
self._parse_error("</>")
self._parse_error("</$>")
self._parse_error("</")
self._parse_error("</a")
self._parse_error("<a<a>")
self._parse_error("</a<a>")
self._parse_error("<!")
self._parse_error("<a $>")
self._parse_error("<a")
self._parse_error("<a foo='bar'")
self._parse_error("<a foo='bar")
self._parse_error("<a foo='>'")
self._parse_error("<a foo='>")
self._parse_error("<a foo=>")
def test_declaration_junk_chars(self):
self._parse_error("<!DOCTYPE foo $ >")
def test_startendtag(self):
self._run_check("<p/>", [
("startendtag", "p", []),
])
self._run_check("<p></p>", [
("starttag", "p", []),
("endtag", "p"),
])
self._run_check("<p><img src='foo' /></p>", [
("starttag", "p", []),
("startendtag", "img", [("src", "foo")]),
("endtag", "p"),
])
def test_get_starttag_text(self):
s = """<foo:bar \n one="1"\ttwo=2 >"""
self._run_check_extra(s, [
("starttag", "foo:bar", [("one", "1"), ("two", "2")]),
("starttag_text", s)])
def test_cdata_content(self):
s = """<script> <!-- not a comment --> ¬-an-entity-ref; </script>"""
self._run_check(s, [
("starttag", "script", []),
("data", " <!-- not a comment --> ¬-an-entity-ref; "),
("endtag", "script"),
])
s = """<script> <not a='start tag'> </script>"""
self._run_check(s, [
("starttag", "script", []),
("data", " <not a='start tag'> "),
("endtag", "script"),
])
def test_entityrefs_in_attributes(self):
self._run_check("<html foo='€&aa&unsupported;'>", [
("starttag", "html", [("foo", u"\u20AC&aa&unsupported;")])
])
def test_main():
test_support.run_unittest(HTMLParserTestCase)
if __name__ == "__main__":
test_main()
|
apache-2.0
|
sietse/pyietflib
|
pyietflib/rfc2045/contenttype_iana.py
|
2
|
1838
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""IANA registered Content-Type type and subtype values."""
__copyright__ = """Copyright 2011 Lance Finn Helsten ([email protected])"""
from .__meta__ import (__version__, __author__, __license__)
import sys
if sys.version_info < (3, 2):
raise Exception("rfc2045 requires Python 3.2 or higher.")
import locale
import logging
import string
import re
__all__ = ['iana_type', 'iana_subtype', 'iana_default_parameters']
#
# The text file that contains all the IANA registered types and subtypes
# with each pair containing zero or more named lists of parameters.
# - "defaults" is a dictionary of parameters and default values.
# - "mandatory" is a list of parameters that must exist when parsing.
# - "optional" is a list of parameters that may be used with the type/subtype.
#
iana_types = {
'text':{
'plain':{"defaults":[("charset", "us-ascii")]}
},
'image':{
'jpeg':{}
},
'audio':{
'basic':{}
},
'video':{
'mpeg':{}
},
'application':{
'octet-stream':{"defaults":[("type", None), ("padding", '8')]},
'postscript':{}
}
}
def iana_type(t):
"""Is the given type (`t`) a discrete IANA type defined in RFC 2045?"""
return t in iana_types.keys()
def iana_subtype(t, st):
"""Is the given type (`st`) within (`t`) a defined subtype in
RFC 2045?"""
#TODO Build a registry that can contain all the IANA subtypes
return st in iana_types.get(t, {}).keys()
def iana_default_parameters(t, st):
"""Return a dictionary of default parameters for the given type
and subtype."""
t_st = iana_types.get(t, {}).get(st, {}).get("defaults", [])
t_st = dict(t_st)
return t_st
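# Illustrative examples (derived from the registry table above):
#   iana_type('text')                        -> True
#   iana_subtype('text', 'plain')            -> True
#   iana_default_parameters('text', 'plain') -> {'charset': 'us-ascii'}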
class ContentTypeIANA():
"""This defines a top level IANA Content-Type type value."""
pass
|
apache-2.0
|
daegun/mitro
|
browser-ext/third_party/firefox-addon-sdk/python-lib/cuddlefish/tests/test_preflight.py
|
37
|
5860
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import os, shutil
import simplejson as json
import unittest
import hashlib
import base64
from cuddlefish import preflight
from StringIO import StringIO
class Util(unittest.TestCase):
def get_basedir(self):
return os.path.join(".test_tmp", self.id())
def make_basedir(self):
basedir = self.get_basedir()
if os.path.isdir(basedir):
here = os.path.abspath(os.getcwd())
assert os.path.abspath(basedir).startswith(here) # safety
shutil.rmtree(basedir)
os.makedirs(basedir)
return basedir
def test_base62(self):
for i in range(1000):
h = hashlib.sha1(str(i)).digest()
s1 = base64.b64encode(h, "AB").strip("=")
s2 = base64.b64encode(h).strip("=").replace("+","A").replace("/","B")
self.failUnlessEqual(s1, s2)
def write(self, config):
basedir = self.get_basedir()
fn = os.path.join(basedir, "package.json")
open(fn,"w").write(config)
def read(self):
basedir = self.get_basedir()
fn = os.path.join(basedir, "package.json")
return open(fn,"r").read()
def get_cfg(self):
cfg = json.loads(self.read())
if "name" not in cfg:
# the cfx parser always provides a name, even if package.json
# doesn't contain one
cfg["name"] = "pretend name"
return cfg
def parse(self, keydata):
fields = {}
fieldnames = []
for line in keydata.split("\n"):
if line.strip():
k,v = line.split(":", 1)
k = k.strip() ; v = v.strip()
fields[k] = v
fieldnames.append(k)
return fields, fieldnames
def test_preflight(self):
basedir = self.make_basedir()
fn = os.path.join(basedir, "package.json")
# empty config is not ok: need id (name is automatically supplied)
config_orig = "{}"
self.write(config_orig)
out = StringIO()
cfg = self.get_cfg()
config_was_ok, modified = preflight.preflight_config(cfg, fn,
stderr=out)
self.failUnlessEqual(config_was_ok, False)
self.failUnlessEqual(modified, True)
backup_fn = os.path.join(basedir, "package.json.backup")
config_backup = open(backup_fn,"r").read()
self.failUnlessEqual(config_backup, config_orig)
config = json.loads(self.read())
self.failIf("name" in config)
self.failUnless("id" in config)
self.failUnless(config["id"].startswith("jid1-"), config["id"])
self.failUnlessEqual(out.getvalue().strip(),
"No 'id' in package.json: creating a new ID for you.")
os.unlink(backup_fn)
# just a name? we add the id
config_orig = '{"name": "my-awesome-package"}'
self.write(config_orig)
out = StringIO()
cfg = self.get_cfg()
config_was_ok, modified = preflight.preflight_config(cfg, fn,
stderr=out)
self.failUnlessEqual(config_was_ok, False)
self.failUnlessEqual(modified, True)
backup_fn = os.path.join(basedir, "package.json.backup")
config_backup = open(backup_fn,"r").read()
self.failUnlessEqual(config_backup, config_orig)
config = json.loads(self.read())
self.failUnlessEqual(config["name"], "my-awesome-package")
self.failUnless("id" in config)
self.failUnless(config["id"].startswith("jid1-"), config["id"])
jid = str(config["id"])
self.failUnlessEqual(out.getvalue().strip(),
"No 'id' in package.json: creating a new ID for you.")
os.unlink(backup_fn)
# name and valid id? great! ship it!
config2 = '{"name": "my-awesome-package", "id": "%s"}' % jid
self.write(config2)
out = StringIO()
cfg = self.get_cfg()
config_was_ok, modified = preflight.preflight_config(cfg, fn,
stderr=out)
self.failUnlessEqual(config_was_ok, True)
self.failUnlessEqual(modified, False)
config2a = self.read()
self.failUnlessEqual(config2a, config2)
self.failUnlessEqual(out.getvalue().strip(), "")
# name and anonymous ID? without asking to see its papers, ship it
config3 = '{"name": "my-old-skool-package", "id": "anonid0-deadbeef"}'
self.write(config3)
out = StringIO()
cfg = self.get_cfg()
config_was_ok, modified = preflight.preflight_config(cfg, fn,
stderr=out)
self.failUnlessEqual(config_was_ok, True)
self.failUnlessEqual(modified, False)
config3a = self.read()
self.failUnlessEqual(config3a, config3)
self.failUnlessEqual(out.getvalue().strip(), "")
# name and old-style ID? with nostalgic trepidation, ship it
config4 = '{"name": "my-old-skool-package", "id": "[email protected]"}'
self.write(config4)
out = StringIO()
cfg = self.get_cfg()
config_was_ok, modified = preflight.preflight_config(cfg, fn,
stderr=out)
self.failUnlessEqual(config_was_ok, True)
self.failUnlessEqual(modified, False)
config4a = self.read()
self.failUnlessEqual(config4a, config4)
self.failUnlessEqual(out.getvalue().strip(), "")
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
yuewko/neutron
|
neutron/plugins/ml2/drivers/hyperv/mech_hyperv.py
|
27
|
1490
|
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from hyperv.neutron.ml2 import mech_hyperv
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.plugins.ml2.drivers import mech_agent
class HypervMechanismDriver(mech_hyperv.HypervMechanismDriver,
mech_agent.SimpleAgentMechanismDriverBase):
"""Attach to networks using hyperv L2 agent.
The HypervMechanismDriver integrates the ml2 plugin with the
hyperv L2 agent. Port binding with this driver requires the hyperv
agent to be running on the port's host, and that agent to have
connectivity to at least one segment of the port's network.
"""
def __init__(self):
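        # Register with the base class: the agent type to bind against, the
        # VIF type reported for bound ports, and the VIF details (port
        # filtering is not supported by the hyperv agent).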
super(HypervMechanismDriver, self).__init__(
constants.AGENT_TYPE_HYPERV,
portbindings.VIF_TYPE_HYPERV,
{portbindings.CAP_PORT_FILTER: False})
|
apache-2.0
|
Knowledge-is-Power/Movie-Platform
|
node_modules/node-gyp/gyp/PRESUBMIT.py
|
1369
|
3662
|
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
PYLINT_BLACKLIST = [
# TODO: fix me.
# From SCons, not done in google style.
'test/lib/TestCmd.py',
'test/lib/TestCommon.py',
'test/lib/TestGyp.py',
]
PYLINT_DISABLED_WARNINGS = [
# TODO: fix me.
# Many tests include modules they don't use.
'W0611',
# Possible unbalanced tuple unpacking with sequence.
'W0632',
# Attempting to unpack a non-sequence.
'W0633',
# Include order doesn't properly include local files?
'F0401',
# Some use of built-in names.
'W0622',
# Some unused variables.
'W0612',
# Operator not preceded/followed by space.
'C0323',
'C0322',
# Unnecessary semicolon.
'W0301',
# Unused argument.
'W0613',
# String has no effect (docstring in wrong place).
'W0105',
# map/filter on lambda could be replaced by comprehension.
'W0110',
# Use of eval.
'W0123',
# Comma not followed by space.
'C0324',
# Access to a protected member.
'W0212',
# Bad indent.
'W0311',
# Line too long.
'C0301',
# Undefined variable.
'E0602',
# Not exception type specified.
'W0702',
# No member of that name.
'E1101',
# Dangerous default {}.
'W0102',
# Cyclic import.
'R0401',
# Others, too many to sort.
'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
'R0201', 'E0101', 'C0321',
# ************* Module copy
# W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
report = []
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api))
return report
def CheckChangeOnCommit(input_api, output_api):
report = []
# Accept any year number from 2009 to the current year.
current_year = int(input_api.time.strftime('%Y'))
allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
years_re = '(' + '|'.join(allowed_years) + ')'
# The (c) is deprecated, but tolerate it until it's removed from all files.
license = (
r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
r'.*? Use of this source code is governed by a BSD-style license that '
r'can be\n'
r'.*? found in the LICENSE file\.\n'
) % {
'year': years_re,
}
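  # Illustrative example: with current_year == 2014 the pattern above accepts
  # a header line such as
  #   # Copyright (c) 2012 Google Inc. All rights reserved.
  # and rejects any year outside the 2009..current_year range.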
report.extend(input_api.canned_checks.PanProjectChecks(
input_api, output_api, license_header=license))
report.extend(input_api.canned_checks.CheckTreeIsOpen(
input_api, output_api,
'http://gyp-status.appspot.com/status',
'http://gyp-status.appspot.com/current'))
import os
import sys
old_sys_path = sys.path
try:
sys.path = ['pylib', 'test/lib'] + sys.path
blacklist = PYLINT_BLACKLIST
if sys.platform == 'win32':
blacklist = [os.path.normpath(x).replace('\\', '\\\\')
for x in PYLINT_BLACKLIST]
report.extend(input_api.canned_checks.RunPylint(
input_api,
output_api,
black_list=blacklist,
disabled_warnings=PYLINT_DISABLED_WARNINGS))
finally:
sys.path = old_sys_path
return report
TRYBOTS = [
'linux_try',
'mac_try',
'win_try',
]
def GetPreferredTryMasters(_, change):
return {
'client.gyp': { t: set(['defaulttests']) for t in TRYBOTS },
}
|
mit
|
theflofly/tensorflow
|
tensorflow/python/ops/ragged/ragged_reduce_op_test.py
|
11
|
13754
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for ragged_math_ops.reduce_<AGGREGATE> ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_factory_ops
from tensorflow.python.ops.ragged import ragged_math_ops
from tensorflow.python.ops.ragged import ragged_test_util
from tensorflow.python.platform import googletest
_MAX_INT32 = dtypes.int32.max
_MIN_INT32 = dtypes.int32.min
_NAN = np.nan
def mean(*values):
return 1.0 * sum(values) / len(values)
@test_util.run_all_in_graph_and_eager_modes
class RaggedReduceOpsTest(ragged_test_util.RaggedTensorTestCase,
parameterized.TestCase):
@parameterized.parameters(
#=========================================================================
# Docstring examples. RaggedTensor for testing is:
# [[3, 1, 4],
# [1, 5, ],
# [9, ],
# [2, 6 ]]
#=========================================================================
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=-2,
expected=[15, 12, 4] # = [3+1+9+2, 1+5+6, 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=-1,
expected=[8, 6, 9, 8] # = [3+1+4, 1+5, 9, 2+6]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[54, 30, 4] # = [3*1*9*2, 1*5*6, 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[12, 5, 9, 12] # = [3*1*4, 1*5, 9, 2*6]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[1, 1, 4] # = [min(3, 1, 9, 2), min(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[1, 1, 9, 2] # = [min(3, 1, 4), min(1, 5), 9, min(2, 6)]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[9, 6, 4] # = [max(3, 1, 9, 2), max(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=1,
expected=[4, 5, 9, 6] # = [max(3, 1, 4), max(1, 5), 9, max(2, 6)]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[3, 1, 4], [1, 5], [9], [2, 6]],
axis=0,
expected=[3.75, 4, 4] # = [mean(3, 1, 9, 2), mean(1, 5, 6), 4]
),
dict(
ragged_reduce_op=ragged_math_ops.reduce_any,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=0,
expected=[True, True, False, True]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_any,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=1,
expected=[True, True, True]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_all,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=0,
expected=[False, True, False, True]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_all,
rt_input=[[True, True], [True, True, False, True], [False, True]],
axis=1,
expected=[True, False, False]),
#=========================================================================
# Examples with the following RaggedTensor (ragged_rank=1):
# [[0, 1, 2, 3],
# [4 ],
# [ ],
# [5, 6 ],
# [7 ],
# [8, 9 ]]
#=========================================================================
# axis=None
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=0 + 1 + 2 + 3 + 4 + 5 + 6 + 7 + 8 + 9),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=0 * 1 * 2 * 3 * 4 * 5 * 6 * 7 * 8 * 9),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=min(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=max(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=None,
expected=mean(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)),
# axis=0
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[0 + 4 + 5 + 7 + 8, 1 + 6 + 9, 2, 3]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[0 * 4 * 5 * 7 * 8, 1 * 6 * 9, 2, 3]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[min(0, 4, 5, 7, 8), min(1, 6, 9), 2, 3]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[max(0, 4, 5, 7, 8), max(1, 6, 9), 2, 3]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=0,
expected=[mean(0, 4, 5, 7, 8),
mean(1, 6, 9), 2, 3]),
# axis=1
# Note: we don't test mean here because it gives a NaN, and this will
# cause assertEqual to fail (since NaN != NaN). See testMeanNan().
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_prod,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[0 * 1 * 2 * 3, 4, 1, 5 * 6, 7, 8 * 9]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_min,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[min(0, 1, 2, 3), 4, _MAX_INT32,
min(5, 6), 7,
min(8, 9)]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_max,
rt_input=[[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]],
axis=1,
expected=[max(0, 1, 2, 3), 4, _MIN_INT32,
max(5, 6), 7,
max(8, 9)]),
#=========================================================================
# Examples with ragged_rank=2:
# [[[1, 2], [ ], [3, 4, 5]],
# [[6, 7], [ ], [8 ]],
# [ ],
# [[9 ] ]]
#=========================================================================
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[],
expected=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=None,
expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=0,
expected=[[1 + 6 + 9, 2 + 7], [], [3 + 8, 4, 5]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=1,
expected=[[1 + 3, 2 + 4, 5], [6 + 8, 7], [], [9]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=2,
expected=[[1 + 2, 0, 3 + 4 + 5], [6 + 7, 0, 8], [], [9]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 1],
expected=[1 + 3 + 6 + 8 + 9, 2 + 4 + 7, 5]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 2],
expected=[1 + 6 + 9 + 2 + 7, 0, 3 + 8 + 4 + 5]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[1, 2],
expected=[1 + 2 + 3 + 4 + 5, 6 + 7 + 8, 0, 9]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_sum,
rt_input=[[[1, 2], [], [3, 4, 5]], [[6, 7], [], [8]], [], [[9]]],
axis=[0, 1, 2],
expected=sum([1, 2, 3, 4, 5, 6, 7, 8, 9])),
#=========================================================================
# Examples for ragged_reduce_mean ragged_rank=2:
# [[[1, 2], [3, 4, 5]],
# [[6, 7], [8 ]],
# [[9 ] ]]
#=========================================================================
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=0,
expected=[[mean(1, 6, 9), mean(2, 7)], [mean(3, 8), 4, 5]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=1,
expected=[[mean(1, 3), mean(2, 4), 5], [mean(6, 8), 7], [9]]),
dict(
ragged_reduce_op=ragged_math_ops.reduce_mean,
rt_input=[[[1, 2], [3, 4, 5]], [[6, 7], [8]], [[9]]],
axis=2,
expected=[[mean(1, 2), mean(3, 4, 5)], [mean(6, 7), 8], [9]]),
)
def testReduce(self, ragged_reduce_op, rt_input, axis, expected):
rt_input = ragged_factory_ops.constant(rt_input)
reduced = ragged_reduce_op(rt_input, axis)
self.assertRaggedEqual(reduced, expected)
def assertEqualWithNan(self, actual, expected):
"""Like assertEqual, but NaN==NaN."""
self.assertTrue(
((actual == expected) | (np.isnan(actual) & np.isnan(expected))).all())
def testMeanNan(self):
rt_as_list = [[0, 1, 2, 3], [4], [], [5, 6], [7], [8, 9]]
expected = (
np.array([0 + 1 + 2 + 3, 4, 0, 5 + 6, 7, 8 + 9]) / np.array(
[4, 1, 0, 2, 1, 2]))
rt_input = ragged_factory_ops.constant(rt_as_list)
reduced = ragged_math_ops.reduce_mean(rt_input, axis=1)
self.assertEqualWithNan(self.evaluate(reduced), expected)
def testMeanWithTensorInputs(self):
tensor = [[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]]
expected = [2.0, 20.0]
reduced = ragged_math_ops.reduce_mean(tensor, axis=1)
self.assertRaggedEqual(reduced, expected)
def testErrors(self):
rt_input = ragged_factory_ops.constant([[1, 2, 3], [4, 5]])
axis = array_ops.placeholder_with_default(constant_op.constant([0]), None)
if not context.executing_eagerly():
self.assertRaisesRegexp(
ValueError, r'axis must be known at graph construction time.',
ragged_math_ops.reduce_sum, rt_input, axis)
self.assertRaisesRegexp(TypeError, r'axis must be an int; got str.*',
ragged_math_ops.reduce_sum, rt_input, ['x'])
if __name__ == '__main__':
googletest.main()
|
apache-2.0
|
google-research/google-research
|
eeg_modelling/eeg_viewer/utils_test.py
|
1
|
2218
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
import json
from absl.testing import absltest
from eeg_modelling.eeg_viewer import utils
class UtilsTest(absltest.TestCase):
def testGetSampleRange(self):
self.assertEqual((10, 20), utils.GetSampleRange(1, 10, 10))
def testInitDataTableInputsWithTimeAxis(self):
data, _ = utils.InitDataTableInputsWithTimeAxis(1, 3, 3, 8)
self.assertEqual([{'seconds': 3.0}, {'seconds': 4.0}, {'seconds': 5.0}],
data)
def testConvertToDataTableJSon(self):
data, _ = utils.InitDataTableInputsWithTimeAxis(freq=1,
chunk_duration_sec=4,
chunk_start=4,
max_samples=10)
json_data = json.loads(utils.ConvertToDataTableJSon(data, ['seconds']))
self.assertEqual([{'id': 'seconds', 'label': 'seconds', 'type': 'number'}],
json_data['cols'])
self.assertEqual([{'c': [{'v': 4}]}, {'c': [{'v': 5}]}, {'c': [{'v': 6}]},
{'c': [{'v': 7}]}], json_data['rows'])
def testCreateEmptyTable(self):
return_value = utils.CreateEmptyTable(5, 10)
json_data = json.loads(return_value)
self.assertEqual([{'id': 'seconds', 'label': 'seconds', 'type': 'number'}],
json_data['cols'])
self.assertEqual([{'c': [{'v': 0}]}, {'c': [{'v': 1}]}, {'c': [{'v': 2}]},
{'c': [{'v': 3}]}, {'c': [{'v': 4}]}, {'c': [{'v': 5}]}],
json_data['rows'])
if __name__ == '__main__':
absltest.main()
|
apache-2.0
|
rgommers/statsmodels
|
statsmodels/sandbox/examples/thirdparty/findow_0.py
|
33
|
2147
|
# -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
colors of lines in graphs are not great
uses DataFrame and WidePanel to hold the data downloaded from Yahoo via matplotlib.finance.
I haven't figured out storage, so the download happens at each run
of the script.
getquotes is from pandas\examples\finance.py
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
from statsmodels.compat.python import lzip
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
def getquotes(symbol, start, end):
quotes = fin.quotes_historical_yahoo(symbol, start, end)
dates, open, close, high, low, volume = lzip(*quotes)
data = {
'open' : open,
'close' : close,
'high' : high,
'low' : low,
'volume' : volume
}
dates = pa.Index([dt.datetime.fromordinal(int(d)) for d in dates])
return pa.DataFrame(data, index=dates)
start_date = dt.datetime(2009, 1, 1)
end_date = dt.datetime(2010, 1, 1)
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']
# download data
dmall = {}
for sy in mysym:
dmall[sy] = getquotes(sy, start_date, end_date)
# combine into WidePanel
pawp = pa.WidePanel.fromDict(dmall)
print(pawp.values.shape)
# select closing prices
paclose = pawp.getMinorXS('close')
# take log and first difference over time
paclose_ratereturn = paclose.apply(np.log).diff()
plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')
# square the returns
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))
plt.figure()
plt.title('volatility (with 5 day moving average)')
paclose_ratereturn_vol.plot()
# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
lambda x:np.convolve(x,np.ones(5)/5.,'same'))
paclose_ratereturn_vol_mov.plot()
#plt.show()
|
bsd-3-clause
|
petewarden/tensorflow
|
tensorflow/python/ops/image_grad_test.py
|
13
|
1354
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Image Op Gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops import image_grad_test_base as test_base
from tensorflow.python.platform import test
ResizeNearestNeighborOpTest = test_base.ResizeNearestNeighborOpTestBase
ResizeBilinearOpTest = test_base.ResizeBilinearOpTestBase
ResizeBicubicOpTest = test_base.ResizeBicubicOpTestBase
ScaleAndTranslateOpTest = test_base.ScaleAndTranslateOpTestBase
CropAndResizeOpTest = test_base.CropAndResizeOpTestBase
RGBToHSVOpTest = test_base.RGBToHSVOpTestBase
if __name__ == "__main__":
test.main()
|
apache-2.0
|
zxombie/aarch64-freebsd-binutils
|
gdb/testsuite/gdb.ada/pp-rec-component.py
|
41
|
1101
|
# Copyright (C) 2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from time import asctime, gmtime
import gdb # silence pyflakes
class TimeTPrinter:
def __init__(self, val):
self.val = val
def to_string(self):
secs = int(self.val['secs'])
return "%s (%d)" % (asctime(gmtime(secs)), secs)
def time_sniffer(val):
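    # Sniffer registered below: return a TimeTPrinter for values whose type
    # tag is the Ada record type "pck__time_t", and None for everything else.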
if val.type.tag == "pck__time_t":
return TimeTPrinter(val)
return None
gdb.pretty_printers.append(time_sniffer)
|
gpl-2.0
|
shangvven/Wox
|
PythonHome/Lib/site-packages/requests/packages/chardet/charsetprober.py
|
3127
|
1902
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
def __init__(self):
pass
def reset(self):
self._mState = constants.eDetecting
def get_charset_name(self):
return None
def feed(self, aBuf):
pass
def get_state(self):
return self._mState
def get_confidence(self):
return 0.0
def filter_high_bit_only(self, aBuf):
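        # Collapse every run of ASCII bytes (0x00-0x7F) to a single space so
        # that only high-bit bytes remain for frequency analysis.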
aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
return aBuf
def filter_without_english_letters(self, aBuf):
aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
return aBuf
def filter_with_english_letters(self, aBuf):
# TODO
return aBuf
|
mit
|
GoogleCloudPlatform/training-data-analyst
|
courses/machine_learning/deepdive2/structured/solutions/serving/application/lib/werkzeug/middleware/lint.py
|
12
|
12967
|
"""
WSGI Protocol Linter
====================
This module provides a middleware that performs sanity checks on the
behavior of the WSGI server and application. It checks that the
:pep:`3333` WSGI spec is properly implemented. It also warns on some
common HTTP errors such as non-empty responses for 304 status codes.
.. autoclass:: LintMiddleware
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
from warnings import warn
from .._compat import implements_iterator
from .._compat import PY2
from .._compat import string_types
from ..datastructures import Headers
from ..http import is_entity_header
from ..wsgi import FileWrapper
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
class WSGIWarning(Warning):
"""Warning class for WSGI warnings."""
class HTTPWarning(Warning):
"""Warning class for HTTP warnings."""
def check_string(context, obj, stacklevel=3):
if type(obj) is not str:
warn(
"'%s' requires strings, got '%s'" % (context, type(obj).__name__),
WSGIWarning,
)
class InputStream(object):
def __init__(self, stream):
self._stream = stream
def read(self, *args):
if len(args) == 0:
warn(
"WSGI does not guarantee an EOF marker on the input stream, thus making"
" calls to 'wsgi.input.read()' unsafe. Conforming servers may never"
" return from this call.",
WSGIWarning,
stacklevel=2,
)
elif len(args) != 1:
warn(
"Too many parameters passed to 'wsgi.input.read()'.",
WSGIWarning,
stacklevel=2,
)
return self._stream.read(*args)
def readline(self, *args):
if len(args) == 0:
warn(
"Calls to 'wsgi.input.readline()' without arguments are unsafe. Use"
" 'wsgi.input.read()' instead.",
WSGIWarning,
stacklevel=2,
)
elif len(args) == 1:
warn(
"'wsgi.input.readline()' was called with a size hint. WSGI does not"
" support this, although it's available on all major servers.",
WSGIWarning,
stacklevel=2,
)
else:
raise TypeError("Too many arguments passed to 'wsgi.input.readline()'.")
return self._stream.readline(*args)
def __iter__(self):
try:
return iter(self._stream)
except TypeError:
warn("'wsgi.input' is not iterable.", WSGIWarning, stacklevel=2)
return iter(())
def close(self):
warn("The application closed the input stream!", WSGIWarning, stacklevel=2)
self._stream.close()
class ErrorStream(object):
def __init__(self, stream):
self._stream = stream
def write(self, s):
check_string("wsgi.error.write()", s)
self._stream.write(s)
def flush(self):
self._stream.flush()
def writelines(self, seq):
for line in seq:
self.write(line)
def close(self):
warn("The application closed the error stream!", WSGIWarning, stacklevel=2)
self._stream.close()
class GuardedWrite(object):
def __init__(self, write, chunks):
self._write = write
self._chunks = chunks
def __call__(self, s):
check_string("write()", s)
        self._write(s)  # 'write' is the callable returned by start_response
self._chunks.append(len(s))
@implements_iterator
class GuardedIterator(object):
def __init__(self, iterator, headers_set, chunks):
self._iterator = iterator
if PY2:
self._next = iter(iterator).next
else:
self._next = iter(iterator).__next__
self.closed = False
self.headers_set = headers_set
self.chunks = chunks
def __iter__(self):
return self
def __next__(self):
if self.closed:
warn("Iterated over closed 'app_iter'.", WSGIWarning, stacklevel=2)
rv = self._next()
if not self.headers_set:
warn(
"The application returned before it started the response.",
WSGIWarning,
stacklevel=2,
)
check_string("application iterator items", rv)
self.chunks.append(len(rv))
return rv
def close(self):
self.closed = True
if hasattr(self._iterator, "close"):
self._iterator.close()
if self.headers_set:
status_code, headers = self.headers_set
bytes_sent = sum(self.chunks)
content_length = headers.get("content-length", type=int)
if status_code == 304:
for key, _value in headers:
key = key.lower()
if key not in ("expires", "content-location") and is_entity_header(
key
):
warn(
"Entity header %r found in 304 response." % key, HTTPWarning
)
if bytes_sent:
warn("304 responses must not have a body.", HTTPWarning)
elif 100 <= status_code < 200 or status_code == 204:
if content_length != 0:
warn(
"%r responses must have an empty content length." % status_code,
HTTPWarning,
)
if bytes_sent:
warn(
"%r responses must not have a body." % status_code, HTTPWarning
)
elif content_length is not None and content_length != bytes_sent:
warn(
"Content-Length and the number of bytes sent to the client do not"
" match.",
WSGIWarning,
)
def __del__(self):
if not self.closed:
try:
warn(
"Iterator was garbage collected before it was closed.", WSGIWarning
)
except Exception:
pass
class LintMiddleware(object):
"""Warns about common errors in the WSGI and HTTP behavior of the
    server and wrapped application. Some of the issues it checks are:
- invalid status codes
- non-bytestrings sent to the WSGI server
- strings returned from the WSGI application
- non-empty conditional responses
- unquoted etags
- relative URLs in the Location header
- unsafe calls to wsgi.input
- unclosed iterators
Error information is emitted using the :mod:`warnings` module.
:param app: The WSGI application to wrap.
.. code-block:: python
from werkzeug.middleware.lint import LintMiddleware
app = LintMiddleware(app)
"""
def __init__(self, app):
self.app = app
def check_environ(self, environ):
if type(environ) is not dict:
warn(
"WSGI environment is not a standard Python dict.",
WSGIWarning,
stacklevel=4,
)
for key in (
"REQUEST_METHOD",
"SERVER_NAME",
"SERVER_PORT",
"wsgi.version",
"wsgi.input",
"wsgi.errors",
"wsgi.multithread",
"wsgi.multiprocess",
"wsgi.run_once",
):
if key not in environ:
warn(
"Required environment key %r not found" % key,
WSGIWarning,
stacklevel=3,
)
if environ["wsgi.version"] != (1, 0):
warn("Environ is not a WSGI 1.0 environ.", WSGIWarning, stacklevel=3)
script_name = environ.get("SCRIPT_NAME", "")
path_info = environ.get("PATH_INFO", "")
if script_name and script_name[0] != "/":
warn(
"'SCRIPT_NAME' does not start with a slash: %r" % script_name,
WSGIWarning,
stacklevel=3,
)
if path_info and path_info[0] != "/":
warn(
"'PATH_INFO' does not start with a slash: %r" % path_info,
WSGIWarning,
stacklevel=3,
)
def check_start_response(self, status, headers, exc_info):
check_string("status", status)
status_code = status.split(None, 1)[0]
if len(status_code) != 3 or not status_code.isdigit():
warn(WSGIWarning("Status code must be three digits"), stacklevel=3)
if len(status) < 4 or status[3] != " ":
warn(
WSGIWarning(
"Invalid value for status %r. Valid "
"status strings are three digits, a space "
"and a status explanation"
),
stacklevel=3,
)
status_code = int(status_code)
if status_code < 100:
warn(WSGIWarning("status code < 100 detected"), stacklevel=3)
if type(headers) is not list:
warn(WSGIWarning("header list is not a list"), stacklevel=3)
for item in headers:
if type(item) is not tuple or len(item) != 2:
warn(WSGIWarning("Headers must tuple 2-item tuples"), stacklevel=3)
name, value = item
if type(name) is not str or type(value) is not str:
warn(WSGIWarning("header items must be strings"), stacklevel=3)
if name.lower() == "status":
warn(
WSGIWarning(
"The status header is not supported due to "
"conflicts with the CGI spec."
),
stacklevel=3,
)
if exc_info is not None and not isinstance(exc_info, tuple):
warn(WSGIWarning("invalid value for exc_info"), stacklevel=3)
headers = Headers(headers)
self.check_headers(headers)
return status_code, headers
def check_headers(self, headers):
etag = headers.get("etag")
if etag is not None:
if etag.startswith(("W/", "w/")):
if etag.startswith("w/"):
warn(
HTTPWarning("weak etag indicator should be upcase."),
stacklevel=4,
)
etag = etag[2:]
if not (etag[:1] == etag[-1:] == '"'):
warn(HTTPWarning("unquoted etag emitted."), stacklevel=4)
location = headers.get("location")
if location is not None:
if not urlparse(location).netloc:
warn(
HTTPWarning("absolute URLs required for location header"),
stacklevel=4,
)
def check_iterator(self, app_iter):
if isinstance(app_iter, string_types):
warn(
"The application returned astring. The response will send one character"
" at a time to the client, which will kill performance. Return a list"
" or iterable instead.",
WSGIWarning,
stacklevel=3,
)
def __call__(self, *args, **kwargs):
if len(args) != 2:
warn("A WSGI app takes two arguments.", WSGIWarning, stacklevel=2)
if kwargs:
warn(
"A WSGI app does not take keyword arguments.", WSGIWarning, stacklevel=2
)
environ, start_response = args
self.check_environ(environ)
environ["wsgi.input"] = InputStream(environ["wsgi.input"])
environ["wsgi.errors"] = ErrorStream(environ["wsgi.errors"])
# Hook our own file wrapper in so that applications will always
# iterate to the end and we can check the content length.
environ["wsgi.file_wrapper"] = FileWrapper
headers_set = []
chunks = []
def checking_start_response(*args, **kwargs):
if len(args) not in (2, 3):
warn(
"Invalid number of arguments: %s, expected 2 or 3." % len(args),
WSGIWarning,
stacklevel=2,
)
if kwargs:
warn("'start_response' does not take keyword arguments.", WSGIWarning)
status, headers = args[:2]
if len(args) == 3:
exc_info = args[2]
else:
exc_info = None
headers_set[:] = self.check_start_response(status, headers, exc_info)
return GuardedWrite(start_response(status, headers, exc_info), chunks)
app_iter = self.app(environ, checking_start_response)
self.check_iterator(app_iter)
return GuardedIterator(app_iter, headers_set, chunks)
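# A minimal wrapping sketch (the `hello_app` callable below is a hypothetical
# example, not part of this module):
#
#   def hello_app(environ, start_response):
#       start_response("200 OK", [("Content-Type", "text/plain")])
#       return [b"Hello, World!"]
#
#   app = LintMiddleware(hello_app)
#
# Violations of the checks above (malformed status strings, non-list headers,
# unclosed iterators, ...) are then reported through the `warnings` module.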
|
apache-2.0
|
asquarezone/AnsibleZone
|
BookExamples/ScalingToMultipleHosts/ec2.py
|
20
|
60320
|
#!/usr/bin/env python
'''
EC2 external inventory script
=================================
Generates inventory that Ansible can understand by making API request to
AWS EC2 using the Boto library.
NOTE: This script assumes Ansible is being executed where the environment
variables needed for Boto have already been set:
export AWS_ACCESS_KEY_ID='AK123'
export AWS_SECRET_ACCESS_KEY='abc123'
This script also assumes there is an ec2.ini file alongside it. To specify a
different path to ec2.ini, define the EC2_INI_PATH environment variable:
export EC2_INI_PATH=/path/to/my_ec2.ini
If you're using eucalyptus you need to set the above variables and
you need to define:
export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
If you're using boto profiles (requires boto>=2.24.0) you can choose a profile
using the --boto-profile command line argument (e.g. ec2.py --boto-profile prod) or using
the AWS_PROFILE variable:
AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml
For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
When run against a specific host, this script returns the following variables:
- ec2_ami_launch_index
- ec2_architecture
- ec2_association
- ec2_attachTime
- ec2_attachment
- ec2_attachmentId
- ec2_client_token
- ec2_deleteOnTermination
- ec2_description
- ec2_deviceIndex
- ec2_dns_name
- ec2_eventsSet
- ec2_group_name
- ec2_hypervisor
- ec2_id
- ec2_image_id
- ec2_instanceState
- ec2_instance_type
- ec2_ipOwnerId
- ec2_ip_address
- ec2_item
- ec2_kernel
- ec2_key_name
- ec2_launch_time
- ec2_monitored
- ec2_monitoring
- ec2_networkInterfaceId
- ec2_ownerId
- ec2_persistent
- ec2_placement
- ec2_platform
- ec2_previous_state
- ec2_private_dns_name
- ec2_private_ip_address
- ec2_publicIp
- ec2_public_dns_name
- ec2_ramdisk
- ec2_reason
- ec2_region
- ec2_requester_id
- ec2_root_device_name
- ec2_root_device_type
- ec2_security_group_ids
- ec2_security_group_names
- ec2_shutdown_state
- ec2_sourceDestCheck
- ec2_spot_instance_request_id
- ec2_state
- ec2_state_code
- ec2_state_reason
- ec2_status
- ec2_subnet_id
- ec2_tenancy
- ec2_virtualization_type
- ec2_vpc_id
These variables are pulled out of a boto.ec2.instance object. There is a lack of
consistency with variable spellings (camelCase and underscores) since this
just loops through all variables the object exposes. It is preferred to use the
ones with underscores when multiple exist.
In addition, if an instance has AWS Tags associated with it, each tag is a new
variable named:
- ec2_tag_[Key] = [Value]
Security groups are comma-separated in 'ec2_security_group_ids' and
'ec2_security_group_names'.
'''
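# Typical invocations (illustrative; the flags are defined in parse_cli_args
# further below):
#   ./ec2.py --list                  # full inventory as JSON
#   ./ec2.py --host <address>        # variables for a single instance
#   ./ec2.py --refresh-cache --list  # bypass the cache files
#   ansible-playbook -i ec2.py myplaybook.yml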
# (c) 2012, Peter Sankauskas
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
######################################################################
import sys
import os
import argparse
import re
from time import time
import boto
from boto import ec2
from boto import rds
from boto import elasticache
from boto import route53
import six
from six.moves import configparser
from collections import defaultdict
try:
import json
except ImportError:
import simplejson as json
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
def __init__(self):
''' Main execution path '''
# Inventory grouped by instance IDs, tags, security groups, regions,
# and availability zones
self.inventory = self._empty_inventory()
# Index of hostname (address) to instance ID
self.index = {}
# Boto profile to use (if any)
self.boto_profile = None
# AWS credentials.
self.credentials = {}
# Read settings and parse CLI arguments
self.parse_cli_args()
self.read_settings()
# Make sure that profile_name is not passed at all if not set
# as pre 2.24 boto will fall over otherwise
if self.boto_profile:
if not hasattr(boto.ec2.EC2Connection, 'profile_name'):
self.fail_with_error("boto version must be >= 2.24 to use profile")
# Cache
if self.args.refresh_cache:
self.do_api_calls_update_cache()
elif not self.is_cache_valid():
self.do_api_calls_update_cache()
# Data to print
if self.args.host:
data_to_print = self.get_host_info()
elif self.args.list:
# Display list of instances for inventory
if self.inventory == self._empty_inventory():
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + self.cache_max_age) > current_time:
if os.path.isfile(self.cache_path_index):
return True
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
if six.PY3:
config = configparser.ConfigParser()
else:
config = configparser.SafeConfigParser()
ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
ec2_ini_path = os.path.expanduser(os.path.expandvars(os.environ.get('EC2_INI_PATH', ec2_default_ini_path)))
config.read(ec2_ini_path)
# is eucalyptus?
self.eucalyptus_host = None
self.eucalyptus = False
if config.has_option('ec2', 'eucalyptus'):
self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
# Regions
self.regions = []
configRegions = config.get('ec2', 'regions')
configRegions_exclude = config.get('ec2', 'regions_exclude')
if (configRegions == 'all'):
if self.eucalyptus_host:
                self.regions.append(boto.connect_euca(host=self.eucalyptus_host, **self.credentials).region.name)
else:
for regionInfo in ec2.regions():
if regionInfo.name not in configRegions_exclude:
self.regions.append(regionInfo.name)
else:
self.regions = configRegions.split(",")
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
if config.has_option('ec2', 'hostname_variable'):
self.hostname_variable = config.get('ec2', 'hostname_variable')
else:
self.hostname_variable = None
if config.has_option('ec2', 'destination_format') and \
config.has_option('ec2', 'destination_format_tags'):
self.destination_format = config.get('ec2', 'destination_format')
self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',')
else:
self.destination_format = None
self.destination_format_tags = None
# Route53
self.route53_enabled = config.getboolean('ec2', 'route53')
self.route53_excluded_zones = []
if config.has_option('ec2', 'route53_excluded_zones'):
self.route53_excluded_zones.extend(
                config.get('ec2', 'route53_excluded_zones').split(','))
# Include RDS instances?
self.rds_enabled = True
if config.has_option('ec2', 'rds'):
self.rds_enabled = config.getboolean('ec2', 'rds')
# Include ElastiCache instances?
self.elasticache_enabled = True
if config.has_option('ec2', 'elasticache'):
self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
# Return all EC2 instances?
if config.has_option('ec2', 'all_instances'):
self.all_instances = config.getboolean('ec2', 'all_instances')
else:
self.all_instances = False
# Instance states to be gathered in inventory. Default is 'running'.
# Setting 'all_instances' to 'yes' overrides this option.
ec2_valid_instance_states = [
'pending',
'running',
'shutting-down',
'terminated',
'stopping',
'stopped'
]
self.ec2_instance_states = []
if self.all_instances:
self.ec2_instance_states = ec2_valid_instance_states
elif config.has_option('ec2', 'instance_states'):
for instance_state in config.get('ec2', 'instance_states').split(','):
instance_state = instance_state.strip()
if instance_state not in ec2_valid_instance_states:
continue
self.ec2_instance_states.append(instance_state)
else:
self.ec2_instance_states = ['running']
# Return all RDS instances? (if RDS is enabled)
if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
else:
self.all_rds_instances = False
# Return all ElastiCache replication groups? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
else:
self.all_elasticache_replication_groups = False
# Return all ElastiCache clusters? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
else:
self.all_elasticache_clusters = False
# Return all ElastiCache nodes? (if ElastiCache is enabled)
if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
else:
self.all_elasticache_nodes = False
# boto configuration profile (prefer CLI argument)
self.boto_profile = self.args.boto_profile
if config.has_option('ec2', 'boto_profile') and not self.boto_profile:
self.boto_profile = config.get('ec2', 'boto_profile')
# AWS credentials (prefer environment variables)
if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or
os.environ.get('AWS_PROFILE')):
if config.has_option('credentials', 'aws_access_key_id'):
aws_access_key_id = config.get('credentials', 'aws_access_key_id')
else:
aws_access_key_id = None
if config.has_option('credentials', 'aws_secret_access_key'):
aws_secret_access_key = config.get('credentials', 'aws_secret_access_key')
else:
aws_secret_access_key = None
if config.has_option('credentials', 'aws_security_token'):
aws_security_token = config.get('credentials', 'aws_security_token')
else:
aws_security_token = None
if aws_access_key_id:
self.credentials = {
'aws_access_key_id': aws_access_key_id,
'aws_secret_access_key': aws_secret_access_key
}
if aws_security_token:
self.credentials['security_token'] = aws_security_token
# Cache related
cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
if self.boto_profile:
cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
cache_name = 'ansible-ec2'
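        # Namespace the cache files by the active profile/credential so that
        # switching profiles does not reuse a stale cache.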
aws_profile = lambda: (self.boto_profile or
os.environ.get('AWS_PROFILE') or
os.environ.get('AWS_ACCESS_KEY_ID') or
self.credentials.get('aws_access_key_id', None))
if aws_profile():
cache_name = '%s-%s' % (cache_name, aws_profile())
self.cache_path_cache = cache_dir + "/%s.cache" % cache_name
self.cache_path_index = cache_dir + "/%s.index" % cache_name
self.cache_max_age = config.getint('ec2', 'cache_max_age')
if config.has_option('ec2', 'expand_csv_tags'):
self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags')
else:
self.expand_csv_tags = False
# Configure nested groups instead of flat namespace.
if config.has_option('ec2', 'nested_groups'):
self.nested_groups = config.getboolean('ec2', 'nested_groups')
else:
self.nested_groups = False
# Replace dash or not in group names
if config.has_option('ec2', 'replace_dash_in_groups'):
self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups')
else:
self.replace_dash_in_groups = True
# Configure which groups should be created.
group_by_options = [
'group_by_instance_id',
'group_by_region',
'group_by_availability_zone',
'group_by_ami_id',
'group_by_instance_type',
'group_by_key_pair',
'group_by_vpc_id',
'group_by_security_group',
'group_by_tag_keys',
'group_by_tag_none',
'group_by_route53_names',
'group_by_rds_engine',
'group_by_rds_parameter_group',
'group_by_elasticache_engine',
'group_by_elasticache_cluster',
'group_by_elasticache_parameter_group',
'group_by_elasticache_replication_group',
]
for option in group_by_options:
if config.has_option('ec2', option):
setattr(self, option, config.getboolean('ec2', option))
else:
setattr(self, option, True)
# Do we need to just include hosts that match a pattern?
try:
pattern_include = config.get('ec2', 'pattern_include')
if pattern_include and len(pattern_include) > 0:
self.pattern_include = re.compile(pattern_include)
else:
self.pattern_include = None
except configparser.NoOptionError:
self.pattern_include = None
# Do we need to exclude hosts that match a pattern?
try:
            pattern_exclude = config.get('ec2', 'pattern_exclude')
if pattern_exclude and len(pattern_exclude) > 0:
self.pattern_exclude = re.compile(pattern_exclude)
else:
self.pattern_exclude = None
except configparser.NoOptionError:
self.pattern_exclude = None
# Instance filters (see boto and EC2 API docs). Ignore invalid filters.
self.ec2_instance_filters = defaultdict(list)
if config.has_option('ec2', 'instance_filters'):
filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]
for instance_filter in filters:
instance_filter = instance_filter.strip()
if not instance_filter or '=' not in instance_filter:
continue
filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
if not filter_key:
continue
self.ec2_instance_filters[filter_key].append(filter_value)
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
if self.route53_enabled:
self.get_route53_records()
for region in self.regions:
self.get_instances_by_region(region)
if self.rds_enabled:
self.get_rds_instances_by_region(region)
if self.elasticache_enabled:
self.get_elasticache_clusters_by_region(region)
self.get_elasticache_replication_groups_by_region(region)
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def connect(self, region):
''' create connection to api server'''
if self.eucalyptus:
conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials)
conn.APIVersion = '2010-08-31'
else:
conn = self.connect_to_aws(ec2, region)
return conn
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args
def connect_to_aws(self, module, region):
connect_args = self.credentials
# only pass the profile name if it's set (as it is not supported by older boto versions)
if self.boto_profile:
connect_args['profile_name'] = self.boto_profile
self.boto_fix_security_token_in_profile(connect_args)
conn = module.connect_to_region(region, **connect_args)
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
return conn
def get_instances_by_region(self, region):
''' Makes an AWS EC2 API call to the list of instances in a particular
region '''
try:
conn = self.connect(region)
reservations = []
if self.ec2_instance_filters:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
else:
reservations = conn.get_all_instances()
# Pull the tags back in a second step
# AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
# reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
instance_ids = []
for reservation in reservations:
instance_ids.extend([instance.id for instance in reservation.instances])
max_filter_value = 199
tags = []
for i in range(0, len(instance_ids), max_filter_value):
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
tags_by_instance_id = defaultdict(dict)
for tag in tags:
tags_by_instance_id[tag.res_id][tag.name] = tag.value
for reservation in reservations:
for instance in reservation.instances:
instance.tags = tags_by_instance_id[instance.id]
self.add_instance(instance, region)
except boto.exception.BotoServerError as e:
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
else:
backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
error = "Error connecting to %s backend.\n%s" % (backend, e.message)
self.fail_with_error(error, 'getting EC2 instances')
def get_rds_instances_by_region(self, region):
''' Makes an AWS API call to the list of RDS instances in a particular
region '''
try:
conn = self.connect_to_aws(rds, region)
if conn:
marker = None
while True:
instances = conn.get_all_dbinstances(marker=marker)
marker = instances.marker
for instance in instances:
self.add_rds_instance(instance, region)
if not marker:
break
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS RDS is down:\n%s" % e.message
self.fail_with_error(error, 'getting RDS instances')
def get_elasticache_clusters_by_region(self, region):
''' Makes an AWS API call to the list of ElastiCache clusters (with
nodes' info) in a particular region.'''
        # ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
# show_cache_node_info = True
# because we also want nodes' information
response = conn.describe_cache_clusters(None, None, None, True)
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to CacheClusters or
            # CacheNodes. Because of that we can't make use of the get_list
# method in the AWSQueryConnection. Let's do the work manually
clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
except KeyError as e:
error = "ElastiCache query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for cluster in clusters:
self.add_elasticache_cluster(cluster, region)
def get_elasticache_replication_groups_by_region(self, region):
''' Makes an AWS API call to get the list of ElastiCache replication groups
in a particular region.'''
# ElastiCache boto module doesn't provide a get_all_instances method,
# that's why we need to call describe directly (it would be called by
# the shorthand method anyway...)
try:
conn = self.connect_to_aws(elasticache, region)
if conn:
response = conn.describe_replication_groups()
except boto.exception.BotoServerError as e:
error = e.reason
if e.error_code == 'AuthFailure':
error = self.get_auth_error_message()
if not e.reason == "Forbidden":
error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
self.fail_with_error(error, 'getting ElastiCache clusters')
try:
# Boto also doesn't provide wrapper classes to ReplicationGroups
# Because of that we can't make use of the get_list method in the
# AWSQueryConnection. Let's do the work manually
replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
except KeyError as e:
error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
self.fail_with_error(error, 'getting ElastiCache clusters')
for replication_group in replication_groups:
self.add_elasticache_replication_group(replication_group, region)
def get_auth_error_message(self):
''' create an informative error message if there is an issue authenticating'''
errors = ["Authentication error retrieving ec2 inventory."]
if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
else:
errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
if len(boto_config_found) > 0:
errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
else:
errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
return '\n'.join(errors)
def fail_with_error(self, err_msg, err_operation=None):
'''log an error to std err for ansible-playbook to consume and exit'''
if err_operation:
err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
err_msg=err_msg, err_operation=err_operation)
sys.stderr.write(err_msg)
sys.exit(1)
def get_instance(self, region, instance_id):
conn = self.connect(region)
reservations = conn.get_all_instances([instance_id])
for reservation in reservations:
for instance in reservation.instances:
return instance
def add_instance(self, instance, region):
''' Adds an instance to the inventory and index, as long as it is
addressable '''
# Only return instances with desired instance states
if instance.state not in self.ec2_instance_states:
return
# Select the best destination address
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
else:
dest = getattr(instance, self.destination_variable, None)
if dest is None:
dest = getattr(instance, 'tags').get(self.destination_variable, None)
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
else:
hostname = self.to_safe(hostname).lower()
# if we only want to include hosts that match a pattern, skip those that don't
if self.pattern_include and not self.pattern_include.match(hostname):
return
# if we need to exclude hosts that match a pattern, skip those
if self.pattern_exclude and self.pattern_exclude.match(hostname):
return
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.placement, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.placement)
self.push_group(self.inventory, 'zones', instance.placement)
# Inventory: Group by Amazon Machine Image (AMI) ID
if self.group_by_ami_id:
ami_id = self.to_safe(instance.image_id)
self.push(self.inventory, ami_id, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'images', ami_id)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_type)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by key pair
if self.group_by_key_pair and instance.key_name:
key_name = self.to_safe('key_' + instance.key_name)
self.push(self.inventory, key_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'keys', key_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
for group in instance.groups:
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by tag keys
if self.group_by_tag_keys:
for k, v in instance.tags.items():
if self.expand_csv_tags and v and ',' in v:
values = map(lambda x: x.strip(), v.split(','))
else:
values = [v]
for v in values:
if v:
key = self.to_safe("tag_" + k + "=" + v)
else:
key = self.to_safe("tag_" + k)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
if v:
self.push_group(self.inventory, self.to_safe("tag_" + k), key)
# Inventory: Group by Route53 domain names if enabled
if self.route53_enabled and self.group_by_route53_names:
route53_names = self.get_instance_route53_names(instance)
for name in route53_names:
self.push(self.inventory, name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'route53', name)
# Global Tag: instances without tags
if self.group_by_tag_none and len(instance.tags) == 0:
self.push(self.inventory, 'tag_none', hostname)
if self.nested_groups:
self.push_group(self.inventory, 'tags', 'tag_none')
# Global Tag: tag all EC2 instances
self.push(self.inventory, 'ec2', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
# Only want available instances unless all_rds_instances is True
if not self.all_rds_instances and instance.status != 'available':
return
# Select the best destination address
dest = instance.endpoint[0]
if not dest:
# Skip instances we cannot address (e.g. private VPC subnet)
return
# Set the inventory name
hostname = None
if self.hostname_variable:
if self.hostname_variable.startswith('tag_'):
hostname = instance.tags.get(self.hostname_variable[4:], None)
else:
hostname = getattr(instance, self.hostname_variable)
# If we can't get a nice hostname, use the destination address
if not hostname:
hostname = dest
hostname = self.to_safe(hostname).lower()
# Add to index
self.index[hostname] = [region, instance.id]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[instance.id] = [hostname]
if self.nested_groups:
self.push_group(self.inventory, 'instances', instance.id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, instance.availability_zone, hostname)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, instance.availability_zone)
self.push_group(self.inventory, 'zones', instance.availability_zone)
# Inventory: Group by instance type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + instance.instance_class)
self.push(self.inventory, type_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC
if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
self.push(self.inventory, vpc_id_name, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'vpcs', vpc_id_name)
# Inventory: Group by security group
if self.group_by_security_group:
try:
if instance.security_group:
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, hostname)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
# Inventory: Group by parameter group
if self.group_by_rds_parameter_group:
self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname)
if self.nested_groups:
self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
# Global Tag: all RDS instances
self.push(self.inventory, 'rds', hostname)
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_elasticache_cluster(self, cluster, region):
''' Adds an ElastiCache cluster to the inventory and index, as long as
its nodes are addressable '''
# Only want available clusters unless all_elasticache_clusters is True
if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
return
# Select the best destination address
if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
# Memcached cluster
dest = cluster['ConfigurationEndpoint']['Address']
is_redis = False
else:
# Redis single node cluster
# Because all Redis clusters are single nodes, we'll merge the
# info from the cluster with info about the node
dest = cluster['CacheNodes'][0]['Endpoint']['Address']
is_redis = True
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, cluster['CacheClusterId']]
# Inventory: Group by instance ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[cluster['CacheClusterId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])
# Inventory: Group by region
if self.group_by_region and not is_redis:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone and not is_redis:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type and not is_redis:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group and not is_redis:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine and not is_redis:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))
# Inventory: Group by parameter group
if self.group_by_elasticache_parameter_group:
self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))
# Inventory: Group by replication group
if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])
host_info = self.get_host_info_dict_from_describe_dict(cluster)
self.inventory["_meta"]["hostvars"][dest] = host_info
# Add the nodes
for node in cluster['CacheNodes']:
self.add_elasticache_node(node, cluster, region)
def add_elasticache_node(self, node, cluster, region):
''' Adds an ElastiCache node to the inventory and index, as long as
it is addressable '''
# Only want available nodes unless all_elasticache_nodes is True
if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
return
# Select the best destination address
dest = node['Endpoint']['Address']
if not dest:
# Skip nodes we cannot address (e.g. private VPC subnet)
return
node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])
# Add to index
self.index[dest] = [region, node_id]
# Inventory: Group by node ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[node_id] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', node_id)
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone
if self.group_by_availability_zone:
self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
if self.nested_groups:
if self.group_by_region:
self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])
# Inventory: Group by node type
if self.group_by_instance_type:
type_name = self.to_safe('type_' + cluster['CacheNodeType'])
self.push(self.inventory, type_name, dest)
if self.nested_groups:
self.push_group(self.inventory, 'types', type_name)
# Inventory: Group by VPC (information not available in the current
# AWS API version for ElastiCache)
# Inventory: Group by security group
if self.group_by_security_group:
# Check for the existence of the 'SecurityGroups' key and also if
# this key has some value. When the cluster is not placed in a SG
# the query can return None here and cause an error.
if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
for security_group in cluster['SecurityGroups']:
key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
self.push(self.inventory, key, dest)
if self.nested_groups:
self.push_group(self.inventory, 'security_groups', key)
# Inventory: Group by engine
if self.group_by_elasticache_engine:
self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))
# Inventory: Group by parameter group (done at cluster level)
# Inventory: Group by replication group (done at cluster level)
# Inventory: Group by ElastiCache Cluster
if self.group_by_elasticache_cluster:
self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)
# Global Tag: all ElastiCache nodes
self.push(self.inventory, 'elasticache_nodes', dest)
host_info = self.get_host_info_dict_from_describe_dict(node)
if dest in self.inventory["_meta"]["hostvars"]:
self.inventory["_meta"]["hostvars"][dest].update(host_info)
else:
self.inventory["_meta"]["hostvars"][dest] = host_info
def add_elasticache_replication_group(self, replication_group, region):
''' Adds an ElastiCache replication group to the inventory and index '''
# Only want available clusters unless all_elasticache_replication_groups is True
if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
return
# Select the best destination address (PrimaryEndpoint)
dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']
if not dest:
# Skip clusters we cannot address (e.g. private VPC subnet)
return
# Add to index
self.index[dest] = [region, replication_group['ReplicationGroupId']]
# Inventory: Group by ID (always a group of 1)
if self.group_by_instance_id:
self.inventory[replication_group['ReplicationGroupId']] = [dest]
if self.nested_groups:
self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])
# Inventory: Group by region
if self.group_by_region:
self.push(self.inventory, region, dest)
if self.nested_groups:
self.push_group(self.inventory, 'regions', region)
# Inventory: Group by availability zone (doesn't apply to replication groups)
# Inventory: Group by node type (doesn't apply to replication groups)
# Inventory: Group by VPC (information not available in the current
# AWS API version for replication groups)
# Inventory: Group by security group (doesn't apply to replication groups)
# Check this value in cluster level
# Inventory: Group by engine (replication groups are always Redis)
if self.group_by_elasticache_engine:
self.push(self.inventory, 'elasticache_redis', dest)
if self.nested_groups:
self.push_group(self.inventory, 'elasticache_engines', 'redis')
# Global Tag: all ElastiCache clusters
self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])
host_info = self.get_host_info_dict_from_describe_dict(replication_group)
self.inventory["_meta"]["hostvars"][dest] = host_info
def get_route53_records(self):
''' Get and store the map of resource records to domain names that
point to them. '''
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
self.route53_records = {}
for zone in route53_zones:
rrsets = r53_conn.get_all_rrsets(zone.id)
for record_set in rrsets:
record_name = record_set.name
if record_name.endswith('.'):
record_name = record_name[:-1]
for resource in record_set.resource_records:
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
name_list = set()
for attrib in instance_attributes:
try:
value = getattr(instance, attrib)
except AttributeError:
continue
if value in self.route53_records:
name_list.update(self.route53_records[value])
return list(name_list)
def get_host_info_dict_from_instance(self, instance):
instance_vars = {}
for key in vars(instance):
value = getattr(instance, key)
key = self.to_safe('ec2_' + key)
# Handle complex types
# state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
if key == 'ec2__state':
instance_vars['ec2_state'] = instance.state or ''
instance_vars['ec2_state_code'] = instance.state_code
elif key == 'ec2__previous_state':
instance_vars['ec2_previous_state'] = instance.previous_state or ''
instance_vars['ec2_previous_state_code'] = instance.previous_state_code
elif type(value) in [int, bool]:
instance_vars[key] = value
elif isinstance(value, six.string_types):
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2__placement':
instance_vars['ec2_placement'] = value.zone
elif key == 'ec2_tags':
for k, v in value.items():
if self.expand_csv_tags and ',' in v:
v = map(lambda x: x.strip(), v.split(','))
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
group_ids = []
group_names = []
for group in value:
group_ids.append(group.id)
group_names.append(group.name)
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
return instance_vars
def get_host_info_dict_from_describe_dict(self, describe_dict):
''' Parses the dictionary returned by the API call into a flat list
of parameters. This method should be used only when 'describe' is
used directly because Boto doesn't provide specific classes. '''
# I really don't agree with prefixing everything with 'ec2'
# because EC2, RDS and ElastiCache are different services.
# I'm just following the pattern used until now to not break any
# compatibility.
host_info = {}
for key in describe_dict:
value = describe_dict[key]
key = self.to_safe('ec2_' + self.uncammelize(key))
# Handle complex types
# Target: Memcached Cache Clusters
if key == 'ec2_configuration_endpoint' and value:
host_info['ec2_configuration_endpoint_address'] = value['Address']
host_info['ec2_configuration_endpoint_port'] = value['Port']
# Target: Cache Nodes and Redis Cache Clusters (single node)
if key == 'ec2_endpoint' and value:
host_info['ec2_endpoint_address'] = value['Address']
host_info['ec2_endpoint_port'] = value['Port']
# Target: Redis Replication Groups
if key == 'ec2_node_groups' and value:
host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
replica_count = 0
for node in value[0]['NodeGroupMembers']:
if node['CurrentRole'] == 'primary':
host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
if key == 'ec2_member_clusters' and value:
host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])
# Target: All Cache Clusters
elif key == 'ec2_cache_parameter_group':
host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']
# Target: Almost everything
elif key == 'ec2_security_groups':
# Skip if SecurityGroups is None
# (it is possible to have the key defined but no value in it).
if value is not None:
sg_ids = []
for sg in value:
sg_ids.append(sg['SecurityGroupId'])
host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])
# Target: Everything
# Preserve booleans and integers
elif type(value) in [int, bool]:
host_info[key] = value
# Target: Everything
# Sanitize string values
elif isinstance(value, six.string_types):
host_info[key] = value.strip()
# Target: Everything
# Replace None by an empty string
elif type(value) == type(None):
host_info[key] = ''
else:
# Remove non-processed complex types
pass
return host_info
def get_host_info(self):
''' Get variables about a specific host '''
if len(self.index) == 0:
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
(region, instance_id) = self.index[self.args.host]
instance = self.get_instance(region, instance_id)
return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
def push(self, my_dict, key, element):
''' Push an element onto an array that may not have been defined in
the dict '''
group_info = my_dict.setdefault(key, [])
if isinstance(group_info, dict):
host_list = group_info.setdefault('hosts', [])
host_list.append(element)
else:
group_info.append(element)
def push_group(self, my_dict, key, element):
''' Push a group as a child of another group. '''
parent_group = my_dict.setdefault(key, {})
if not isinstance(parent_group, dict):
parent_group = my_dict[key] = {'hosts': parent_group}
child_groups = parent_group.setdefault('children', [])
if element not in child_groups:
child_groups.append(element)
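# Illustrative note (not from the original script), using hypothetical values:
# push(inventory, 'us-east-1', 'web01') yields inventory['us-east-1'] = ['web01'];
# a later push_group(inventory, 'us-east-1', 'us-east-1a') converts that group to
# {'hosts': ['web01'], 'children': ['us-east-1a']}, and
# push_group(inventory, 'regions', 'us-east-1') yields
# inventory['regions'] = {'children': ['us-east-1']}.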
def get_inventory_from_cache(self):
''' Reads the inventory from the cache file and returns it as a JSON
object '''
cache = open(self.cache_path_cache, 'r')
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file and sets self.index '''
cache = open(self.cache_path_index, 'r')
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
json_data = self.json_format_dict(data, True)
cache = open(filename, 'w')
cache.write(json_data)
cache.close()
def uncammelize(self, key):
temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
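# For example (illustrative, not in the original source):
#   uncammelize('CacheClusterId') -> 'cache_cluster_id'
#   uncammelize('DBInstanceClass') -> 'db_instance_class'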
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
regex = "[^A-Za-z0-9\_"
if not self.replace_dash_in_groups:
regex += "\-"
return re.sub(regex + "]", "_", word)
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
# Run the script
Ec2Inventory()
|
apache-2.0
|
juliebehr/gaff2xml
|
openmoltools/packmol.py
|
4
|
9546
|
import numpy as np
import shutil
import os
import mdtraj as md
from mdtraj.utils import enter_temp_directory
from mdtraj.utils.delay_import import import_
import tempfile
from distutils.spawn import find_executable
import simtk.unit as units
PACKMOL_PATH = find_executable("packmol")
HEADER_TEMPLATE = """
# Mixture
tolerance %f
filetype pdb
output %s
add_amber_ter
"""
BOX_TEMPLATE = """
structure %s
number %d
inside box 0. 0. 0. %f %f %f
end structure
"""
def pack_box(pdb_filenames_or_trajectories, n_molecules_list, tolerance=2.0, box_size=None):
"""Run packmol to generate a box containing a mixture of molecules.
Parameters
----------
pdb_filenames_or_trajectories : list({str, Trajectory})
List of pdb filenames or trajectories for each component of mixture. If this is
a list of trajectories, the trajectories will be saved as
temporary files to be run through packmol.
n_molecules_list : list(int)
The number of molecules of each mixture component.
tolerance : float, optional, default=2.0
The minimum spacing between molecules during packing. In ANGSTROMS!
box_size : float, optional
The size of the box to generate. In ANGSTROMS.
Default generates boxes that are very large for increased stability.
May require extra time for energy minimization and equilibration.
Returns
-------
trj : MDTraj.Trajectory
Single frame trajectory with mixture box.
Notes
-----
Be aware that MDTraj uses nanometers internally, but packmol uses angstrom
units. The present function takes `tolerance` and `box_size` in
angstrom units, but the output trajectory will have data in nm.
Also note that OpenMM is pretty picky about the format of unit cell input,
so use the example in tests/test_packmol.py to ensure that you do the right thing.
"""
assert len(pdb_filenames_or_trajectories) == len(n_molecules_list), "Must input same number of pdb filenames as num molecules"
pdb_filenames = []
for obj in pdb_filenames_or_trajectories:
try: # See if MDTraj Trajectory
tmp_filename = tempfile.mktemp(suffix=".pdb")
obj.save_pdb(tmp_filename)
pdb_filenames.append(tmp_filename)
except AttributeError: # Not an MDTraj Trajectory, assume filename
pdb_filenames.append(obj)
if PACKMOL_PATH is None:
raise(IOError("Packmol not found, cannot run pack_box()"))
output_filename = tempfile.mktemp(suffix=".pdb")
# approximating volume to initialize box
if box_size is None:
box_size = approximate_volume(pdb_filenames, n_molecules_list)
header = HEADER_TEMPLATE % (tolerance, output_filename)
for k in range(len(pdb_filenames)):
filename = pdb_filenames[k]
n_molecules = n_molecules_list[k]
header = header + BOX_TEMPLATE % (filename, n_molecules, box_size, box_size, box_size)
pwd = os.getcwd()
print(header)
packmol_filename = "packmol_input.txt"
packmol_filename = tempfile.mktemp(suffix=".txt")
file_handle = open(packmol_filename, 'w')
file_handle.write(header)
file_handle.close()
print(header)
os.system("%s < %s" % (PACKMOL_PATH, packmol_filename))
trj = md.load(output_filename)
assert trj.topology.n_chains == sum(n_molecules_list), "Packmol error: molecules missing from output"
#Begin hack to introduce bonds for the MISSING CONECT ENTRIES THAT PACKMOL FAILS TO WRITE
top, bonds = trj.top.to_dataframe()
trj_i = [md.load(filename) for filename in pdb_filenames]
bonds_i = [t.top.to_dataframe()[1] for t in trj_i]
offset = 0
bonds = []
for i in range(len(pdb_filenames)):
n_atoms = trj_i[i].n_atoms
for j in range(n_molecules_list[i]):
bonds.extend(bonds_i[i] + offset)
offset += n_atoms
bonds = np.array(bonds)
trj.top = md.Topology.from_dataframe(top, bonds)
trj.unitcell_vectors = np.array([np.eye(3)]) * box_size / 10.
return trj
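# Illustrative usage sketch (not part of the original module); the filenames and
# counts are hypothetical, and packmol must be available on the PATH:
#   trj = pack_box(["water.pdb", "ethanol.pdb"], [500, 100], box_size=40.0)
#   trj.save("mixture.pdb")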
def approximate_volume(pdb_filenames, n_molecules_list, box_scaleup_factor=2.0):
"""Approximate the appropriate box size based on the number and types of atoms present.
Parameters
----------
pdb_filenames : list(str)
List of pdb filenames for each component of mixture.
n_molecules_list : list(int)
The number of molecules of each mixture component.
box_scaleup_factor : float, optional, default = 2.0
Factor by which the estimated box size is increased
Returns
-------
box_size : float
The size of the box to generate. In ANGSTROMS.
Notes
-----
By default, boxes are very large for increased stability, and therefore may
require extra time for energy minimization and equilibration.
"""
volume = 0.0 # in cubic angstroms
for k, (pdb_file) in enumerate(pdb_filenames):
molecule_volume = 0.0
molecule_trj = md.load(pdb_filenames[k])
for atom in molecule_trj.topology.atoms:
if atom.element.symbol == 'H':
molecule_volume += 5.0 # approximated from bondi radius = 1.06 angstroms
else:
molecule_volume += 15.0 # approximated from bondi radius of carbon = 1.53 angstroms
volume += molecule_volume * n_molecules_list[k]
box_size = volume**(1.0/3.0) * box_scaleup_factor
return box_size
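# Rough sanity check (illustrative only): a 3-atom water molecule contributes
# 5 + 5 + 15 = 25 cubic angstroms, so 1000 waters give
# box_size = (25 * 1000)**(1.0/3.0) * 2.0, roughly 58.5 angstroms.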
def approximate_volume_by_density( smiles_strings, n_molecules_list, density = 1.0, box_scaleup_factor = 1.1):
"""Generate an approximate box size based on the number and molecular weight of molecules present, and a target density for the final solvated mixture. If no density is specified, the target density is assumed to be 1 g/ml.
Parameters
----------
smiles_strings : list(str)
List of smiles strings for each component of mixture.
n_molecules_list : list(int)
The number of molecules of each mixture component.
box_scaleup_factor : float, optional, default = 1.1
Factor by which the estimated box size is increased
density : float, optional, default 1.0
Target density for final system in g/ml
Returns
-------
box_size : float
The size (edge length) of the box to generate. In ANGSTROMS.
Notes
-----
By default, boxes are only modestly large. This approach has not been extensively tested for stability but has been used in the Mobley lab for perhaps ~100 different systems without substantial problems.
"""
oechem = import_("openeye.oechem")
density = density * units.grams/units.milliliter
#Load molecules to get molecular weights
wts = []
mass = 0.0*units.grams/units.mole * 1./units.AVOGADRO_CONSTANT_NA #For calculating total mass
for (idx,smi) in enumerate(smiles_strings):
mol = oechem.OEMol()
oechem.OEParseSmiles(mol, smi)
wts.append( oechem.OECalculateMolecularWeight(mol)*units.grams/units.mole )
mass += n_molecules_list[idx] * wts[idx] * 1./units.AVOGADRO_CONSTANT_NA
#Estimate volume based on mass and density
#Density = mass/volume so volume = mass/density (volume units are ml)
vol = mass/density
#Convert to box length in angstroms
edge = vol**(1./3.)
#Compute final box size
box_size = edge*box_scaleup_factor/units.angstroms
return box_size
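# Illustrative usage sketch (SMILES strings and counts are hypothetical; requires
# the OpenEye toolkit to be importable):
#   edge = approximate_volume_by_density(['O', 'CCO'], [1000, 100], density=0.95)
# returns the box edge length in angstroms for a water/ethanol mixture.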
def rename_water_atoms( pdb_filename, O_name = 'O', H1_name = 'H1', H2_name = 'H2' ):
"""Rename water atoms in a specified PDB file to have target names. Typically used to ensure a packmol-generated box containing water has water atom names corresponding to what tleap expects for standard water models.
Parameters
----------
pdb_filename : str
The target PDB filename to edit
O_name : str, optional, default 'O'
Target name to set water oxygen names to
H1_name : str, optional, default 'H1'
Target name to set water hydrogen names to, for first hydrogen
H2_name : str, optional, default 'H2'
Target name to set water hydrogen names to, for second hydrogen
Returns
-------
Notes
-------
Uses ParmEd to make edits. Identifies waters by reading residues from the target PDB file and identifying any residue containing three atoms with names O or O#, H or H#, and H or H# (where # is a digit or sequence of digits) as water molecules.
"""
parmed = import_("parmed")
pdb = parmed.load_file( pdb_filename )
#Find waters and rename
for residue in pdb.residues:
if len(residue)==3:
#Build list of atom types (PDB files don't store these) from names after stripping off digits
types = []
for atom in residue.atoms:
name = atom.name
while name[-1].isdigit():
name = name[:-1]
types.append(name)
#See if it's water and, if so, rename
if 'O' in types and types.count('H')==2:
hct = 0
for atom in residue.atoms:
if 'O' in atom.name:
atom.name = O_name
elif 'H' in atom.name:
if hct==0:
atom.name = H1_name
else:
atom.name = H2_name
hct+=1
#Write file
pdb.write_pdb( pdb_filename )
|
gpl-2.0
|
PopCap/GameIdea
|
Engine/Source/ThirdParty/HTML5/emsdk/Win64/python/2.7.5.3_64bit/Lib/dummy_thread.py
|
326
|
4418
|
"""Drop-in replacement for the thread module.
Meant to be used as a brain-dead substitute so that threaded code does
not need to be rewritten for when the thread module is not present.
Suggested usage is::
try:
import thread
except ImportError:
import dummy_thread as thread
"""
# Exports only things specified by thread documentation;
# skipping obsolete synonyms allocate(), start_new(), exit_thread().
__all__ = ['error', 'start_new_thread', 'exit', 'get_ident', 'allocate_lock',
'interrupt_main', 'LockType']
import traceback as _traceback
class error(Exception):
"""Dummy implementation of thread.error."""
def __init__(self, *args):
self.args = args
def start_new_thread(function, args, kwargs={}):
"""Dummy implementation of thread.start_new_thread().
Compatibility is maintained by making sure that ``args`` is a
tuple and ``kwargs`` is a dictionary. If an exception is raised
and it is SystemExit (which can be done by thread.exit()) it is
caught and nothing is done; all other exceptions are printed out
by using traceback.print_exc().
If the executed function calls interrupt_main the KeyboardInterrupt will be
raised when the function returns.
"""
if type(args) != type(tuple()):
raise TypeError("2nd arg must be a tuple")
if type(kwargs) != type(dict()):
raise TypeError("3rd arg must be a dict")
global _main
_main = False
try:
function(*args, **kwargs)
except SystemExit:
pass
except:
_traceback.print_exc()
_main = True
global _interrupt
if _interrupt:
_interrupt = False
raise KeyboardInterrupt
def exit():
"""Dummy implementation of thread.exit()."""
raise SystemExit
def get_ident():
"""Dummy implementation of thread.get_ident().
Since this module should only be used when the thread module is not
available, it is safe to assume that the current process is the
only thread. Thus a constant can be safely returned.
"""
return -1
def allocate_lock():
"""Dummy implementation of thread.allocate_lock()."""
return LockType()
def stack_size(size=None):
"""Dummy implementation of thread.stack_size()."""
if size is not None:
raise error("setting thread stack size not supported")
return 0
class LockType(object):
"""Class implementing dummy implementation of thread.LockType.
Compatibility is maintained by maintaining self.locked_status
which is a boolean that stores the state of the lock. Pickling of
the lock, though, should not be done since if the thread module is
then used with an unpickled ``lock()`` from here problems could
occur from this class not having atomic methods.
"""
def __init__(self):
self.locked_status = False
def acquire(self, waitflag=None):
"""Dummy implementation of acquire().
For blocking calls, self.locked_status is automatically set to
True and returned appropriately based on value of
``waitflag``. If it is non-blocking, then the value is
actually checked and not set if it is already acquired. This
is all done so that threading.Condition's assert statements
aren't triggered and throw a little fit.
"""
if waitflag is None or waitflag:
self.locked_status = True
return True
else:
if not self.locked_status:
self.locked_status = True
return True
else:
return False
__enter__ = acquire
def __exit__(self, typ, val, tb):
self.release()
def release(self):
"""Release the dummy lock."""
# XXX Perhaps shouldn't actually bother to test? Could lead
# to problems for complex, threaded code.
if not self.locked_status:
raise error
self.locked_status = False
return True
def locked(self):
return self.locked_status
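# Illustrative use of the dummy lock (mirrors the real thread.LockType API;
# example not part of the original module):
#   lock = allocate_lock()
#   lock.acquire()
#   ... # critical section
#   lock.release()
# or, equivalently, "with lock: ..." via the __enter__/__exit__ methods above.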
# Used to signal that interrupt_main was called in a "thread"
_interrupt = False
# True when not executing in a "thread"
_main = True
def interrupt_main():
"""Set _interrupt flag to True to have start_new_thread raise
KeyboardInterrupt upon exiting."""
if _main:
raise KeyboardInterrupt
else:
global _interrupt
_interrupt = True
|
bsd-2-clause
|
fitermay/intellij-community
|
python/lib/Lib/distutils/fancy_getopt.py
|
85
|
18441
|
"""distutils.fancy_getopt
Wrapper around the standard getopt module that provides the following
additional features:
* short and long options are tied together
* options have help strings, so fancy_getopt could potentially
create a complete usage summary
* options set attributes of a passed-in object
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: fancy_getopt.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, string, re
from types import *
import getopt
from distutils.errors import *
# Much like command_re in distutils.core, this is close to but not quite
# the same as a Python NAME -- except, in the spirit of most GNU
# utilities, we use '-' in place of '_'. (The spirit of LISP lives on!)
# The similarities to NAME are again not a coincidence...
longopt_pat = r'[a-zA-Z](?:[a-zA-Z0-9-]*)'
longopt_re = re.compile(r'^%s$' % longopt_pat)
# For recognizing "negative alias" options, eg. "quiet=!verbose"
neg_alias_re = re.compile("^(%s)=!(%s)$" % (longopt_pat, longopt_pat))
# This is used to translate long options to legitimate Python identifiers
# (for use as attributes of some object).
longopt_xlate = string.maketrans('-', '_')
class FancyGetopt:
"""Wrapper around the standard 'getopt()' module that provides some
handy extra functionality:
* short and long options are tied together
* options have help strings, and help text can be assembled
from them
* options set attributes of a passed-in object
* boolean options can have "negative aliases" -- eg. if
--quiet is the "negative alias" of --verbose, then "--quiet"
on the command line sets 'verbose' to false
"""
def __init__ (self, option_table=None):
# The option table is (currently) a list of tuples. The
# tuples may have three or four values:
# (long_option, short_option, help_string [, repeatable])
# if an option takes an argument, its long_option should have '='
# appended; short_option should just be a single character, no ':'
# in any case. If a long_option doesn't have a corresponding
# short_option, short_option should be None. All option tuples
# must have long options.
self.option_table = option_table
# 'option_index' maps long option names to entries in the option
# table (ie. those 3-tuples).
self.option_index = {}
if self.option_table:
self._build_index()
# 'alias' records (duh) alias options; {'foo': 'bar'} means
# --foo is an alias for --bar
self.alias = {}
# 'negative_alias' keeps track of options that are the boolean
# opposite of some other option
self.negative_alias = {}
# These keep track of the information in the option table. We
# don't actually populate these structures until we're ready to
# parse the command-line, since the 'option_table' passed in here
# isn't necessarily the final word.
self.short_opts = []
self.long_opts = []
self.short2long = {}
self.attr_name = {}
self.takes_arg = {}
# And 'option_order' is filled up in 'getopt()'; it records the
# original order of options (and their values) on the command-line,
# but expands short options, converts aliases, etc.
self.option_order = []
# __init__ ()
def _build_index (self):
self.option_index.clear()
for option in self.option_table:
self.option_index[option[0]] = option
def set_option_table (self, option_table):
self.option_table = option_table
self._build_index()
def add_option (self, long_option, short_option=None, help_string=None):
if self.option_index.has_key(long_option):
raise DistutilsGetoptError, \
"option conflict: already an option '%s'" % long_option
else:
option = (long_option, short_option, help_string)
self.option_table.append(option)
self.option_index[long_option] = option
def has_option (self, long_option):
"""Return true if the option table for this parser has an
option with long name 'long_option'."""
return self.option_index.has_key(long_option)
def get_attr_name (self, long_option):
"""Translate long option name 'long_option' to the form it
has as an attribute of some object: ie., translate hyphens
to underscores."""
return string.translate(long_option, longopt_xlate)
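# For example (illustrative): get_attr_name('no-user-cfg') returns 'no_user_cfg'.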
def _check_alias_dict (self, aliases, what):
assert type(aliases) is DictionaryType
for (alias, opt) in aliases.items():
if not self.option_index.has_key(alias):
raise DistutilsGetoptError, \
("invalid %s '%s': "
"option '%s' not defined") % (what, alias, alias)
if not self.option_index.has_key(opt):
raise DistutilsGetoptError, \
("invalid %s '%s': "
"aliased option '%s' not defined") % (what, alias, opt)
def set_aliases (self, alias):
"""Set the aliases for this option parser."""
self._check_alias_dict(alias, "alias")
self.alias = alias
def set_negative_aliases (self, negative_alias):
"""Set the negative aliases for this option parser.
'negative_alias' should be a dictionary mapping option names to
option names, both the key and value must already be defined
in the option table."""
self._check_alias_dict(negative_alias, "negative alias")
self.negative_alias = negative_alias
def _grok_option_table (self):
"""Populate the various data structures that keep tabs on the
option table. Called by 'getopt()' before it can do anything
worthwhile.
"""
self.long_opts = []
self.short_opts = []
self.short2long.clear()
self.repeat = {}
for option in self.option_table:
if len(option) == 3:
long, short, help = option
repeat = 0
elif len(option) == 4:
long, short, help, repeat = option
else:
# the option table is part of the code, so simply
# assert that it is correct
raise ValueError, "invalid option tuple: %r" % (option,)
# Type- and value-check the option names
if type(long) is not StringType or len(long) < 2:
raise DistutilsGetoptError, \
("invalid long option '%s': "
"must be a string of length >= 2") % long
if (not ((short is None) or
(type(short) is StringType and len(short) == 1))):
raise DistutilsGetoptError, \
("invalid short option '%s': "
"must a single character or None") % short
self.repeat[long] = repeat
self.long_opts.append(long)
if long[-1] == '=': # option takes an argument?
if short: short = short + ':'
long = long[0:-1]
self.takes_arg[long] = 1
else:
# Is this option a "negative alias" for some other option (eg.
# "quiet" == "!verbose")?
alias_to = self.negative_alias.get(long)
if alias_to is not None:
if self.takes_arg[alias_to]:
raise DistutilsGetoptError, \
("invalid negative alias '%s': "
"aliased option '%s' takes a value") % \
(long, alias_to)
self.long_opts[-1] = long # XXX redundant?!
self.takes_arg[long] = 0
else:
self.takes_arg[long] = 0
# If this is an alias option, make sure its "takes arg" flag is
# the same as the option it's aliased to.
alias_to = self.alias.get(long)
if alias_to is not None:
if self.takes_arg[long] != self.takes_arg[alias_to]:
raise DistutilsGetoptError, \
("invalid alias '%s': inconsistent with "
"aliased option '%s' (one of them takes a value, "
"the other doesn't") % (long, alias_to)
# Now enforce some bondage on the long option name, so we can
# later translate it to an attribute name on some object. Have
# to do this a bit late to make sure we've removed any trailing
# '='.
if not longopt_re.match(long):
raise DistutilsGetoptError, \
("invalid long option name '%s' " +
"(must be letters, numbers, hyphens only") % long
self.attr_name[long] = self.get_attr_name(long)
if short:
self.short_opts.append(short)
self.short2long[short[0]] = long
# for option_table
# _grok_option_table()
def getopt (self, args=None, object=None):
"""Parse command-line options in args. Store as attributes on object.
If 'args' is None or not supplied, uses 'sys.argv[1:]'. If
'object' is None or not supplied, creates a new OptionDummy
object, stores option values there, and returns a tuple (args,
object). If 'object' is supplied, it is modified in place and
'getopt()' just returns 'args'; in both cases, the returned
'args' is a modified copy of the passed-in 'args' list, which
is left untouched.
"""
if args is None:
args = sys.argv[1:]
if object is None:
object = OptionDummy()
created_object = 1
else:
created_object = 0
self._grok_option_table()
short_opts = string.join(self.short_opts)
try:
opts, args = getopt.getopt(args, short_opts, self.long_opts)
except getopt.error, msg:
raise DistutilsArgError, msg
for opt, val in opts:
if len(opt) == 2 and opt[0] == '-': # it's a short option
opt = self.short2long[opt[1]]
else:
assert len(opt) > 2 and opt[:2] == '--'
opt = opt[2:]
alias = self.alias.get(opt)
if alias:
opt = alias
if not self.takes_arg[opt]: # boolean option?
assert val == '', "boolean option can't have value"
alias = self.negative_alias.get(opt)
if alias:
opt = alias
val = 0
else:
val = 1
attr = self.attr_name[opt]
# The only repeating option at the moment is 'verbose'.
# It has a negative option -q quiet, which should set verbose = 0.
if val and self.repeat.get(attr) is not None:
val = getattr(object, attr, 0) + 1
setattr(object, attr, val)
self.option_order.append((opt, val))
# for opts
if created_object:
return args, object
else:
return args
# getopt()
def get_option_order (self):
"""Returns the list of (option, value) tuples processed by the
previous run of 'getopt()'. Raises RuntimeError if
'getopt()' hasn't been called yet.
"""
if self.option_order is None:
raise RuntimeError, "'getopt()' hasn't been called yet"
else:
return self.option_order
def generate_help (self, header=None):
"""Generate help text (a list of strings, one per suggested line of
output) from the option table for this FancyGetopt object.
"""
# Blithely assume the option table is good: probably wouldn't call
# 'generate_help()' unless you've already called 'getopt()'.
# First pass: determine maximum length of long option names
max_opt = 0
for option in self.option_table:
long = option[0]
short = option[1]
l = len(long)
if long[-1] == '=':
l = l - 1
if short is not None:
l = l + 5 # " (-x)" where short == 'x'
if l > max_opt:
max_opt = l
opt_width = max_opt + 2 + 2 + 2 # room for indent + dashes + gutter
# Typical help block looks like this:
# --foo controls foonabulation
# Help block for longest option looks like this:
# --flimflam set the flim-flam level
# and with wrapped text:
# --flimflam set the flim-flam level (must be between
# 0 and 100, except on Tuesdays)
# Options with short names will have the short name shown (but
# it doesn't contribute to max_opt):
# --foo (-f) controls foonabulation
# If adding the short option would make the left column too wide,
# we push the explanation off to the next line
# --flimflam (-l)
# set the flim-flam level
# Important parameters:
# - 2 spaces before option block start lines
# - 2 dashes for each long option name
# - min. 2 spaces between option and explanation (gutter)
# - 5 characters (incl. space) for short option name
# Now generate lines of help text. (If 80 columns were good enough
# for Jesus, then 78 columns are good enough for me!)
line_width = 78
text_width = line_width - opt_width
big_indent = ' ' * opt_width
if header:
lines = [header]
else:
lines = ['Option summary:']
for option in self.option_table:
long, short, help = option[:3]
text = wrap_text(help, text_width)
if long[-1] == '=':
long = long[0:-1]
# Case 1: no short option at all (makes life easy)
if short is None:
if text:
lines.append(" --%-*s %s" % (max_opt, long, text[0]))
else:
lines.append(" --%-*s " % (max_opt, long))
# Case 2: we have a short option, so we have to include it
# just after the long option
else:
opt_names = "%s (-%s)" % (long, short)
if text:
lines.append(" --%-*s %s" %
(max_opt, opt_names, text[0]))
else:
lines.append(" --%-*s" % opt_names)
for l in text[1:]:
lines.append(big_indent + l)
# for self.option_table
return lines
# generate_help ()
def print_help (self, header=None, file=None):
if file is None:
file = sys.stdout
for line in self.generate_help(header):
file.write(line + "\n")
# class FancyGetopt
def fancy_getopt (options, negative_opt, object, args):
parser = FancyGetopt(options)
parser.set_negative_aliases(negative_opt)
return parser.getopt(args, object)
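# Illustrative usage sketch (the option table and argument list are hypothetical,
# not taken from distutils itself):
#   options = [('verbose', 'v', 'run verbosely'), ('output=', 'o', 'output file')]
#   leftover, opts = fancy_getopt(options, {}, None, ['-v', '--output=out.txt'])
#   # opts.verbose == 1, opts.output == 'out.txt'; leftover holds positional args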
WS_TRANS = string.maketrans(string.whitespace, ' ' * len(string.whitespace))
def wrap_text (text, width):
"""wrap_text(text : string, width : int) -> [string]
Split 'text' into multiple lines of no more than 'width' characters
each, and return the list of strings that results.
"""
if text is None:
return []
if len(text) <= width:
return [text]
text = string.expandtabs(text)
text = string.translate(text, WS_TRANS)
chunks = re.split(r'( +|-+)', text)
chunks = filter(None, chunks) # ' - ' results in empty strings
lines = []
while chunks:
cur_line = [] # list of chunks (to-be-joined)
cur_len = 0 # length of current line
while chunks:
l = len(chunks[0])
if cur_len + l <= width: # can squeeze (at least) this chunk in
cur_line.append(chunks[0])
del chunks[0]
cur_len = cur_len + l
else: # this line is full
# drop last chunk if all space
if cur_line and cur_line[-1][0] == ' ':
del cur_line[-1]
break
if chunks: # any chunks left to process?
# if the current line is still empty, then we had a single
# chunk that's too big to fit on a line -- so we break
# down and break it up at the line width
if cur_len == 0:
cur_line.append(chunks[0][0:width])
chunks[0] = chunks[0][width:]
# all-whitespace chunks at the end of a line can be discarded
# (and we know from the re.split above that if a chunk has
# *any* whitespace, it is *all* whitespace)
if chunks[0][0] == ' ':
del chunks[0]
# and store this line in the list-of-all-lines -- as a single
# string, of course!
lines.append(string.join(cur_line, ''))
# while chunks
return lines
# wrap_text ()
def translate_longopt (opt):
"""Convert a long option name to a valid Python identifier by
changing "-" to "_".
"""
return string.translate(opt, longopt_xlate)
class OptionDummy:
"""Dummy class just used as a place to hold command-line option
values as instance attributes."""
def __init__ (self, options=[]):
"""Create a new OptionDummy instance. The attributes listed in
'options' will be initialized to None."""
for opt in options:
setattr(self, opt, None)
# class OptionDummy
if __name__ == "__main__":
text = """\
Tra-la-la, supercalifragilisticexpialidocious.
How *do* you spell that odd word, anyways?
(Someone ask Mary -- she'll know [or she'll
say, "How should I know?"].)"""
for w in (10, 20, 30, 40):
print "width: %d" % w
print string.join(wrap_text(text, w), "\n")
print
|
apache-2.0
|
keum/KC_CSO_Status
|
cso_status_geojson.py
|
1
|
7847
|
#peter remove, not used# import sys
#peter remove, not used# import os
import subprocess
import time
import csv
import urllib2
import pprint
import json
"""
Take the status CSV file from a URL and the coordinate CSV file to create
a data dictionary, then convert that dictionary into GeoJSON.
The GeoJSON data can then be loaded into a GitHub page and viewed
using its mapping component.
"""
"""
Input data is in CSV structure and output data is in GeoJSON format.
Remote rows come from cso_status_data, downloaded from the FTP URL site.
Example Data:
11TH.CSOSTATUS_N,3
30TH.CSOSTATUS_N,3
3RD.CSOSTATUS_N,3
And cso_coord.csv in the form of
[{'CSO_TagName': 'ALKI', 'X_COORD': '-122.4225', 'Y_COORD': '47.57024', 'Name':'Alki, 'DSN':'051'},
{.......}]
formatted_geojson_data_dict = {'type':'FeatureCollection','features':
[{'type':'Feature','properties':{},'geometry':{'type':'Point','coordinates':[]}}]}
We need a data structure template in Python that looks like this, then convert it to GeoJSON:
{'type':'FeatureCollection",
'features': [{'type': 'Features',
'properties':{'CSO_TagName': 'ALKI',
'Value': 3},
'geometries':{'type':'point',
'coordinates':[-122.322,
47.607]}
}
]
}
"""
# Downloading csv status values from the web, ftp site.
cso_status_data = urllib2.urlopen("http://your.kingcounty.gov/dnrp/library/wastewater/cso/img/CSO.CSV")
# Read csv file into a python list named cso_status_csv
text = cso_status_data.readlines() #reading each line of downloaded csv file
cso_status_csv = csv.reader(text) #creating new object call cso_status_csv from CSV file from url
#debug# pprint.pprint(cso_status_csv)
#Reading CSO with Coordinate in csv file locally and create list,
#substitute with the full data file cso_coord.csv, or partial_coord.csv for two-point data
cso_cord = open('cso_coord.csv', 'r')
reader = csv.DictReader(cso_cord)
location = list (reader)
cso_cord.close()
#debug# pprint.pprint(location)
"""this the format we want to output
-question: not sure how to iterate the location object into below formatted_data_dict
formatted_geojson_data_dict = {'type':'FeatureCollection','features':
[{'type':'Feature','properties':{},'geometry':{'type':'Point','coordinates':[]}}]}
for row in location:
formatted_geojson_data_dict['features'][row['CSO_TagName']] =
{'type':'Feature',
'properties':{},
'geometry':{'coordinates':[(row['X_COORD'])],[(row['Y_COORD'])]}}
"""
#Create dictionary with geojson template
geojson_data_dict = {'type':'FeatureCollection','features':[]}
for row in location:
# debug print type(row["X_COORD"])
# We want to populate this stub, for every row, in the location list
# {'type':'Features','properties':{},'geometry':{'type':'Point','coordinates':[]}}
geojson_data_dict['features'].append({'type':'Feature',
'properties':{'CSO_TagName':row['CSO_TagName'],
'DSN':row['DSN'],
'Name':row['Name'],
'Time_stamp':time.strftime("%Y-%m-%d %I:%M:%S %p", time.localtime()),
'Location':"%1.3f , %1.3f" % (float(row["X_COORD"]) ,float(row["Y_COORD"])),
'CSO_Status':0,'marker-color':'#666',
'description':'No Data Available'},
'geometry':{'type':'Point',
'coordinates':[float(row["X_COORD"]), float(row["Y_COORD"])]
}
})
#create brand new dictionary style with color according to that status
style_dict = {"1":{'marker-color':'#C12D2D','marker-symbol':'square','marker-size':'large','description':'Overflowing now'},
"2":{'marker-color':'#FFD700','marker-symbol':'triangle','marker-size':'medium','description':'Overflowed in the last 48 hrs'},
"3":{'marker-color':'#689F38','marker-symbol':'circle','marker-size':'small','description':'No recent overflow'},
"4":{'marker-color':'#A2A2A2','marker-symbol':'cross','marker-size':'small','description':'Data not available'}
}
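# A minimal sketch (illustrative station and status values only, not from the live feed) of what
# the loop further below does: the status code read from the CSV selects the matching style entry.
_example_props = {'CSO_TagName': 'ALKI', 'CSO_Status': '3'}
_example_props.update(style_dict['3'])  # adds marker-color, marker-symbol, marker-size, description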
#??? - Not sure how to add the value onto the geojson_data_dict object; replace the
##default value of 0........
"""Paul M. helped to create the loop to add the CSO_Status value
(geojson_data_dict['features'][0]) is dict
and print it returns
{'geometry':{'coordinates':[-122.4225,47.57024],'type':'Point'},
'properties':{'CSO_Status':0,'CSO_TagName':'ALKI'},'type':'Feature'}
Replace one of geojson_data_dict's values with the CSO status. Refer to the note.
"""
# Populate with station values, based on station names.
for line in cso_status_csv:
#Test to see whether the record is in the Seattle CSO data or not
if line[0][0:5]=="NPDES": # this indicates the data is Seattle CSO
cso_name = line[0]
#cso_symbol = 'x' #indicate Seattle CSO
else: #this is not Seattle CSO and is for King County CSO
cso_name = line[0][0:len(line[0])-12]
#cso_symbol = 'circle' # this indicates KC CSO
#for all records
CSO_Status = line[1]
# If CSO exists, add to it.
#Iterate through 'features' list
for element in geojson_data_dict['features']:
if cso_name == element['properties']['CSO_TagName']:
element['properties']['CSO_Status'] = CSO_Status
#element['properties'].append(style_dict[CSO_Status])
element['properties']['marker-color']=style_dict[CSO_Status]['marker-color']
element['properties']['marker-size']=style_dict[CSO_Status]['marker-size']
element['properties']['description']=style_dict[CSO_Status]['description']
#adding new element with symbol specific to seattle and KC
element['properties']['marker-symbol']=style_dict[CSO_Status]['marker-symbol']
#write out same element with additional style properties
formatted_geojson_data_dict = json.dumps(geojson_data_dict)
pprint.pprint(formatted_geojson_data_dict)
#take the formatted_geojson_data_dict string and write it into a file using 'with open'
#out_file_fullpath ='/Users/peter/Documents/KC_CSO_Status/test_file5_5.geojson'
#take the formatted_geojson_data_dict string and write it into a file using 'with open'; the path below is for Windows
#out_file_fullpath ='/Users/keump/Documents/GitHub/KC_CSO_Status/test_file.geojson' #for Windows 7
# file for public repo for Windows machine
out_file_fullpath = '/Users/keump/Documents/GitHub/data_display/cso_test_file.geojson'
# directory for public repo
out_file_fullpath_directory = '/Users/keump/Documents/GitHub/data_display'
# file for public repo for OS machine
#out_file_fullpath ='/Users/peter/Documents/KC_CSO_Status/test_file.geojson' #for macbook
with open(out_file_fullpath, 'w') as out_file:
out_file.write(formatted_geojson_data_dict)
#use the subprocess module to push the data to the GitHub site to be viewed
subprocess.call(['git', '--git-dir', out_file_fullpath_directory + '/.git',
'--work-tree', out_file_fullpath_directory,
'add', out_file_fullpath])
subprocess.call(['git', '--git-dir', out_file_fullpath_directory +'/.git',
'--work-tree', out_file_fullpath_directory,
'commit', '-a', '-m', '"Data Upload: ' + time.strftime("%Y-%m-%d %I:%M:%S %p", time.localtime()) + '"'])
subprocess.call(['git', '--git-dir', out_file_fullpath_directory + '/.git',
'--work-tree', out_file_fullpath_directory,
'push'])
|
mit
|
t794104/ansible
|
test/integration/targets/aws_lambda/files/mini_lambda.py
|
139
|
1237
|
from __future__ import print_function
import json
import os
def handler(event, context):
"""
The handler function is the function which gets called each time
the lambda is run.
"""
    # printing goes to the cloudwatch log, allowing us to debug the lambda simply by finding
    # the log entry.
print("got event:\n" + json.dumps(event))
    # if the name parameter isn't present, this can throw an exception,
    # which will result in an Amazon-chosen failure from the lambda --
    # which can be completely fine.
name = event["name"]
# we can use environment variables as part of the configuration of the lambda
# which can change the behaviour of the lambda without needing a new upload
extra = os.environ.get("EXTRA_MESSAGE")
if extra is not None and len(extra) > 0:
greeting = "hello {0}. {1}".format(name, extra)
else:
greeting = "hello " + name
return {"message": greeting}
def main():
"""
This main function will normally never be called during normal
lambda use. It is here for testing the lambda program only.
"""
event = {"name": "james"}
context = None
print(handler(event, context))
if __name__ == '__main__':
main()
|
gpl-3.0
|
Pure4Team/desire820
|
tools/perf/tests/attr.py
|
3174
|
9441
|
#! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
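    # For example: compare_data('0|1', '1') -> True, and '*' on either side always matches,
    # e.g. compare_data('*', '0x13') -> True (values here are illustrative only).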
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
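# A hypothetical test file sketch (section and values are illustrative, not taken from the perf tree):
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   sample_period = 4000
#   freq          = 1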
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the word 'event',
        # optionally followed by ':', allowing a 'parent
        # event' to be loaded first as a base
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
|
gpl-2.0
|
migueldsw/TL-DA-TF
|
report_helper.py
|
1
|
2266
|
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
# pretty printing
np.set_printoptions(suppress=True)
np.set_printoptions(precision=3)
def checkDir(directory):
if not os.path.exists(directory):
os.makedirs(directory)
# graph plotting
def plotLine(x, y, path, fileName):
line, = plt.plot(x, y, '-', linewidth=2, color='blue', label='J')
plt.plot(x, y, 'bo')
    # ticks on the x axis
    # plt.xticks(np.arange(min(x), max(x)+1, 1.0))
    # ticks on the y axis
    # plt.yticks(np.arange(min(x), max(x)+1, .05))
# defines fixed x y range
# plt.axis([0,5,1,2])
# # draw vertical line from [xfinal,xinicial][yfinal,yinicial]
# for i in [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16]:
# plt.plot([i, i], [2, 1], 'k--')
# plt.legend(bbox_to_anchor=(1, 1), loc=2, borderaxespad=0.)
plt.legend(loc=1, borderaxespad=0.)
# dashes = [10, 5, 100, 5] # 10 points on, 5 off, 100 on, 5 off
# line.set_dashes(dashes)
# plt.show()
# stub
for i, j in zip(x, y):
# plt.annotate(str(j),xy=(i,j))
plt.annotate(str("%.3f" % j), xy=(i, j), xytext=(5, 5), textcoords='offset points')
# end stub
checkDir(path)
plt.savefig(path + "/" + fileName)
plt.clf()
plt.close()
def plotValuesLine(y, path, fileName):
plotLine(range(len(y)), y, path, fileName)
def plotDots(y, path, fileName):
x = range(len(y))
plt.plot(x, y, 'bo', label='classes')
plt.legend(loc=1, borderaxespad=0.)
# defines fixed x y range
plt.axis([0, len(y), min(y) - 1, max(y) + 1])
# for i,j in zip(x,y):
# plt.annotate(str(j),xy=(i,j))
checkDir(path)
plt.savefig(path + "/" + fileName)
plt.clf()
plt.close()
# file writing
def writeFile(fileName, lines):
f = open(fileName, 'w')
for l in lines:
f.write(l + "\n")
f.close()
def appendFile(fileName, lines):
f = open(fileName, 'a')
for l in lines:
f.write(str(l) + "\n")
f.close()
def strArray(l):
return ''.join(str(i) + ' ' for i in l)
def strMat(m):
out = []
for i in m:
out.append(strArray(i))
return out
def writeMat(fileName, m):
writeFile(fileName, strMat(m))
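# A minimal usage sketch (assumption: the matplotlib backend in use can write PNG files;
# file and directory names below are illustrative only).
if __name__ == '__main__':
    costs = [1.95, 1.42, 1.18, 1.07]
    plotValuesLine(costs, "reports", "cost_curve.png")   # plot of the J value per iteration
    writeMat("reports/confusion.txt", [[9, 1], [2, 8]])  # space-separated matrix dump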
|
apache-2.0
|
kdar/Wox
|
PythonHome/Lib/site-packages/pip/_vendor/requests/status_codes.py
|
695
|
3136
|
# -*- coding: utf-8 -*-
from .structures import LookupDict
_codes = {
# Informational.
100: ('continue',),
101: ('switching_protocols',),
102: ('processing',),
103: ('checkpoint',),
122: ('uri_too_long', 'request_uri_too_long'),
200: ('ok', 'okay', 'all_ok', 'all_okay', 'all_good', '\\o/', '✓'),
201: ('created',),
202: ('accepted',),
203: ('non_authoritative_info', 'non_authoritative_information'),
204: ('no_content',),
205: ('reset_content', 'reset'),
206: ('partial_content', 'partial'),
207: ('multi_status', 'multiple_status', 'multi_stati', 'multiple_stati'),
208: ('already_reported',),
226: ('im_used',),
# Redirection.
300: ('multiple_choices',),
301: ('moved_permanently', 'moved', '\\o-'),
302: ('found',),
303: ('see_other', 'other'),
304: ('not_modified',),
305: ('use_proxy',),
306: ('switch_proxy',),
307: ('temporary_redirect', 'temporary_moved', 'temporary'),
308: ('resume_incomplete', 'resume'),
# Client Error.
400: ('bad_request', 'bad'),
401: ('unauthorized',),
402: ('payment_required', 'payment'),
403: ('forbidden',),
404: ('not_found', '-o-'),
405: ('method_not_allowed', 'not_allowed'),
406: ('not_acceptable',),
407: ('proxy_authentication_required', 'proxy_auth', 'proxy_authentication'),
408: ('request_timeout', 'timeout'),
409: ('conflict',),
410: ('gone',),
411: ('length_required',),
412: ('precondition_failed', 'precondition'),
413: ('request_entity_too_large',),
414: ('request_uri_too_large',),
415: ('unsupported_media_type', 'unsupported_media', 'media_type'),
416: ('requested_range_not_satisfiable', 'requested_range', 'range_not_satisfiable'),
417: ('expectation_failed',),
418: ('im_a_teapot', 'teapot', 'i_am_a_teapot'),
422: ('unprocessable_entity', 'unprocessable'),
423: ('locked',),
424: ('failed_dependency', 'dependency'),
425: ('unordered_collection', 'unordered'),
426: ('upgrade_required', 'upgrade'),
428: ('precondition_required', 'precondition'),
429: ('too_many_requests', 'too_many'),
431: ('header_fields_too_large', 'fields_too_large'),
444: ('no_response', 'none'),
449: ('retry_with', 'retry'),
450: ('blocked_by_windows_parental_controls', 'parental_controls'),
451: ('unavailable_for_legal_reasons', 'legal_reasons'),
499: ('client_closed_request',),
# Server Error.
500: ('internal_server_error', 'server_error', '/o\\', '✗'),
501: ('not_implemented',),
502: ('bad_gateway',),
503: ('service_unavailable', 'unavailable'),
504: ('gateway_timeout',),
505: ('http_version_not_supported', 'http_version'),
506: ('variant_also_negotiates',),
507: ('insufficient_storage',),
509: ('bandwidth_limit_exceeded', 'bandwidth'),
510: ('not_extended',),
}
codes = LookupDict(name='status_codes')
for (code, titles) in list(_codes.items()):
for title in titles:
setattr(codes, title, code)
if not title.startswith('\\'):
setattr(codes, title.upper(), code)
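# A minimal lookup sketch (assumption: run as a standalone demo rather than as part of requests):
# each title registered above resolves to its numeric status code.
if __name__ == '__main__':
    print(codes.ok)          # 200
    print(codes.NOT_FOUND)   # 404
    print(codes.teapot)      # 418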
|
mit
|
slaughterjames/static
|
FILtriage.py
|
1
|
4667
|
'''
Static v0.1 - Copyright 2016 James Slaughter,
This file is part of Static v0.1.
Static v0.1 is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Static v0.1 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Static v0.1. If not, see <http://www.gnu.org/licenses/>.
'''
'''
FILtriage.py - This file is responsible for obtaining basic information about a target
file, including verifying the file type and gathering hashes
'''
#python imports
import sys
import os
import subprocess
#programmer generated imports
from logger import logger
from fileclasses import peclass, pdfclass, msoclass, elfclass
'''
filetriage
Class: This file is responsible for obtaining basic information about a target
file, including verifying the file type and gathering hashes
'''
class filetriage:
'''
Constructor
'''
def __init__(self):
fn = ''
'''
MD5()
Function: - Get the MD5 sum of the uploaded sample
'''
def MD5(self, target, logdir, logging, LOG, debug):
temp = ''
strpos = 0
if (logging == True):
LOG = logger()
newlogentry = ''
#Run the MD5 sum to pull the hash from the target file
subproc = subprocess.Popen('md5sum ' + target, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for md5data in subproc.stdout.readlines():
temp = md5data
strpos = temp.find(' ')
MD5 = temp[0:strpos]
print '[*] MD5 hash of file ' + target + ': ' + str(MD5)
if (logging == True):
newlogentry = 'MD5 hash of file ' + target + ': <strong>' + str(MD5) + '</strong>'
LOG.WriteLog(logdir, target, newlogentry)
return MD5
'''
SHA256()
Function: - Get the SHA256 sum of the uploaded sample
'''
def SHA256(self, target, logdir, logging, LOG, debug):
temp = ''
strpos = 0
newlogentry = ''
#Run the sha256 sum to pull the hash from the target file
subproc = subprocess.Popen('sha256sum '+ target, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for sha256data in subproc.stdout.readlines():
temp = sha256data
strpos = temp.find(' ')
SHA256 = temp[0:strpos]
print '[*] SHA256 hash of file ' + target + ': ' + str(SHA256)
if (logging == True):
newlogentry = 'SHA256 hash of file ' + target + ': <strong>' + str(SHA256) + '</strong>'
LOG.WriteLog(logdir, target, newlogentry)
newlogentry = 'The live VirusTotal Data can be found here: <a href=\"''https://www.virustotal.com/en/file/' + str(SHA256) + '/analysis/' '\"> VirusTotal Report </a>'
LOG.WriteLog(logdir, target, newlogentry)
print '[*] If a VirusTotal record exists, it will be located here: https://www.virustotal.com/en/file/' + str(SHA256) + '/analysis/'
return SHA256
'''
filetype()
Function: - verify the filetype of the sample
'''
def filetype(self, target, logdir, logging, LOG, debug):
temp = ''
intLen = 0
strpos = 0
newlogentry = ''
#Run the file command to pull the header data from the target
subproc = subprocess.Popen('file '+ target, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for filedata in subproc.stdout.readlines():
temp = filedata
intLen = len(temp)
strpos = temp.find(':')
header = temp[strpos+1:intLen]
print '[*] Fileheader: ' + filedata
if (logging == True):
newlogentry = 'Fileheader for ' + target + ': <strong>' + filedata + '</strong>'
LOG.WriteLog(logdir, target, newlogentry)
return header
'''
Triage()
Function: - Function caller
'''
def Triage(self, FIL, logging, logdir, debug):
if (logging == True):
LOG = logger()
else:
LOG = ''
FIL.MD5 = self.MD5(FIL.filename, logdir, logging, LOG, debug)
FIL.SHA256 = self.SHA256(FIL.filename, logdir, logging, LOG, debug)
FIL.header = self.filetype(FIL.filename, logdir, logging, LOG, debug)
return FIL
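# A minimal driver sketch (assumption: a FIL-like object exposing .filename/.MD5/.SHA256/.header;
# the real entry point that builds it lives elsewhere in Static and is not shown here).
if __name__ == '__main__':
    class _StubFIL:
        filename = '/bin/ls'
        MD5 = SHA256 = header = ''
    triaged = filetriage().Triage(_StubFIL(), False, '/tmp', False)
    print '[*] Header: ' + str(triaged.header)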
|
gpl-2.0
|
boberfly/gaffer
|
python/GafferUI/NodeSetEditor.py
|
2
|
16799
|
##########################################################################
#
# Copyright (c) 2011-2012, John Haddon. All rights reserved.
# Copyright (c) 2011-2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import six
import IECore
import Gaffer
import GafferUI
from Qt import QtCore
import weakref
## The NodeSetEditor is a base class for all Editors which focus their
# editing on a subset of nodes beneath a ScriptNode. This set defaults
# to the ScriptNode.selection() but can be modified to be any Set of nodes.
#
# The node set for any given editor can be optionally driven by that of some
# other editor, such that they don't need to be independently maintained. The
# default mode simply ensures they have the same node set. Custom modes can be
# registered for extended functionality.
class NodeSetEditor( GafferUI.Editor ) :
DriverModeNodeSet = "NodeSet"
__nodeSetDriverModes = {}
def __init__( self, topLevelWidget, scriptNode, **kw ) :
self.__nodeSet = Gaffer.StandardSet()
self.__nodeSetChangedSignal = GafferUI.WidgetSignal()
self.__nodeSetDriver = {}
self.__nodeSetDriverChangedSignal = GafferUI.WidgetSignal()
self.__drivenNodeSets = {}
self.__drivenNodeSetsChangedSignal = GafferUI.WidgetSignal()
GafferUI.Editor.__init__( self, topLevelWidget, scriptNode, **kw )
self.__titleFormat = None
# Allow derived classes to call `_updateFromSet()` themselves after construction,
# to avoid being called when they're only half constructed.
## \todo Should we call `__lazyUpdate()` instead, so `_updateFromSet()` is called
# when the editor becomes visible? Then derived classes shouldn't need to call
# `_updateFromSet()` in their constructors at all.
self.__setNodeSetInternal( self.scriptNode().selection(), callUpdateFromSet=False )
## Sets the nodes that will be displayed by this editor. As members are
# added to and removed from the set, the UI will be updated automatically
# to show them. This also calls `nodeSet.setRemoveOrphans( True )` so that
# deleted nodes are not visible in the UI.
#
# This will break any editor links, if set.
#
# The driver will be updated *after* the node set, such that calling
# `getNodeSetDriver` in the nodeSetChangedSignal will return the departing
# driver. TODO: We need to work out a sensible way to signal once state has
# stabilised
def setNodeSet( self, nodeSet ) :
self.__setNodeSetInternal( nodeSet, callUpdateFromSet=True )
# We do this after setting the node set, so that when the driver changed
# signal is emitted, we will have the new node set. Otherwise the editor
# looks like it has the old drivers node set still despite not having a
# driver...
self.setNodeSetDriver( None )
def getNodeSet( self ) :
return self.__nodeSet
## Called before nodeSetDriverChangedSignal in the event that setNodeSet breaks a driver link.
def nodeSetChangedSignal( self ) :
return self.__nodeSetChangedSignal
## Links the nodeSet for this editor to that of the supplied drivingEditor.
# The default mode results in a simple mirroring of the driver's node set
# to this editor. Other modes may be registered by other Gaffer modules.
# If drivingEditor is None, any existing links will be broken.
def setNodeSetDriver( self, drivingEditor, mode = DriverModeNodeSet ) :
if drivingEditor is not None :
assert( isinstance( drivingEditor, GafferUI.NodeSetEditor ) )
# We also need to stop people creating infinite loops
if self.drivesNodeSet( drivingEditor ) :
raise ValueError( "The supplied driver is already driven by this editor" )
if self.__nodeSetDriver :
previousDriver = self.__nodeSetDriver["weakDriver"]()
# It may have been deleted, we'll still have link data but the ref will be dead
if previousDriver is not None :
if drivingEditor is previousDriver and mode == self.__nodeSetDriver["mode"] :
return
else :
previousDriver.__unregisterDrivenEditor( self )
self.__nodeSetDriver = {}
if drivingEditor :
drivingEditor.__registerDrivenEditor( self, mode )
weakSelf = weakref.ref( self )
# We need to unlink ourselves if the driver goes away
def disconnect( _ ) :
if weakSelf() is not None:
weakSelf().setNodeSetDriver( None )
weakDriver = weakref.ref( drivingEditor, disconnect )
changeCallback = self.__nodeSetDriverModes[ mode ]
def updateFromDriver( _ ) :
if weakDriver() is not None and weakSelf() is not None :
nodeSet = weakDriver().getNodeSet()
if changeCallback :
nodeSet = changeCallback( weakSelf(), weakDriver() )
weakSelf().__setNodeSetInternal( nodeSet, callUpdateFromSet=True )
self.__nodeSetDriver = {
"mode" : mode,
"weakDriver" : weakDriver,
"driverNodeSetChangedConnection" : drivingEditor.nodeSetChangedSignal().connect( updateFromDriver ),
}
updateFromDriver( drivingEditor )
self.__nodeSetDriverChangedSignal( self )
self.__dirtyTitle()
## Returns a tuple of the drivingEditor and the drive mode.
# When there is no driver ( None, "" ) will be returned.
def getNodeSetDriver( self ) :
if self.__nodeSetDriver :
return ( self.__nodeSetDriver["weakDriver"](), self.__nodeSetDriver["mode"] )
return ( None, "" )
## Called whenever the editor's driving node set changes.
# Note: This is called after nodeSetChangedSignal in the event that
# the setNodeSet call breaks an existing driver link.
def nodeSetDriverChangedSignal( self ) :
return self.__nodeSetDriverChangedSignal
## Returns a dict of { editor : mode } that are driven by this editor.
	# If recurse is true, the link chain will be followed recursively to
	# also include editors that are indirectly driven by this one.
def drivenNodeSets( self, recurse = False ) :
# Unwrap the weak refs
driven = { w(): m for w,m in self.__drivenNodeSets.items() if w() is not None }
if recurse :
for editor in list( driven.keys() ) :
driven.update( editor.drivenNodeSets( recurse = True ) )
return driven
def drivenNodeSetsChangedSignal( self ) :
return self.__drivenNodeSetsChangedSignal
## Does this editor ultimately drive otherEditor
def drivesNodeSet( self, otherEditor ) :
assert( isinstance( otherEditor, GafferUI.NodeSetEditor ) )
driver = otherEditor
while True :
if driver is None :
break
if driver is self :
return True
driver, _ = driver.getNodeSetDriver()
return False
## Returns the editor that ultimately drives this editor. If this editor
# is not driven, None is returned.
def drivingEditor( self ) :
driver = None
upstreamEditor = self
while True :
upstreamEditor, _ = upstreamEditor.getNodeSetDriver()
if upstreamEditor :
driver = upstreamEditor
else :
break
return driver
def __registerDrivenEditor( self, drivenEditor, mode ) :
if drivenEditor in self.drivenNodeSets() :
return
self.__drivenNodeSets[ weakref.ref( drivenEditor ) ] = mode
self.__drivenNodeSetsChangedSignal( self )
def __unregisterDrivenEditor( self, drivenEditor ) :
for weakEditor in self.__drivenNodeSets :
if weakEditor() is drivenEditor :
del self.__drivenNodeSets[ weakEditor ]
self.__drivenNodeSetsChangedSignal( self )
break
## Call to register a new DriverMode that can be used with setNodeSetDriver.
# The supplied callback will be called with ( thisEditor, drivingEditor ) and
	# must return a derivative of Gaffer.Set that represents the node set to be
# set in the driven editor.
# If provided, 'description' should be a sensible message to describe the
# nature of the user-observed behaviour, '{editor}' will be replaced with
# the name of the driving editor. eg:
# "Following the source node for the scene selection of {editor}"
@classmethod
def registerNodeSetDriverMode( cls, mode, changeCallback, description = "Following {editor}." ) :
cls.__nodeSetDriverModes[ mode ] = changeCallback
# TODO: Move to the NodeSetEditor class once they are GraphComponents
Gaffer.Metadata.registerValue( "NodeSetEditor", "nodeSetDriverMode:%s:description" % mode, description )
@staticmethod
def nodeSetDriverModeDescription( mode ) :
return Gaffer.Metadata.value( "NodeSetEditor", "nodeSetDriverMode:%s:description" % mode )
## Overridden to display the names of the nodes being edited.
# Derived classes should override _titleFormat() rather than
# reimplement this again.
def getTitle( self ) :
t = GafferUI.Editor.getTitle( self )
if t :
return t
if self.__titleFormat is None :
self.__titleFormat = self._titleFormat()
self.__nameChangedConnections = []
for n in self.__titleFormat :
if isinstance( n, Gaffer.GraphComponent ) :
self.__nameChangedConnections.append( n.nameChangedSignal().connect( Gaffer.WeakMethod( self.__nameChanged ) ) )
result = ""
for t in self.__titleFormat :
if isinstance( t, six.string_types ) :
result += t
else :
result += t.getName()
return result
## Ensures that the specified node has a visible editor of this class type editing
# it, creating one if necessary. The `floating` argument may be passed a value of
# `True` or `False`, to force the acquisition of a panel that is either floating or
# docked respectively.
## \todo Consider how this relates to draggable editor tabs and editor floating
# once we implement that in CompoundEditor - perhaps acquire will become a method
# on CompoundEditor instead at this point.
@classmethod
def acquire( cls, node, floating = None ) :
if isinstance( node, Gaffer.ScriptNode ) :
script = node
else :
script = node.scriptNode()
scriptWindow = GafferUI.ScriptWindow.acquire( script )
if floating in ( None, False ) :
for editor in scriptWindow.getLayout().editors( type = cls ) :
if node.isSame( editor._lastAddedNode() ) :
editor.reveal()
return editor
if floating in ( None, True ) :
childWindows = scriptWindow.childWindows()
for window in childWindows :
if isinstance( window, _EditorWindow ) :
if isinstance( window.getChild(), cls ) and node in window.getChild().getNodeSet() :
window.setVisible( True )
return window.getChild()
editor = cls( script )
editor.setNodeSet( Gaffer.StandardSet( [ node ] ) )
if floating is False :
scriptWindow.getLayout().addEditor( editor )
else :
window = _EditorWindow( scriptWindow, editor )
# Ensure keyboard shortcuts are relayed to the main menu bar
scriptWindow.menuBar().addShortcutTarget( window )
window.setVisible( True )
if isinstance( editor, GafferUI.NodeEditor ) :
# The window will have opened at the perfect size for the
# contained widgets. But some NodeEditors have expanding
# sections and buttons to add new widgets, and for that
# reason, a minimum height of 400px has been deemed more
# suitable.
size = window._qtWidget().size()
if size.height() < 400 :
size.setHeight( 400 )
window._qtWidget().resize( size )
return editor
def _lastAddedNode( self ) :
if len( self.__nodeSet ) :
return self.__nodeSet[-1]
return None
## Called when the contents of getNodeSet() have changed and need to be
# reflected in the UI - so must be implemented by derived classes to update
	# their UI appropriately. Updates are performed lazily to avoid unnecessary
# work, but any pending updates can be performed immediately by calling
# _doPendingUpdate().
#
# All implementations must first call the base class implementation.
def _updateFromSet( self ) :
self.__dirtyTitle()
# May be called to ensure that _updateFromSet() is called
# immediately if a lazy update has been scheduled but not
# yet performed.
def _doPendingUpdate( self ) :
self.__lazyUpdate.flush( self )
## May be reimplemented by derived classes to specify a combination of
# strings and node names to use in building the title. The NodeSetEditor
# will take care of updating the title appropriately as the nodes are renamed.
def _titleFormat( self, _prefix = None, _maxNodes = 2, _reverseNodes = False, _ellipsis = True ) :
if _prefix is None :
result = [ IECore.CamelCase.toSpaced( self.__class__.__name__ ) ]
else :
result = [ _prefix ]
# Only add node names if we're pinned in some way shape or form
if not self.__nodeSetIsScriptSelection() :
result.append( " [" )
numNames = min( _maxNodes, len( self.__nodeSet ) )
if numNames :
if _reverseNodes :
nodes = self.__nodeSet[len(self.__nodeSet)-numNames:]
nodes.reverse()
else :
nodes = self.__nodeSet[:numNames]
for i, node in enumerate( nodes ) :
result.append( node )
if i < numNames - 1 :
result.append( ", " )
if _ellipsis and len( self.__nodeSet ) > _maxNodes :
result.append( "..." )
result.append( "]" )
return result
def __dirtyTitle( self ) :
# flush information needed for making the title -
# we'll update it lazily in getTitle().
self.__nameChangedConnections = []
self.__titleFormat = None
self.titleChangedSignal()( self )
def __nodeSetIsScriptSelection( self ) :
driver = self.drivingEditor() or self
return driver.getNodeSet() == self.scriptNode().selection()
def __setNodeSetInternal( self, nodeSet, callUpdateFromSet ) :
if self.__nodeSet.isSame( nodeSet ) :
return
prevSet = self.__nodeSet
self.__nodeSet = nodeSet
self.__memberAddedConnection = self.__nodeSet.memberAddedSignal().connect( Gaffer.WeakMethod( self.__membersChanged ) )
self.__memberRemovedConnection = self.__nodeSet.memberRemovedSignal().connect( Gaffer.WeakMethod( self.__membersChanged ) )
self.__dirtyTitle()
if isinstance( nodeSet, Gaffer.StandardSet ) :
nodeSet.setRemoveOrphans( True )
if callUpdateFromSet :
# only update if the nodes being held have actually changed,
# so we don't get unnecessary flicker in any of the uis.
needsUpdate = len( prevSet ) != len( self.__nodeSet )
if not needsUpdate :
for i in range( 0, len( prevSet ) ) :
if not prevSet[i].isSame( self.__nodeSet[i] ) :
needsUpdate = True
break
if needsUpdate :
self._updateFromSet()
self.__nodeSetChangedSignal( self )
def __nameChanged( self, node ) :
self.titleChangedSignal()( self )
def __membersChanged( self, set, member ) :
self.__lazyUpdate()
@GafferUI.LazyMethod()
def __lazyUpdate( self ) :
self._updateFromSet()
class _EditorWindow( GafferUI.Window ) :
def __init__( self, parentWindow, editor, **kw ) :
GafferUI.Window.__init__( self, borderWidth = 8, **kw )
self.setChild( editor )
editor.titleChangedSignal().connect( Gaffer.WeakMethod( self.__updateTitle ), scoped = False )
editor.getNodeSet().memberRemovedSignal().connect( Gaffer.WeakMethod( self.__nodeSetMemberRemoved ), scoped = False )
parentWindow.addChildWindow( self, removeOnClose=True )
self.__updateTitle()
def __updateTitle( self, *unused ) :
self.setTitle( self.getChild().getTitle() )
def __nodeSetMemberRemoved( self, set, node ) :
if not len( set ) :
self.parent().removeChild( self )
NodeSetEditor.registerNodeSetDriverMode(
NodeSetEditor.DriverModeNodeSet, None,
"Following {editor}."
)
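# A hypothetical custom driver mode sketch (the mode name and behaviour are illustrative only,
# not part of Gaffer): follow the driving editor, but only mirror the first node of its set.
def __firstNodeOnly( thisEditor, drivingEditor ) :

	nodes = drivingEditor.getNodeSet()
	return Gaffer.StandardSet( [ nodes[0] ] if len( nodes ) else [] )

NodeSetEditor.registerNodeSetDriverMode(
	"FirstNodeOnly", __firstNodeOnly,
	"Following the first node of {editor}."
)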
|
bsd-3-clause
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-2.3/Lib/sets.py
|
1
|
18320
|
"""Classes to represent arbitrary sets (including sets of sets).
This module implements sets using dictionaries whose values are
ignored. The usual operations (union, intersection, deletion, etc.)
are provided as both methods and operators.
Important: sets are not sequences! While they support 'x in s',
'len(s)', and 'for x in s', none of those operations are unique for
sequences; for example, mappings support all three as well. The
characteristic operation for sequences is subscripting with small
integers: s[i], for i in range(len(s)). Sets don't support
subscripting at all. Also, sequences allow multiple occurrences and
their elements have a definite order; sets on the other hand don't
record multiple occurrences and don't remember the order of element
insertion (which is why they don't support s[i]).
The following classes are provided:
BaseSet -- All the operations common to both mutable and immutable
sets. This is an abstract class, not meant to be directly
instantiated.
Set -- Mutable sets, subclass of BaseSet; not hashable.
ImmutableSet -- Immutable sets, subclass of BaseSet; hashable.
An iterable argument is mandatory to create an ImmutableSet.
_TemporarilyImmutableSet -- A wrapper around a Set, hashable,
giving the same hash value as the immutable set equivalent
would have. Do not use this class directly.
Only hashable objects can be added to a Set. In particular, you cannot
really add a Set as an element to another Set; if you try, what is
actually added is an ImmutableSet built from it (it compares equal to
the one you tried adding).
When you ask if `x in y' where x is a Set and y is a Set or
ImmutableSet, x is wrapped into a _TemporarilyImmutableSet z, and
what's tested is actually `z in y'.
"""
# Code history:
#
# - Greg V. Wilson wrote the first version, using a different approach
# to the mutable/immutable problem, and inheriting from dict.
#
# - Alex Martelli modified Greg's version to implement the current
# Set/ImmutableSet approach, and make the data an attribute.
#
# - Guido van Rossum rewrote much of the code, made some API changes,
# and cleaned up the docstrings.
#
# - Raymond Hettinger added a number of speedups and other
# improvements.
__all__ = ['BaseSet', 'Set', 'ImmutableSet']
from itertools import ifilter, ifilterfalse
class BaseSet(object):
"""Common base class for mutable and immutable sets."""
__slots__ = ['_data']
# Constructor
def __init__(self):
"""This is an abstract class."""
# Don't call this from a concrete subclass!
if self.__class__ is BaseSet:
raise TypeError, ("BaseSet is an abstract class. "
"Use Set or ImmutableSet.")
# Standard protocols: __len__, __repr__, __str__, __iter__
def __len__(self):
"""Return the number of elements of a set."""
return len(self._data)
def __repr__(self):
"""Return string representation of a set.
This looks like 'Set([<list of elements>])'.
"""
return self._repr()
# __str__ is the same as __repr__
__str__ = __repr__
def _repr(self, sorted=False):
elements = self._data.keys()
if sorted:
elements.sort()
return '%s(%r)' % (self.__class__.__name__, elements)
def __iter__(self):
"""Return an iterator over the elements or a set.
This is the keys iterator for the underlying dict.
"""
return self._data.iterkeys()
# Three-way comparison is not supported. However, because __eq__ is
# tried before __cmp__, if Set x == Set y, x.__eq__(y) returns True and
# then cmp(x, y) returns 0 (Python doesn't actually call __cmp__ in this
# case).
def __cmp__(self, other):
raise TypeError, "can't compare sets using cmp()"
# Equality comparisons using the underlying dicts. Mixed-type comparisons
# are allowed here, where Set == z for non-Set z always returns False,
# and Set != z always True. This allows expressions like "x in y" to
# give the expected result when y is a sequence of mixed types, not
# raising a pointless TypeError just because y contains a Set, or x is
    # a Set and y contains a non-set ("in" invokes only __eq__).
# Subtle: it would be nicer if __eq__ and __ne__ could return
# NotImplemented instead of True or False. Then the other comparand
# would get a chance to determine the result, and if the other comparand
# also returned NotImplemented then it would fall back to object address
# comparison (which would always return False for __eq__ and always
# True for __ne__). However, that doesn't work, because this type
# *also* implements __cmp__: if, e.g., __eq__ returns NotImplemented,
# Python tries __cmp__ next, and the __cmp__ here then raises TypeError.
def __eq__(self, other):
if isinstance(other, BaseSet):
return self._data == other._data
else:
return False
def __ne__(self, other):
if isinstance(other, BaseSet):
return self._data != other._data
else:
return True
# Copying operations
def copy(self):
"""Return a shallow copy of a set."""
result = self.__class__()
result._data.update(self._data)
return result
__copy__ = copy # For the copy module
def __deepcopy__(self, memo):
"""Return a deep copy of a set; used by copy module."""
# This pre-creates the result and inserts it in the memo
# early, in case the deep copy recurses into another reference
# to this same set. A set can't be an element of itself, but
# it can certainly contain an object that has a reference to
# itself.
from copy import deepcopy
result = self.__class__()
memo[id(self)] = result
data = result._data
value = True
for elt in self:
data[deepcopy(elt, memo)] = value
return result
# Standard set operations: union, intersection, both differences.
# Each has an operator version (e.g. __or__, invoked with |) and a
# method version (e.g. union).
# Subtle: Each pair requires distinct code so that the outcome is
# correct when the type of other isn't suitable. For example, if
# we did "union = __or__" instead, then Set().union(3) would return
# NotImplemented instead of raising TypeError (albeit that *why* it
# raises TypeError as-is is also a bit subtle).
def __or__(self, other):
"""Return the union of two sets as a new set.
(I.e. all elements that are in either set.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
result = self.__class__()
result._data = self._data.copy()
result._data.update(other._data)
return result
def union(self, other):
"""Return the union of two sets as a new set.
(I.e. all elements that are in either set.)
"""
return self | other
def __and__(self, other):
"""Return the intersection of two sets as a new set.
(I.e. all elements that are in both sets.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
if len(self) <= len(other):
little, big = self, other
else:
little, big = other, self
common = ifilter(big._data.has_key, little)
return self.__class__(common)
def intersection(self, other):
"""Return the intersection of two sets as a new set.
(I.e. all elements that are in both sets.)
"""
return self & other
def __xor__(self, other):
"""Return the symmetric difference of two sets as a new set.
(I.e. all elements that are in exactly one of the sets.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
result = self.__class__()
data = result._data
value = True
selfdata = self._data
otherdata = other._data
for elt in ifilterfalse(otherdata.has_key, selfdata):
data[elt] = value
for elt in ifilterfalse(selfdata.has_key, otherdata):
data[elt] = value
return result
def symmetric_difference(self, other):
"""Return the symmetric difference of two sets as a new set.
(I.e. all elements that are in exactly one of the sets.)
"""
return self ^ other
def __sub__(self, other):
"""Return the difference of two sets as a new Set.
(I.e. all elements that are in this set and not in the other.)
"""
if not isinstance(other, BaseSet):
return NotImplemented
result = self.__class__()
data = result._data
value = True
for elt in ifilterfalse(other._data.has_key, self):
data[elt] = value
return result
def difference(self, other):
"""Return the difference of two sets as a new Set.
(I.e. all elements that are in this set and not in the other.)
"""
return self - other
# Membership test
def __contains__(self, element):
"""Report whether an element is a member of a set.
(Called in response to the expression `element in self'.)
"""
try:
return element in self._data
except TypeError:
transform = getattr(element, "__as_temporarily_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
return transform() in self._data
# Subset and superset test
def issubset(self, other):
"""Report whether another set contains this set."""
self._binary_sanity_check(other)
if len(self) > len(other): # Fast check for obvious cases
return False
for elt in ifilterfalse(other._data.has_key, self):
return False
return True
def issuperset(self, other):
"""Report whether this set contains another set."""
self._binary_sanity_check(other)
if len(self) < len(other): # Fast check for obvious cases
return False
for elt in ifilterfalse(self._data.has_key, other):
return False
return True
# Inequality comparisons using the is-subset relation.
__le__ = issubset
__ge__ = issuperset
def __lt__(self, other):
self._binary_sanity_check(other)
return len(self) < len(other) and self.issubset(other)
def __gt__(self, other):
self._binary_sanity_check(other)
return len(self) > len(other) and self.issuperset(other)
# Assorted helpers
def _binary_sanity_check(self, other):
# Check that the other argument to a binary operation is also
# a set, raising a TypeError otherwise.
if not isinstance(other, BaseSet):
raise TypeError, "Binary operation only permitted between sets"
def _compute_hash(self):
# Calculate hash code for a set by xor'ing the hash codes of
# the elements. This ensures that the hash code does not depend
# on the order in which elements are added to the set. This is
# not called __hash__ because a BaseSet should not be hashable;
# only an ImmutableSet is hashable.
result = 0
for elt in self:
result ^= hash(elt)
return result
def _update(self, iterable):
# The main loop for update() and the subclass __init__() methods.
data = self._data
# Use the fast update() method when a dictionary is available.
if isinstance(iterable, BaseSet):
data.update(iterable._data)
return
value = True
if type(iterable) in (list, tuple, xrange):
# Optimized: we know that __iter__() and next() can't
# raise TypeError, so we can move 'try:' out of the loop.
it = iter(iterable)
while True:
try:
for element in it:
data[element] = value
return
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
data[transform()] = value
else:
# Safe: only catch TypeError where intended
for element in iterable:
try:
data[element] = value
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
data[transform()] = value
class ImmutableSet(BaseSet):
"""Immutable set class."""
__slots__ = ['_hashcode']
# BaseSet + hashing
def __init__(self, iterable=None):
"""Construct an immutable set from an optional iterable."""
self._hashcode = None
self._data = {}
if iterable is not None:
self._update(iterable)
def __hash__(self):
if self._hashcode is None:
self._hashcode = self._compute_hash()
return self._hashcode
def __getstate__(self):
return self._data, self._hashcode
def __setstate__(self, state):
self._data, self._hashcode = state
class Set(BaseSet):
""" Mutable set class."""
__slots__ = []
# BaseSet + operations requiring mutability; no hashing
def __init__(self, iterable=None):
"""Construct a set from an optional iterable."""
self._data = {}
if iterable is not None:
self._update(iterable)
def __getstate__(self):
# getstate's results are ignored if it is not
return self._data,
def __setstate__(self, data):
self._data, = data
def __hash__(self):
"""A Set cannot be hashed."""
# We inherit object.__hash__, so we must deny this explicitly
raise TypeError, "Can't hash a Set, only an ImmutableSet."
# In-place union, intersection, differences.
# Subtle: The xyz_update() functions deliberately return None,
# as do all mutating operations on built-in container types.
# The __xyz__ spellings have to return self, though.
def __ior__(self, other):
"""Update a set with the union of itself and another."""
self._binary_sanity_check(other)
self._data.update(other._data)
return self
def union_update(self, other):
"""Update a set with the union of itself and another."""
self |= other
def __iand__(self, other):
"""Update a set with the intersection of itself and another."""
self._binary_sanity_check(other)
self._data = (self & other)._data
return self
def intersection_update(self, other):
"""Update a set with the intersection of itself and another."""
self &= other
def __ixor__(self, other):
"""Update a set with the symmetric difference of itself and another."""
self._binary_sanity_check(other)
data = self._data
value = True
for elt in other:
if elt in data:
del data[elt]
else:
data[elt] = value
return self
def symmetric_difference_update(self, other):
"""Update a set with the symmetric difference of itself and another."""
self ^= other
def __isub__(self, other):
"""Remove all elements of another set from this set."""
self._binary_sanity_check(other)
data = self._data
for elt in ifilter(data.has_key, other):
del data[elt]
return self
def difference_update(self, other):
"""Remove all elements of another set from this set."""
self -= other
# Python dict-like mass mutations: update, clear
def update(self, iterable):
"""Add all values from an iterable (such as a list or file)."""
self._update(iterable)
def clear(self):
"""Remove all elements from this set."""
self._data.clear()
# Single-element mutations: add, remove, discard
def add(self, element):
"""Add an element to a set.
This has no effect if the element is already present.
"""
try:
self._data[element] = True
except TypeError:
transform = getattr(element, "__as_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
self._data[transform()] = True
def remove(self, element):
"""Remove an element from a set; it must be a member.
If the element is not a member, raise a KeyError.
"""
try:
del self._data[element]
except TypeError:
transform = getattr(element, "__as_temporarily_immutable__", None)
if transform is None:
raise # re-raise the TypeError exception we caught
del self._data[transform()]
def discard(self, element):
"""Remove an element from a set if it is a member.
If the element is not a member, do nothing.
"""
try:
self.remove(element)
except KeyError:
pass
def pop(self):
"""Remove and return an arbitrary set element."""
return self._data.popitem()[0]
def __as_immutable__(self):
# Return a copy of self as an immutable set
return ImmutableSet(self)
def __as_temporarily_immutable__(self):
# Return self wrapped in a temporarily immutable set
return _TemporarilyImmutableSet(self)
class _TemporarilyImmutableSet(BaseSet):
# Wrap a mutable set as if it was temporarily immutable.
# This only supplies hashing and equality comparisons.
def __init__(self, set):
self._set = set
self._data = set._data # Needed by ImmutableSet.__eq__()
def __hash__(self):
return self._set._compute_hash()
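# A minimal usage sketch (assumption: run as a standalone demo of this Python 2 module).
if __name__ == '__main__':
    engineers = Set(['alice', 'bob'])
    managers = Set(['bob', 'carol'])
    print engineers | managers      # union of both groups
    print engineers & managers      # intersection: Set(['bob'])
    frozen = ImmutableSet(engineers)
    print Set([frozen, managers])   # members are stored as ImmutableSets, so sets can nest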
|
mit
|
idea4bsd/idea4bsd
|
python/lib/Lib/site-packages/django/core/cache/backends/db.py
|
227
|
6002
|
"Database cache backend."
from django.core.cache.backends.base import BaseCache
from django.db import connections, router, transaction, DatabaseError
import base64, time
from datetime import datetime
try:
import cPickle as pickle
except ImportError:
import pickle
class Options(object):
"""A class that will quack like a Django model _meta class.
This allows cache operations to be controlled by the router
"""
def __init__(self, table):
self.db_table = table
self.app_label = 'django_cache'
self.module_name = 'cacheentry'
self.verbose_name = 'cache entry'
self.verbose_name_plural = 'cache entries'
self.object_name = 'CacheEntry'
self.abstract = False
self.managed = True
self.proxy = False
class BaseDatabaseCache(BaseCache):
def __init__(self, table, params):
BaseCache.__init__(self, params)
self._table = table
class CacheEntry(object):
_meta = Options(table)
self.cache_model_class = CacheEntry
class DatabaseCache(BaseDatabaseCache):
def get(self, key, default=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
db = router.db_for_read(self.cache_model_class)
table = connections[db].ops.quote_name(self._table)
cursor = connections[db].cursor()
cursor.execute("SELECT cache_key, value, expires FROM %s WHERE cache_key = %%s" % table, [key])
row = cursor.fetchone()
if row is None:
return default
now = datetime.now()
if row[2] < now:
db = router.db_for_write(self.cache_model_class)
cursor = connections[db].cursor()
cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
transaction.commit_unless_managed(using=db)
return default
value = connections[db].ops.process_clob(row[1])
return pickle.loads(base64.decodestring(value))
def set(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
self._base_set('set', key, value, timeout)
def add(self, key, value, timeout=None, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
return self._base_set('add', key, value, timeout)
def _base_set(self, mode, key, value, timeout=None):
if timeout is None:
timeout = self.default_timeout
db = router.db_for_write(self.cache_model_class)
table = connections[db].ops.quote_name(self._table)
cursor = connections[db].cursor()
cursor.execute("SELECT COUNT(*) FROM %s" % table)
num = cursor.fetchone()[0]
now = datetime.now().replace(microsecond=0)
exp = datetime.fromtimestamp(time.time() + timeout).replace(microsecond=0)
if num > self._max_entries:
self._cull(db, cursor, now)
encoded = base64.encodestring(pickle.dumps(value, 2)).strip()
cursor.execute("SELECT cache_key, expires FROM %s WHERE cache_key = %%s" % table, [key])
try:
result = cursor.fetchone()
if result and (mode == 'set' or
(mode == 'add' and result[1] < now)):
cursor.execute("UPDATE %s SET value = %%s, expires = %%s WHERE cache_key = %%s" % table,
[encoded, connections[db].ops.value_to_db_datetime(exp), key])
else:
cursor.execute("INSERT INTO %s (cache_key, value, expires) VALUES (%%s, %%s, %%s)" % table,
[key, encoded, connections[db].ops.value_to_db_datetime(exp)])
except DatabaseError:
# To be threadsafe, updates/inserts are allowed to fail silently
transaction.rollback_unless_managed(using=db)
return False
else:
transaction.commit_unless_managed(using=db)
return True
def delete(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
db = router.db_for_write(self.cache_model_class)
table = connections[db].ops.quote_name(self._table)
cursor = connections[db].cursor()
cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
transaction.commit_unless_managed(using=db)
def has_key(self, key, version=None):
key = self.make_key(key, version=version)
self.validate_key(key)
db = router.db_for_read(self.cache_model_class)
table = connections[db].ops.quote_name(self._table)
cursor = connections[db].cursor()
now = datetime.now().replace(microsecond=0)
cursor.execute("SELECT cache_key FROM %s WHERE cache_key = %%s and expires > %%s" % table,
[key, connections[db].ops.value_to_db_datetime(now)])
return cursor.fetchone() is not None
def _cull(self, db, cursor, now):
if self._cull_frequency == 0:
self.clear()
else:
table = connections[db].ops.quote_name(self._table)
cursor.execute("DELETE FROM %s WHERE expires < %%s" % table,
[connections[db].ops.value_to_db_datetime(now)])
cursor.execute("SELECT COUNT(*) FROM %s" % table)
num = cursor.fetchone()[0]
if num > self._max_entries:
cursor.execute("SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s" % table, [num / self._cull_frequency])
cursor.execute("DELETE FROM %s WHERE cache_key < %%s" % table, [cursor.fetchone()[0]])
def clear(self):
db = router.db_for_write(self.cache_model_class)
table = connections[db].ops.quote_name(self._table)
cursor = connections[db].cursor()
cursor.execute('DELETE FROM %s' % table)
# For backwards compatibility
class CacheClass(DatabaseCache):
pass
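# A minimal configuration sketch for this backend (assumes a Django project;
# the table name is illustrative and must first be created with
# `manage.py createcachetable my_cache_table`):
#
#   CACHES = {
#       'default': {
#           'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
#           'LOCATION': 'my_cache_table',
#       }
#   }
#
#   from django.core.cache import cache
#   cache.set('greeting', 'hello', 30)   # stored via _base_set('set', ...)
#   cache.get('greeting')                # 'hello' until the 30-second timeout expires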
|
apache-2.0
|
scrollback/kuma
|
vendor/packages/setuptools/setuptools/command/install_scripts.py
|
32
|
1921
|
from distutils.command.install_scripts import install_scripts \
as _install_scripts
from easy_install import get_script_args, sys_executable, chmod
from pkg_resources import Distribution, PathMetadata, ensure_directory
import os
from distutils import log
class install_scripts(_install_scripts):
"""Do normal script install, plus any egg_info wrapper scripts"""
def initialize_options(self):
_install_scripts.initialize_options(self)
self.no_ep = False
def run(self):
self.run_command("egg_info")
if self.distribution.scripts:
_install_scripts.run(self) # run first to set up self.outfiles
else:
self.outfiles = []
if self.no_ep:
# don't install entry point scripts into .egg file!
return
ei_cmd = self.get_finalized_command("egg_info")
dist = Distribution(
ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info),
ei_cmd.egg_name, ei_cmd.egg_version,
)
bs_cmd = self.get_finalized_command('build_scripts')
executable = getattr(bs_cmd,'executable',sys_executable)
is_wininst = getattr(
self.get_finalized_command("bdist_wininst"), '_is_running', False
)
for args in get_script_args(dist, executable, is_wininst):
self.write_script(*args)
def write_script(self, script_name, contents, mode="t", *ignored):
"""Write an executable file to the scripts directory"""
log.info("Installing %s script to %s", script_name, self.install_dir)
target = os.path.join(self.install_dir, script_name)
self.outfiles.append(target)
if not self.dry_run:
ensure_directory(target)
f = open(target,"w"+mode)
f.write(contents)
f.close()
chmod(target,0755)
|
mpl-2.0
|
xavierh/minecolonies
|
tools/exportLang.py
|
1
|
1512
|
#!/usr/local/bin/python3
import sys
import requests
import json
import os
base_url = "https://poeditor.com/api/"
api_key = ""
def post(data):
r = requests.post(base_url, data=data)
r.raise_for_status()
return r.json()
def get(path):
r = requests.get(path)
r.raise_for_status()
return r.json()
def getLangs():
return post({"api_token":api_key, "id":"69487", "action":"list_languages"})['list']
def getStrings(lang):
return get(post({"api_token":api_key, "id":"69487", "action":"export", "type":"key_value_json", "language":lang})['item'])
def getMcLang(poe, mc_langs):
if len(poe) == 2:
for lang in mc_langs:
if lang[:2] == poe:
return lang
else:
return poe[:2] + "_" + poe[3:].upper() + ".lang"
if __name__ == "__main__":
if len(sys.argv) > 1:
api_key = sys.argv[1]
else:
with open(".api_key") as f:
api_key = f.readline().strip()
poe_langs = []
for lang in getLangs():
poe_langs += [lang['code']]
poe_langs.sort(key = len, reverse = True)
mc_langs = os.listdir("../src/main/resources/assets/minecolonies/lang")
for poe_lang in poe_langs:
strings = getStrings(poe_lang)
mc_lang = getMcLang(poe_lang, mc_langs)
print(mc_lang)
with open("../src/main/resources/assets/minecolonies/lang/" + mc_lang, 'w') as f:
for k,v in strings.items():
if v == '':
f.write('#' + k + '=' + v + '\n')
else:
f.write(k + '=' + v + '\n')
mc_langs.remove(mc_lang)
if len(mc_langs) != 0:
print("We messed up!")
print("Langs left:")
print(mc_langs)
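# Invocation sketch (assumes a POEditor API key, passed either as the first
# argument or stored in a .api_key file next to the script, plus the standard
# repository layout):
#
#   python3 exportLang.py <poeditor_api_key>
#
# Each POEditor language is written to
# ../src/main/resources/assets/minecolonies/lang/<xx_YY>.lang as key=value
# lines, with empty translations commented out.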
|
gpl-3.0
|
jmhsi/justin_tinker
|
data_science/courses/learning_dl_packages/models/research/pcl_rl/controller.py
|
7
|
16079
|
# Copyright 2017 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Controller coordinates sampling and training model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import pickle
import random
flags = tf.flags
gfile = tf.gfile
FLAGS = flags.FLAGS
def find_best_eps_lambda(rewards, lengths):
"""Find the best lambda given a desired epsilon = FLAGS.max_divergence."""
# perhaps not the best way to do this
desired_div = FLAGS.max_divergence * np.mean(lengths)
def calc_divergence(eps_lambda):
max_reward = np.max(rewards)
logz = (max_reward / eps_lambda +
np.log(np.mean(np.exp((rewards - max_reward) / eps_lambda))))
exprr = np.mean(np.exp(rewards / eps_lambda - logz) *
rewards / eps_lambda)
return exprr - logz
left = 0.0
right = 1000.0
if len(rewards) <= 8:
return (left + right) / 2
num_iter = max(4, 1 + int(np.log((right - left) / 0.1) / np.log(2.0)))
for _ in xrange(num_iter):
mid = (left + right) / 2
cur_div = calc_divergence(mid)
if cur_div > desired_div:
left = mid
else:
right = mid
return (left + right) / 2
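# Arithmetic sketch for the bisection above: the search interval starts at
# [0, 1000], so num_iter = max(4, 1 + int(log((1000 - 0) / 0.1) / log(2))) = 14
# and the final interval has width 1000 / 2**14 (about 0.06); eps_lambda is
# therefore located to roughly 0.1, provided more than 8 episode rewards are
# available (otherwise the midpoint 500 is returned immediately).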
class Controller(object):
def __init__(self, env, env_spec, internal_dim,
use_online_batch=True,
batch_by_steps=False,
unify_episodes=False,
replay_batch_size=None,
max_step=None,
cutoff_agent=1,
save_trajectories_file=None,
use_trust_region=False,
use_value_opt=False,
update_eps_lambda=False,
prioritize_by='rewards',
get_model=None,
get_replay_buffer=None,
get_buffer_seeds=None):
self.env = env
self.env_spec = env_spec
self.internal_dim = internal_dim
self.use_online_batch = use_online_batch
self.batch_by_steps = batch_by_steps
self.unify_episodes = unify_episodes
self.replay_batch_size = replay_batch_size
self.max_step = max_step
self.cutoff_agent = cutoff_agent
self.save_trajectories_file = save_trajectories_file
self.use_trust_region = use_trust_region
self.use_value_opt = use_value_opt
self.update_eps_lambda = update_eps_lambda
self.prioritize_by = prioritize_by
self.model = get_model()
self.replay_buffer = get_replay_buffer()
self.seed_replay_buffer(get_buffer_seeds())
self.internal_state = np.array([self.initial_internal_state()] *
len(self.env))
self.last_obs = self.env_spec.initial_obs(len(self.env))
self.last_act = self.env_spec.initial_act(len(self.env))
self.last_pad = np.zeros(len(self.env))
self.start_episode = np.array([True] * len(self.env))
self.step_count = np.array([0] * len(self.env))
self.episode_running_rewards = np.zeros(len(self.env))
self.episode_running_lengths = np.zeros(len(self.env))
self.episode_rewards = []
self.episode_lengths = []
self.total_rewards = []
self.best_batch_rewards = None
def setup(self):
self.model.setup()
def initial_internal_state(self):
return np.zeros(self.model.policy.rnn_state_dim)
def _sample_episodes(self, sess, greedy=False):
"""Sample episodes from environment using model."""
# reset environments as necessary
obs_after_reset = self.env.reset_if(self.start_episode)
for i, obs in enumerate(obs_after_reset):
if obs is not None:
self.step_count[i] = 0
self.internal_state[i] = self.initial_internal_state()
for j in xrange(len(self.env_spec.obs_dims)):
self.last_obs[j][i] = obs[j]
for j in xrange(len(self.env_spec.act_dims)):
self.last_act[j][i] = -1
self.last_pad[i] = 0
# maintain episode as a single unit if the last sampling
# batch ended before the episode was terminated
if self.unify_episodes:
assert len(obs_after_reset) == 1
new_ep = obs_after_reset[0] is not None
else:
new_ep = True
self.start_id = 0 if new_ep else len(self.all_obs[:])
initial_state = self.internal_state
all_obs = [] if new_ep else self.all_obs[:]
all_act = ([self.last_act] if new_ep else self.all_act[:])
all_pad = [] if new_ep else self.all_pad[:]
rewards = [] if new_ep else self.rewards[:]
# start stepping in the environments
step = 0
while not self.env.all_done():
self.step_count += 1 - np.array(self.env.dones)
next_internal_state, sampled_actions = self.model.sample_step(
sess, self.last_obs, self.internal_state, self.last_act,
greedy=greedy)
env_actions = self.env_spec.convert_actions_to_env(sampled_actions)
next_obs, reward, next_dones, _ = self.env.step(env_actions)
all_obs.append(self.last_obs)
all_act.append(sampled_actions)
all_pad.append(self.last_pad)
rewards.append(reward)
self.internal_state = next_internal_state
self.last_obs = next_obs
self.last_act = sampled_actions
self.last_pad = np.array(next_dones).astype('float32')
step += 1
if self.max_step and step >= self.max_step:
break
self.all_obs = all_obs[:]
self.all_act = all_act[:]
self.all_pad = all_pad[:]
self.rewards = rewards[:]
# append final observation
all_obs.append(self.last_obs)
return initial_state, all_obs, all_act, rewards, all_pad
def sample_episodes(self, sess):
"""Sample steps from the environment until we have enough for a batch."""
# check if last batch ended with episode that was not terminated
if self.unify_episodes:
self.all_new_ep = self.start_episode[0]
# sample episodes until we either have enough episodes or enough steps
episodes = []
total_steps = 0
while total_steps < self.max_step * len(self.env):
(initial_state,
observations, actions, rewards,
pads) = self._sample_episodes(sess)
observations = zip(*observations)
actions = zip(*actions)
terminated = np.array(self.env.dones)
self.total_rewards = np.sum(np.array(rewards[self.start_id:]) *
(1 - np.array(pads[self.start_id:])), axis=0)
self.episode_running_rewards *= 1 - self.start_episode
self.episode_running_lengths *= 1 - self.start_episode
self.episode_running_rewards += self.total_rewards
self.episode_running_lengths += np.sum(1 - np.array(pads[self.start_id:]), axis=0)
episodes.extend(self.convert_from_batched_episodes(
initial_state, observations, actions, rewards,
terminated, pads))
total_steps += np.sum(1 - np.array(pads))
# set next starting episodes
self.start_episode = np.logical_or(terminated,
self.step_count >= self.cutoff_agent)
episode_rewards = self.episode_running_rewards[self.start_episode].tolist()
self.episode_rewards.extend(episode_rewards)
self.episode_lengths.extend(self.episode_running_lengths[self.start_episode].tolist())
self.episode_rewards = self.episode_rewards[-100:]
self.episode_lengths = self.episode_lengths[-100:]
if (self.save_trajectories_file is not None and
(self.best_batch_rewards is None or
np.mean(self.total_rewards) > self.best_batch_rewards)):
self.best_batch_rewards = np.mean(self.total_rewards)
my_episodes = self.convert_from_batched_episodes(
initial_state, observations, actions, rewards,
terminated, pads)
with gfile.GFile(self.save_trajectories_file, 'w') as f:
pickle.dump(my_episodes, f)
if not self.batch_by_steps:
return (initial_state,
observations, actions, rewards,
terminated, pads)
return self.convert_to_batched_episodes(episodes)
def _train(self, sess,
observations, initial_state, actions,
rewards, terminated, pads):
"""Train model using batch."""
if self.use_trust_region:
# use trust region to optimize policy
loss, _, summary = self.model.trust_region_step(
sess,
observations, initial_state, actions,
rewards, terminated, pads,
avg_episode_reward=np.mean(self.episode_rewards))
else: # otherwise use simple gradient descent on policy
loss, _, summary = self.model.train_step(
sess,
observations, initial_state, actions,
rewards, terminated, pads,
avg_episode_reward=np.mean(self.episode_rewards))
if self.use_value_opt: # optionally perform specific value optimization
self.model.fit_values(
sess,
observations, initial_state, actions,
rewards, terminated, pads)
return loss, summary
def train(self, sess):
"""Sample some episodes and train on some episodes."""
cur_step = sess.run(self.model.inc_global_step)
self.cur_step = cur_step
# on the first iteration, set target network close to online network
if self.cur_step == 0:
for _ in xrange(100):
sess.run(self.model.copy_op)
# on other iterations, just perform single target <-- online operation
sess.run(self.model.copy_op)
# sample from env
(initial_state,
observations, actions, rewards,
terminated, pads) = self.sample_episodes(sess)
# add to replay buffer
self.add_to_replay_buffer(
initial_state, observations, actions,
rewards, terminated, pads)
loss, summary = 0, None
# train on online batch
if self.use_online_batch:
loss, summary = self._train(
sess,
observations, initial_state, actions,
rewards, terminated, pads)
# update relative entropy coefficient
if self.update_eps_lambda:
episode_rewards = np.array(self.episode_rewards)
episode_lengths = np.array(self.episode_lengths)
eps_lambda = find_best_eps_lambda(episode_rewards, episode_lengths)
sess.run(self.model.objective.assign_eps_lambda,
feed_dict={self.model.objective.new_eps_lambda: eps_lambda})
# train on replay batch
replay_batch, replay_probs = self.get_from_replay_buffer(
self.replay_batch_size)
if replay_batch:
(initial_state,
observations, actions, rewards,
terminated, pads) = replay_batch
loss, summary = self._train(
sess,
observations, initial_state, actions,
rewards, terminated, pads)
return loss, summary, self.total_rewards, self.episode_rewards
def eval(self, sess):
"""Use greedy sampling."""
(initial_state,
observations, actions, rewards,
pads) = self._sample_episodes(sess, greedy=True)
total_rewards = np.sum(np.array(rewards) * (1 - np.array(pads)), axis=0)
return np.mean(total_rewards)
def convert_from_batched_episodes(
self, initial_state, observations, actions, rewards,
terminated, pads):
"""Convert time-major batch of episodes to batch-major list of episodes."""
rewards = np.array(rewards)
pads = np.array(pads)
observations = [np.array(obs) for obs in observations]
actions = [np.array(act) for act in actions]
total_rewards = np.sum(rewards * (1 - pads), axis=0)
total_length = np.sum(1 - pads, axis=0).astype('int32')
episodes = []
num_episodes = rewards.shape[1]
for i in xrange(num_episodes):
length = total_length[i]
ep_initial = initial_state[i]
ep_obs = [obs[:length, i, ...] for obs in observations]
ep_act = [act[:length + 1, i, ...] for act in actions]
ep_rewards = rewards[:length, i]
episodes.append(
[ep_initial, ep_obs, ep_act, ep_rewards, terminated[i]])
return episodes
def convert_to_batched_episodes(self, episodes, max_length=None):
"""Convert batch-major list of episodes to time-major batch of episodes."""
lengths = [len(ep[-2]) for ep in episodes]
max_length = max_length or max(lengths)
new_episodes = []
for ep, length in zip(episodes, lengths):
initial, observations, actions, rewards, terminated = ep
observations = [np.resize(obs, [max_length + 1] + list(obs.shape)[1:])
for obs in observations]
actions = [np.resize(act, [max_length + 1] + list(act.shape)[1:])
for act in actions]
pads = np.array([0] * length + [1] * (max_length - length))
rewards = np.resize(rewards, [max_length]) * (1 - pads)
new_episodes.append([initial, observations, actions, rewards,
terminated, pads])
(initial, observations, actions, rewards,
terminated, pads) = zip(*new_episodes)
observations = [np.swapaxes(obs, 0, 1)
for obs in zip(*observations)]
actions = [np.swapaxes(act, 0, 1)
for act in zip(*actions)]
rewards = np.transpose(rewards)
pads = np.transpose(pads)
return (initial, observations, actions, rewards, terminated, pads)
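  # Shape sketch (illustrative numbers): for 4 episodes padded to max_length 10
  # with a single observation and action dimension, the returned time-major
  # batch has observations and actions of shape [11, 4] (one extra step for the
  # final observation / initial action), rewards and pads of shape [10, 4], and
  # pads[t, i] == 1 marks steps after episode i has ended.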
def add_to_replay_buffer(self, initial_state,
observations, actions, rewards,
terminated, pads):
"""Add batch of episodes to replay buffer."""
if self.replay_buffer is None:
return
rewards = np.array(rewards)
pads = np.array(pads)
total_rewards = np.sum(rewards * (1 - pads), axis=0)
episodes = self.convert_from_batched_episodes(
initial_state, observations, actions, rewards,
terminated, pads)
priorities = (total_rewards if self.prioritize_by == 'reward'
else self.cur_step)
if not self.unify_episodes or self.all_new_ep:
self.last_idxs = self.replay_buffer.add(
episodes, priorities)
else:
# If we are unifying episodes, we attempt to
# keep them unified in the replay buffer.
# The first episode sampled in the current batch is a
# continuation of the last episode from the previous batch
self.replay_buffer.add(episodes[:1], priorities, self.last_idxs[-1:])
if len(episodes) > 1:
self.replay_buffer.add(episodes[1:], priorities)
def get_from_replay_buffer(self, batch_size):
"""Sample a batch of episodes from the replay buffer."""
if self.replay_buffer is None or len(self.replay_buffer) < 1 * batch_size:
return None, None
desired_count = batch_size * self.max_step
# in the case of batch_by_steps, we sample larger and larger
# amounts from the replay buffer until we have enough steps.
while True:
if batch_size > len(self.replay_buffer):
batch_size = len(self.replay_buffer)
episodes, probs = self.replay_buffer.get_batch(batch_size)
count = sum(len(ep[-2]) for ep in episodes)
if count >= desired_count or not self.batch_by_steps:
break
if batch_size == len(self.replay_buffer):
return None, None
batch_size *= 1.2
return (self.convert_to_batched_episodes(episodes), probs)
def seed_replay_buffer(self, episodes):
"""Seed the replay buffer with some episodes."""
if self.replay_buffer is None:
return
# just need to add initial state
for i in xrange(len(episodes)):
episodes[i] = [self.initial_internal_state()] + episodes[i]
self.replay_buffer.seed_buffer(episodes)
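# Training-loop sketch (assumes the surrounding training binary supplies the
# environment, spec, and get_* factories; names below are illustrative):
#
#   controller = Controller(env, env_spec, internal_dim=64,
#                           get_model=get_model,
#                           get_replay_buffer=get_replay_buffer,
#                           get_buffer_seeds=get_buffer_seeds)
#   controller.setup()
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     for _ in xrange(num_training_steps):
#       loss, summary, batch_rewards, episode_rewards = controller.train(sess)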
|
apache-2.0
|
highweb-project/highweb-webcl-html5spec
|
build/android/devil/android/perf/thermal_throttle.py
|
18
|
4283
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
class OmapThrottlingDetector(object):
"""Class to detect and track thermal throttling on an OMAP 4."""
OMAP_TEMP_FILE = ('/sys/devices/platform/omap/omap_temp_sensor.0/'
'temperature')
@staticmethod
def IsSupported(device):
return device.FileExists(OmapThrottlingDetector.OMAP_TEMP_FILE)
def __init__(self, device):
self._device = device
@staticmethod
def BecameThrottled(log_line):
return 'omap_thermal_throttle' in log_line
@staticmethod
def BecameUnthrottled(log_line):
return 'omap_thermal_unthrottle' in log_line
@staticmethod
def GetThrottlingTemperature(log_line):
if 'throttle_delayed_work_fn' in log_line:
return float([s for s in log_line.split() if s.isdigit()][0]) / 1000.0
def GetCurrentTemperature(self):
tempdata = self._device.ReadFile(OmapThrottlingDetector.OMAP_TEMP_FILE)
return float(tempdata) / 1000.0
class ExynosThrottlingDetector(object):
"""Class to detect and track thermal throttling on an Exynos 5."""
@staticmethod
def IsSupported(device):
return device.FileExists('/sys/bus/exynos5-core')
def __init__(self, device):
pass
@staticmethod
def BecameThrottled(log_line):
return 'exynos_tmu: Throttling interrupt' in log_line
@staticmethod
def BecameUnthrottled(log_line):
return 'exynos_thermal_unthrottle: not throttling' in log_line
@staticmethod
def GetThrottlingTemperature(_log_line):
return None
@staticmethod
def GetCurrentTemperature():
return None
class ThermalThrottle(object):
"""Class to detect and track thermal throttling.
Usage:
    Wait for IsThrottled() to be False before running a test.
    After running the test, call HasBeenThrottled() to find out whether the
    test run was affected by thermal throttling.
"""
def __init__(self, device):
self._device = device
self._throttled = False
self._detector = None
if OmapThrottlingDetector.IsSupported(device):
self._detector = OmapThrottlingDetector(device)
elif ExynosThrottlingDetector.IsSupported(device):
self._detector = ExynosThrottlingDetector(device)
def HasBeenThrottled(self):
"""True if there has been any throttling since the last call to
HasBeenThrottled or IsThrottled.
"""
return self._ReadLog()
def IsThrottled(self):
"""True if currently throttled."""
self._ReadLog()
return self._throttled
def _ReadLog(self):
if not self._detector:
return False
has_been_throttled = False
serial_number = str(self._device)
log = self._device.RunShellCommand('dmesg -c')
degree_symbol = unichr(0x00B0)
for line in log:
if self._detector.BecameThrottled(line):
if not self._throttled:
logging.warning('>>> Device %s thermally throttled', serial_number)
self._throttled = True
has_been_throttled = True
elif self._detector.BecameUnthrottled(line):
if self._throttled:
logging.warning('>>> Device %s thermally unthrottled', serial_number)
self._throttled = False
has_been_throttled = True
temperature = self._detector.GetThrottlingTemperature(line)
if temperature is not None:
logging.info(u'Device %s thermally throttled at %3.1f%sC',
serial_number, temperature, degree_symbol)
if logging.getLogger().isEnabledFor(logging.DEBUG):
# Print current temperature of CPU SoC.
temperature = self._detector.GetCurrentTemperature()
if temperature is not None:
logging.debug(u'Current SoC temperature of %s = %3.1f%sC',
serial_number, temperature, degree_symbol)
# Print temperature of battery, to give a system temperature
dumpsys_log = self._device.RunShellCommand('dumpsys battery')
for line in dumpsys_log:
if 'temperature' in line:
btemp = float([s for s in line.split() if s.isdigit()][0]) / 10.0
logging.debug(u'Current battery temperature of %s = %3.1f%sC',
serial_number, btemp, degree_symbol)
return has_been_throttled
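# Usage sketch (assumes a devil DeviceUtils-style `device` object, as used by
# the detectors above; RunPerfTest is an illustrative placeholder):
#
#   throttle = ThermalThrottle(device)
#   while throttle.IsThrottled():
#     time.sleep(5)
#   RunPerfTest()
#   if throttle.HasBeenThrottled():
#     logging.warning('Perf results may be skewed by thermal throttling.')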
|
bsd-3-clause
|
rooi/CouchPotatoServer
|
libs/tempita/__init__.py
|
31
|
38364
|
"""
A small templating language
This implements a small templating language that supports
if/elif/else, for/continue/break, expressions, and blocks of Python
code. The syntax is::
{{any expression (function calls etc)}}
{{any expression | filter}}
{{for x in y}}...{{endfor}}
{{if x}}x{{elif y}}y{{else}}z{{endif}}
{{py:x=1}}
{{py:
def foo(bar):
return 'baz'
}}
{{default var = default_value}}
{{# comment}}
You use this with the ``Template`` class or the ``sub`` shortcut.
The ``Template`` class takes the template string and the name of
the template (for errors) and a default namespace. Then (like
``string.Template``) you can call the ``tmpl.substitute(**kw)``
method to make a substitution (or ``tmpl.substitute(a_dict)``).
``sub(content, **kw)`` substitutes the template immediately. You
can use ``__name='tmpl.html'`` to set the name of the template.
If there are syntax errors ``TemplateError`` will be raised.
"""
import re
import sys
import cgi
from urllib import quote as url_quote
import os
import tokenize
from cStringIO import StringIO
from tempita._looper import looper
from tempita.compat3 import bytes, basestring_, next, is_unicode, coerce_text
__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate',
'sub_html', 'html', 'bunch']
token_re = re.compile(r'\{\{|\}\}')
in_re = re.compile(r'\s+in\s+')
var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I)
class TemplateError(Exception):
"""Exception raised while parsing a template
"""
def __init__(self, message, position, name=None):
Exception.__init__(self, message)
self.position = position
self.name = name
def __str__(self):
msg = ' '.join(self.args)
if self.position:
msg = '%s at line %s column %s' % (
msg, self.position[0], self.position[1])
if self.name:
msg += ' in %s' % self.name
return msg
class _TemplateContinue(Exception):
pass
class _TemplateBreak(Exception):
pass
def get_file_template(name, from_template):
path = os.path.join(os.path.dirname(from_template.name), name)
return from_template.__class__.from_filename(
path, namespace=from_template.namespace,
get_template=from_template.get_template)
class Template(object):
default_namespace = {
'start_braces': '{{',
'end_braces': '}}',
'looper': looper,
}
default_encoding = 'utf8'
default_inherit = None
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0):
self.content = content
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
try:
caller = sys._getframe(stacklevel)
except ValueError:
pass
else:
globals = caller.f_globals
lineno = caller.f_lineno
if '__file__' in globals:
name = globals['__file__']
if name.endswith('.pyc') or name.endswith('.pyo'):
name = name[:-1]
elif '__name__' in globals:
name = globals['__name__']
else:
name = '<string>'
if lineno:
name += ':%s' % lineno
self.name = name
self._parsed = parse(content, name=name, line_offset=line_offset)
if namespace is None:
namespace = {}
self.namespace = namespace
self.get_template = get_template
if default_inherit is not None:
self.default_inherit = default_inherit
def from_filename(cls, filename, namespace=None, encoding=None,
default_inherit=None, get_template=get_file_template):
f = open(filename, 'rb')
c = f.read()
f.close()
if encoding:
c = c.decode(encoding)
return cls(content=c, name=filename, namespace=namespace,
default_inherit=default_inherit, get_template=get_template)
from_filename = classmethod(from_filename)
def __repr__(self):
return '<%s %s name=%r>' % (
self.__class__.__name__,
hex(id(self))[2:], self.name)
def substitute(self, *args, **kw):
if args:
if kw:
raise TypeError(
"You can only give positional *or* keyword arguments")
if len(args) > 1:
raise TypeError(
"You can only give one positional argument")
if not hasattr(args[0], 'items'):
raise TypeError(
"If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r"
% (args[0],))
kw = args[0]
ns = kw
ns['__template_name__'] = self.name
if self.namespace:
ns.update(self.namespace)
result, defs, inherit = self._interpret(ns)
if not inherit:
inherit = self.default_inherit
if inherit:
result = self._interpret_inherit(result, defs, inherit, ns)
return result
def _interpret(self, ns):
__traceback_hide__ = True
parts = []
defs = {}
self._interpret_codes(self._parsed, ns, out=parts, defs=defs)
if '__inherit__' in defs:
inherit = defs.pop('__inherit__')
else:
inherit = None
return ''.join(parts), defs, inherit
def _interpret_inherit(self, body, defs, inherit_template, ns):
__traceback_hide__ = True
if not self.get_template:
raise TemplateError(
'You cannot use inheritance without passing in get_template',
position=None, name=self.name)
templ = self.get_template(inherit_template, self)
self_ = TemplateObject(self.name)
for name, value in defs.iteritems():
setattr(self_, name, value)
self_.body = body
ns = ns.copy()
ns['self'] = self_
return templ.substitute(ns)
def _interpret_codes(self, codes, ns, out, defs):
__traceback_hide__ = True
for item in codes:
if isinstance(item, basestring_):
out.append(item)
else:
self._interpret_code(item, ns, out, defs)
def _interpret_code(self, code, ns, out, defs):
__traceback_hide__ = True
name, pos = code[0], code[1]
if name == 'py':
self._exec(code[2], ns, pos)
elif name == 'continue':
raise _TemplateContinue()
elif name == 'break':
raise _TemplateBreak()
elif name == 'for':
vars, expr, content = code[2], code[3], code[4]
expr = self._eval(expr, ns, pos)
self._interpret_for(vars, expr, content, ns, out, defs)
elif name == 'cond':
parts = code[2:]
self._interpret_if(parts, ns, out, defs)
elif name == 'expr':
parts = code[2].split('|')
base = self._eval(parts[0], ns, pos)
for part in parts[1:]:
func = self._eval(part, ns, pos)
base = func(base)
out.append(self._repr(base, pos))
elif name == 'default':
var, expr = code[2], code[3]
if var not in ns:
result = self._eval(expr, ns, pos)
ns[var] = result
elif name == 'inherit':
expr = code[2]
value = self._eval(expr, ns, pos)
defs['__inherit__'] = value
elif name == 'def':
name = code[2]
signature = code[3]
parts = code[4]
ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns,
pos=pos)
elif name == 'comment':
return
else:
assert 0, "Unknown code: %r" % name
def _interpret_for(self, vars, expr, content, ns, out, defs):
__traceback_hide__ = True
for item in expr:
if len(vars) == 1:
ns[vars[0]] = item
else:
if len(vars) != len(item):
raise ValueError(
'Need %i items to unpack (got %i items)'
% (len(vars), len(item)))
for name, value in zip(vars, item):
ns[name] = value
try:
self._interpret_codes(content, ns, out, defs)
except _TemplateContinue:
continue
except _TemplateBreak:
break
def _interpret_if(self, parts, ns, out, defs):
__traceback_hide__ = True
# @@: if/else/else gets through
for part in parts:
assert not isinstance(part, basestring_)
name, pos = part[0], part[1]
if name == 'else':
result = True
else:
result = self._eval(part[2], ns, pos)
if result:
self._interpret_codes(part[3], ns, out, defs)
break
def _eval(self, code, ns, pos):
__traceback_hide__ = True
try:
try:
value = eval(code, self.default_namespace, ns)
except SyntaxError, e:
raise SyntaxError(
'invalid syntax in expression: %s' % code)
return value
except:
exc_info = sys.exc_info()
e = exc_info[1]
if getattr(e, 'args', None):
arg0 = e.args[0]
else:
arg0 = coerce_text(e)
e.args = (self._add_line_info(arg0, pos),)
raise exc_info[0], e, exc_info[2]
def _exec(self, code, ns, pos):
__traceback_hide__ = True
try:
exec code in self.default_namespace, ns
except:
exc_info = sys.exc_info()
e = exc_info[1]
if e.args:
e.args = (self._add_line_info(e.args[0], pos),)
else:
e.args = (self._add_line_info(None, pos),)
raise exc_info[0], e, exc_info[2]
def _repr(self, value, pos):
__traceback_hide__ = True
try:
if value is None:
return ''
if self._unicode:
try:
value = unicode(value)
except UnicodeDecodeError:
value = bytes(value)
else:
if not isinstance(value, basestring_):
value = coerce_text(value)
if (is_unicode(value)
and self.default_encoding):
value = value.encode(self.default_encoding)
except:
exc_info = sys.exc_info()
e = exc_info[1]
e.args = (self._add_line_info(e.args[0], pos),)
raise exc_info[0], e, exc_info[2]
else:
if self._unicode and isinstance(value, bytes):
if not self.default_encoding:
raise UnicodeDecodeError(
'Cannot decode bytes value %r into unicode '
'(no default_encoding provided)' % value)
try:
value = value.decode(self.default_encoding)
except UnicodeDecodeError, e:
raise UnicodeDecodeError(
e.encoding,
e.object,
e.start,
e.end,
e.reason + ' in string %r' % value)
elif not self._unicode and is_unicode(value):
if not self.default_encoding:
raise UnicodeEncodeError(
'Cannot encode unicode value %r into bytes '
'(no default_encoding provided)' % value)
value = value.encode(self.default_encoding)
return value
def _add_line_info(self, msg, pos):
msg = "%s at line %s column %s" % (
msg, pos[0], pos[1])
if self.name:
msg += " in file %s" % self.name
return msg
def sub(content, **kw):
name = kw.get('__name')
tmpl = Template(content, name=name)
return tmpl.substitute(kw)
def paste_script_template_renderer(content, vars, filename=None):
tmpl = Template(content, name=filename)
return tmpl.substitute(vars)
class bunch(dict):
def __init__(self, **kw):
for name, value in kw.iteritems():
setattr(self, name, value)
def __setattr__(self, name, value):
self[name] = value
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __getitem__(self, key):
if 'default' in self:
try:
return dict.__getitem__(self, key)
except KeyError:
return dict.__getitem__(self, 'default')
else:
return dict.__getitem__(self, key)
def __repr__(self):
items = [
(k, v) for k, v in self.iteritems()]
items.sort()
return '<%s %s>' % (
self.__class__.__name__,
' '.join(['%s=%r' % (k, v) for k, v in items]))
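# bunch behaves like a dict with attribute access and an optional 'default'
# fallback key, e.g. (illustrative):
#
#   >>> b = bunch(default=0, x=2)
#   >>> b.x, b['x'], b['missing']
#   (2, 2, 0)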
############################################################
## HTML Templating
############################################################
class html(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def __html__(self):
return self.value
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__, self.value)
def html_quote(value, force=True):
if not force and hasattr(value, '__html__'):
return value.__html__()
if value is None:
return ''
if not isinstance(value, basestring_):
value = coerce_text(value)
if sys.version >= "3" and isinstance(value, bytes):
value = cgi.escape(value.decode('latin1'), 1)
value = value.encode('latin1')
else:
value = cgi.escape(value, 1)
if sys.version < "3":
if is_unicode(value):
value = value.encode('ascii', 'xmlcharrefreplace')
return value
def url(v):
v = coerce_text(v)
if is_unicode(v):
v = v.encode('utf8')
return url_quote(v)
def attr(**kw):
kw = list(kw.iteritems())
kw.sort()
parts = []
for name, value in kw:
if value is None:
continue
if name.endswith('_'):
name = name[:-1]
parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
return html(' '.join(parts))
class HTMLTemplate(Template):
default_namespace = Template.default_namespace.copy()
default_namespace.update(dict(
html=html,
attr=attr,
url=url,
html_quote=html_quote,
))
def _repr(self, value, pos):
if hasattr(value, '__html__'):
value = value.__html__()
quote = False
else:
quote = True
plain = Template._repr(self, value, pos)
if quote:
return html_quote(plain)
else:
return plain
def sub_html(content, **kw):
name = kw.get('__name')
tmpl = HTMLTemplate(content, name=name)
return tmpl.substitute(kw)
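# HTML-aware substitution example: plain values are escaped, while objects with
# an __html__ method (such as the html wrapper above) are inserted verbatim:
#
#   >>> sub_html('<p>{{msg}}</p>', msg='a < b & c')
#   '<p>a &lt; b &amp; c</p>'
#   >>> sub_html('<p>{{msg}}</p>', msg=html('<b>hi</b>'))
#   '<p><b>hi</b></p>'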
class TemplateDef(object):
def __init__(self, template, func_name, func_signature,
body, ns, pos, bound_self=None):
self._template = template
self._func_name = func_name
self._func_signature = func_signature
self._body = body
self._ns = ns
self._pos = pos
self._bound_self = bound_self
def __repr__(self):
return '<tempita function %s(%s) at %s:%s>' % (
self._func_name, self._func_signature,
self._template.name, self._pos)
def __str__(self):
return self()
def __call__(self, *args, **kw):
values = self._parse_signature(args, kw)
ns = self._ns.copy()
ns.update(values)
if self._bound_self is not None:
ns['self'] = self._bound_self
out = []
subdefs = {}
self._template._interpret_codes(self._body, ns, out, subdefs)
return ''.join(out)
def __get__(self, obj, type=None):
if obj is None:
return self
return self.__class__(
self._template, self._func_name, self._func_signature,
self._body, self._ns, self._pos, bound_self=obj)
def _parse_signature(self, args, kw):
values = {}
sig_args, var_args, var_kw, defaults = self._func_signature
extra_kw = {}
for name, value in kw.iteritems():
if not var_kw and name not in sig_args:
raise TypeError(
'Unexpected argument %s' % name)
if name in sig_args:
                values[name] = value
else:
extra_kw[name] = value
args = list(args)
sig_args = list(sig_args)
while args:
while sig_args and sig_args[0] in values:
sig_args.pop(0)
if sig_args:
name = sig_args.pop(0)
values[name] = args.pop(0)
elif var_args:
values[var_args] = tuple(args)
break
else:
raise TypeError(
                    'Extra positional arguments: %s'
% ', '.join(repr(v) for v in args))
for name, value_expr in defaults.iteritems():
if name not in values:
values[name] = self._template._eval(
value_expr, self._ns, self._pos)
for name in sig_args:
if name not in values:
raise TypeError(
'Missing argument: %s' % name)
if var_kw:
values[var_kw] = extra_kw
return values
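# A {{def}} block compiles to a TemplateDef, callable from expressions in the
# same template, e.g. (illustrative):
#
#   >>> t = Template("{{def greet(name)}}Hi {{name}}{{enddef}}{{greet('Ana')}}")
#   >>> t.substitute()
#   'Hi Ana'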
class TemplateObject(object):
def __init__(self, name):
self.__name = name
self.get = TemplateObjectGetter(self)
def __repr__(self):
return '<%s %s>' % (self.__class__.__name__, self.__name)
class TemplateObjectGetter(object):
def __init__(self, template_obj):
self.__template_obj = template_obj
def __getattr__(self, attr):
return getattr(self.__template_obj, attr, Empty)
def __repr__(self):
return '<%s around %r>' % (self.__class__.__name__, self.__template_obj)
class _Empty(object):
def __call__(self, *args, **kw):
return self
def __str__(self):
return ''
def __repr__(self):
return 'Empty'
def __unicode__(self):
return u''
def __iter__(self):
return iter(())
def __bool__(self):
return False
if sys.version < "3":
__nonzero__ = __bool__
Empty = _Empty()
del _Empty
############################################################
## Lexing and Parsing
############################################################
def lex(s, name=None, trim_whitespace=True, line_offset=0):
"""
Lex a string into chunks:
>>> lex('hey')
['hey']
>>> lex('hey {{you}}')
['hey ', ('you', (1, 7))]
>>> lex('hey {{')
Traceback (most recent call last):
...
TemplateError: No }} to finish last expression at line 1 column 7
>>> lex('hey }}')
Traceback (most recent call last):
...
TemplateError: }} outside expression at line 1 column 7
>>> lex('hey {{ {{')
Traceback (most recent call last):
...
TemplateError: {{ inside expression at line 1 column 10
"""
in_expr = False
chunks = []
last = 0
last_pos = (1, 1)
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), line_offset)
if expr == '{{' and in_expr:
raise TemplateError('{{ inside expression', position=pos,
name=name)
elif expr == '}}' and not in_expr:
raise TemplateError('}} outside expression', position=pos,
name=name)
if expr == '{{':
part = s[last:match.start()]
if part:
chunks.append(part)
in_expr = True
else:
chunks.append((s[last:match.start()], last_pos))
in_expr = False
last = match.end()
last_pos = pos
if in_expr:
raise TemplateError('No }} to finish last expression',
name=name, position=last_pos)
part = s[last:]
if part:
chunks.append(part)
if trim_whitespace:
chunks = trim_lex(chunks)
return chunks
statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)')
single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break']
trail_whitespace_re = re.compile(r'\n\r?[\t ]*$')
lead_whitespace_re = re.compile(r'^[\t ]*\n')
def trim_lex(tokens):
r"""
Takes a lexed set of tokens, and removes whitespace when there is
a directive on a line by itself:
>>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False)
>>> tokens
[('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny']
>>> trim_lex(tokens)
[('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y']
"""
last_trim = None
for i in range(len(tokens)):
current = tokens[i]
if isinstance(tokens[i], basestring_):
# we don't trim this
continue
item = current[0]
if not statement_re.search(item) and item not in single_statements:
continue
if not i:
prev = ''
else:
prev = tokens[i - 1]
if i + 1 >= len(tokens):
next_chunk = ''
else:
next_chunk = tokens[i + 1]
if (not isinstance(next_chunk, basestring_)
or not isinstance(prev, basestring_)):
continue
prev_ok = not prev or trail_whitespace_re.search(prev)
if i == 1 and not prev.strip():
prev_ok = True
if last_trim is not None and last_trim + 2 == i and not prev.strip():
prev_ok = 'last'
if (prev_ok
and (not next_chunk or lead_whitespace_re.search(next_chunk)
or (i == len(tokens) - 2 and not next_chunk.strip()))):
if prev:
if ((i == 1 and not prev.strip())
or prev_ok == 'last'):
tokens[i - 1] = ''
else:
m = trail_whitespace_re.search(prev)
# +1 to leave the leading \n on:
prev = prev[:m.start() + 1]
tokens[i - 1] = prev
if next_chunk:
last_trim = i
if i == len(tokens) - 2 and not next_chunk.strip():
tokens[i + 1] = ''
else:
m = lead_whitespace_re.search(next_chunk)
next_chunk = next_chunk[m.end():]
tokens[i + 1] = next_chunk
return tokens
def find_position(string, index, line_offset):
"""Given a string and index, return (line, column)"""
leading = string[:index].splitlines()
return (len(leading) + line_offset, len(leading[-1]) + 1)
def parse(s, name=None, line_offset=0):
r"""
Parses a string into a kind of AST
>>> parse('{{x}}')
[('expr', (1, 3), 'x')]
>>> parse('foo')
['foo']
>>> parse('{{if x}}test{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['test']))]
>>> parse('series->{{for x in y}}x={{x}}{{endfor}}')
['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])]
>>> parse('{{for x, y in z:}}{{continue}}{{endfor}}')
[('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])]
>>> parse('{{py:x=1}}')
[('py', (1, 3), 'x=1')]
>>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}')
[('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))]
Some exceptions::
>>> parse('{{continue}}')
Traceback (most recent call last):
...
TemplateError: continue outside of for loop at line 1 column 3
>>> parse('{{if x}}foo')
Traceback (most recent call last):
...
TemplateError: No {{endif}} at line 1 column 3
>>> parse('{{else}}')
Traceback (most recent call last):
...
TemplateError: else outside of an if block at line 1 column 3
>>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Unexpected endif at line 1 column 25
>>> parse('{{if}}{{endif}}')
Traceback (most recent call last):
...
TemplateError: if with no expression at line 1 column 3
>>> parse('{{for x y}}{{endfor}}')
Traceback (most recent call last):
...
TemplateError: Bad for (no "in") in 'x y' at line 1 column 3
>>> parse('{{py:x=1\ny=2}}')
Traceback (most recent call last):
...
TemplateError: Multi-line py blocks must start with a newline at line 1 column 3
"""
tokens = lex(s, name=name, line_offset=line_offset)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
result.append(next_chunk)
return result
def parse_expr(tokens, name, context=()):
if isinstance(tokens[0], basestring_):
return tokens[0], tokens[1:]
expr, pos = tokens[0]
expr = expr.strip()
if expr.startswith('py:'):
expr = expr[3:].lstrip(' \t')
if expr.startswith('\n') or expr.startswith('\r'):
expr = expr.lstrip('\r\n')
if '\r' in expr:
expr = expr.replace('\r\n', '\n')
expr = expr.replace('\r', '')
expr += '\n'
else:
if '\n' in expr:
raise TemplateError(
'Multi-line py blocks must start with a newline',
position=pos, name=name)
return ('py', pos, expr), tokens[1:]
elif expr in ('continue', 'break'):
if 'for' not in context:
raise TemplateError(
                '%s outside of for loop' % expr,
position=pos, name=name)
return (expr, pos), tokens[1:]
elif expr.startswith('if '):
return parse_cond(tokens, name, context)
elif (expr.startswith('elif ')
or expr == 'else'):
raise TemplateError(
'%s outside of an if block' % expr.split()[0],
position=pos, name=name)
elif expr in ('if', 'elif', 'for'):
raise TemplateError(
'%s with no expression' % expr,
position=pos, name=name)
elif expr in ('endif', 'endfor', 'enddef'):
raise TemplateError(
'Unexpected %s' % expr,
position=pos, name=name)
elif expr.startswith('for '):
return parse_for(tokens, name, context)
elif expr.startswith('default '):
return parse_default(tokens, name, context)
elif expr.startswith('inherit '):
return parse_inherit(tokens, name, context)
elif expr.startswith('def '):
return parse_def(tokens, name, context)
elif expr.startswith('#'):
return ('comment', pos, tokens[0][0]), tokens[1:]
return ('expr', pos, tokens[0][0]), tokens[1:]
def parse_cond(tokens, name, context):
start = tokens[0][1]
pieces = []
context = context + ('if',)
while 1:
if not tokens:
raise TemplateError(
'Missing {{endif}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endif'):
return ('cond', start) + tuple(pieces), tokens[1:]
next_chunk, tokens = parse_one_cond(tokens, name, context)
pieces.append(next_chunk)
def parse_one_cond(tokens, name, context):
(first, pos), tokens = tokens[0], tokens[1:]
content = []
if first.endswith(':'):
first = first[:-1]
if first.startswith('if '):
part = ('if', pos, first[3:].lstrip(), content)
elif first.startswith('elif '):
part = ('elif', pos, first[5:].lstrip(), content)
elif first == 'else':
part = ('else', pos, None, content)
else:
assert 0, "Unexpected token %r at %s" % (first, pos)
while 1:
if not tokens:
raise TemplateError(
'No {{endif}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and (tokens[0][0] == 'endif'
or tokens[0][0].startswith('elif ')
or tokens[0][0] == 'else')):
return part, tokens
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_for(tokens, name, context):
first, pos = tokens[0]
tokens = tokens[1:]
context = ('for',) + context
content = []
assert first.startswith('for ')
if first.endswith(':'):
first = first[:-1]
first = first[3:].strip()
match = in_re.search(first)
if not match:
raise TemplateError(
'Bad for (no "in") in %r' % first,
position=pos, name=name)
vars = first[:match.start()]
if '(' in vars:
raise TemplateError(
'You cannot have () in the variable section of a for loop (%r)'
% vars, position=pos, name=name)
vars = tuple([
v.strip() for v in first[:match.start()].split(',')
if v.strip()])
expr = first[match.end():]
while 1:
if not tokens:
raise TemplateError(
'No {{endfor}}',
position=pos, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'endfor'):
return ('for', pos, vars, expr, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_default(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('default ')
first = first.split(None, 1)[1]
parts = first.split('=', 1)
if len(parts) == 1:
raise TemplateError(
"Expression must be {{default var=value}}; no = found in %r" % first,
position=pos, name=name)
var = parts[0].strip()
if ',' in var:
raise TemplateError(
"{{default x, y = ...}} is not supported",
position=pos, name=name)
if not var_re.search(var):
raise TemplateError(
"Not a valid variable name for {{default}}: %r"
% var, position=pos, name=name)
expr = parts[1].strip()
return ('default', pos, var, expr), tokens[1:]
def parse_inherit(tokens, name, context):
first, pos = tokens[0]
assert first.startswith('inherit ')
expr = first.split(None, 1)[1]
return ('inherit', pos, expr), tokens[1:]
def parse_def(tokens, name, context):
first, start = tokens[0]
tokens = tokens[1:]
assert first.startswith('def ')
first = first.split(None, 1)[1]
if first.endswith(':'):
first = first[:-1]
if '(' not in first:
func_name = first
sig = ((), None, None, {})
elif not first.endswith(')'):
raise TemplateError("Function definition doesn't end with ): %s" % first,
position=start, name=name)
else:
first = first[:-1]
func_name, sig_text = first.split('(', 1)
sig = parse_signature(sig_text, name, start)
context = context + ('def',)
content = []
while 1:
if not tokens:
raise TemplateError(
'Missing {{enddef}}',
position=start, name=name)
if (isinstance(tokens[0], tuple)
and tokens[0][0] == 'enddef'):
return ('def', start, func_name, sig, content), tokens[1:]
next_chunk, tokens = parse_expr(tokens, name, context)
content.append(next_chunk)
def parse_signature(sig_text, name, pos):
tokens = tokenize.generate_tokens(StringIO(sig_text).readline)
sig_args = []
var_arg = None
var_kw = None
defaults = {}
def get_token(pos=False):
try:
tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens)
except StopIteration:
return tokenize.ENDMARKER, ''
if pos:
return tok_type, tok_string, (srow, scol), (erow, ecol)
else:
return tok_type, tok_string
while 1:
var_arg_type = None
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER:
break
if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'):
var_arg_type = tok_string
tok_type, tok_string = get_token()
if tok_type != tokenize.NAME:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
var_name = tok_string
tok_type, tok_string = get_token()
if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','):
if var_arg_type == '*':
var_arg = var_name
elif var_arg_type == '**':
var_kw = var_name
else:
sig_args.append(var_name)
if tok_type == tokenize.ENDMARKER:
break
continue
if var_arg_type is not None:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if tok_type == tokenize.OP and tok_string == '=':
nest_type = None
unnest_type = None
nest_count = 0
start_pos = end_pos = None
parts = []
while 1:
tok_type, tok_string, s, e = get_token(True)
if start_pos is None:
start_pos = s
end_pos = e
if tok_type == tokenize.ENDMARKER and nest_count:
raise TemplateError('Invalid signature: (%s)' % sig_text,
position=pos, name=name)
if (not nest_count and
(tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))):
default_expr = isolate_expression(sig_text, start_pos, end_pos)
defaults[var_name] = default_expr
sig_args.append(var_name)
break
parts.append((tok_type, tok_string))
if nest_count and tok_type == tokenize.OP and tok_string == nest_type:
nest_count += 1
elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type:
nest_count -= 1
if not nest_count:
nest_type = unnest_type = None
elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'):
nest_type = tok_string
nest_count = 1
unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type]
return sig_args, var_arg, var_kw, defaults
def isolate_expression(string, start_pos, end_pos):
srow, scol = start_pos
srow -= 1
erow, ecol = end_pos
erow -= 1
lines = string.splitlines(True)
if srow == erow:
return lines[srow][scol:ecol]
parts = [lines[srow][scol:]]
parts.extend(lines[srow+1:erow])
if erow < len(lines):
# It'll sometimes give (end_row_past_finish, 0)
parts.append(lines[erow][:ecol])
return ''.join(parts)
_fill_command_usage = """\
%prog [OPTIONS] TEMPLATE arg=value
Use py:arg=value to set a Python value; otherwise all values are
strings.
"""
def fill_command(args=None):
import sys
import optparse
import pkg_resources
import os
if args is None:
args = sys.argv[1:]
dist = pkg_resources.get_distribution('Paste')
parser = optparse.OptionParser(
version=coerce_text(dist),
usage=_fill_command_usage)
parser.add_option(
'-o', '--output',
dest='output',
metavar="FILENAME",
help="File to write output to (default stdout)")
parser.add_option(
'--html',
dest='use_html',
action='store_true',
help="Use HTML style filling (including automatic HTML quoting)")
parser.add_option(
'--env',
dest='use_env',
action='store_true',
help="Put the environment in as top-level variables")
options, args = parser.parse_args(args)
if len(args) < 1:
print('You must give a template filename')
sys.exit(2)
template_name = args[0]
args = args[1:]
vars = {}
if options.use_env:
vars.update(os.environ)
for value in args:
if '=' not in value:
print('Bad argument: %r' % value)
sys.exit(2)
name, value = value.split('=', 1)
if name.startswith('py:'):
            name = name[3:]
value = eval(value)
vars[name] = value
if template_name == '-':
template_content = sys.stdin.read()
template_name = '<stdin>'
else:
f = open(template_name, 'rb')
template_content = f.read()
f.close()
if options.use_html:
TemplateClass = HTMLTemplate
else:
TemplateClass = Template
template = TemplateClass(template_content, name=template_name)
result = template.substitute(vars)
if options.output:
f = open(options.output, 'wb')
f.write(result)
f.close()
else:
sys.stdout.write(result)
if __name__ == '__main__':
fill_command()
|
gpl-3.0
|
anand-c-goog/tensorflow
|
tensorflow/contrib/quantization/python/array_ops.py
|
5
|
1141
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Quantized Array Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import,wildcard-import
from tensorflow.python.ops import gen_array_ops as quantized_gen_array_ops
from tensorflow.python.ops.gen_array_ops import dequantize
from tensorflow.python.ops.gen_array_ops import quantize_v2
from tensorflow.python.ops.gen_array_ops import quantized_concat
|
apache-2.0
|
bravominski/PennApps2015-HeartMates
|
venv/lib/python2.7/site-packages/requests/packages/urllib3/request.py
|
853
|
5751
|
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
"""
    Convenience mixin for classes that implement a :meth:`urlopen` method, such
as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
:class:`~urllib3.poolmanager.PoolManager`.
Provides behavior for making common types of HTTP request methods and
decides which type of request field encoding to use.
Specifically,
:meth:`.request_encode_url` is for sending requests whose fields are
encoded in the URL (such as GET, HEAD, DELETE).
:meth:`.request_encode_body` is for sending requests whose fields are
encoded in the *body* of the request using multipart or www-form-urlencoded
(such as for POST, PUT, PATCH).
:meth:`.request` is for making any kind of request, it will look up the
appropriate encoding format and use one of the above two methods to make
the request.
Initializer parameters:
:param headers:
Headers to include with all requests, unless other headers are given
explicitly.
"""
_encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
def __init__(self, headers=None):
self.headers = headers or {}
def urlopen(self, method, url, body=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**kw): # Abstract
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")
def request(self, method, url, fields=None, headers=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the appropriate encoding of
``fields`` based on the ``method`` used.
This is a convenience method that requires the least amount of manual
effort. It can be used in most situations, while still having the
option to drop down to more specific methods when necessary, such as
:meth:`request_encode_url`, :meth:`request_encode_body`,
or even the lowest level :meth:`urlopen`.
"""
method = method.upper()
if method in self._encode_url_methods:
return self.request_encode_url(method, url, fields=fields,
headers=headers,
**urlopen_kw)
else:
return self.request_encode_body(method, url, fields=fields,
headers=headers,
**urlopen_kw)
def request_encode_url(self, method, url, fields=None, **urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the url. This is useful for request methods like GET, HEAD, DELETE, etc.
"""
if fields:
url += '?' + urlencode(fields)
return self.urlopen(method, url, **urlopen_kw)
def request_encode_body(self, method, url, fields=None, headers=None,
encode_multipart=True, multipart_boundary=None,
**urlopen_kw):
"""
Make a request using :meth:`urlopen` with the ``fields`` encoded in
the body. This is useful for request methods like POST, PUT, PATCH, etc.
When ``encode_multipart=True`` (default), then
:meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
the payload with the appropriate content type. Otherwise
:meth:`urllib.urlencode` is used with the
'application/x-www-form-urlencoded' content type.
        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it at other times too. However, it may break request
signing, such as with OAuth.
Supports an optional ``fields`` parameter of key/value strings AND
key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
the MIME type is optional. For example::
fields = {
'foo': 'bar',
'fakefile': ('foofile.txt', 'contents of foofile'),
'realfile': ('barfile.txt', open('realfile').read()),
'typedfile': ('bazfile.bin', open('bazfile').read(),
'image/jpeg'),
'nonamefile': 'contents of nonamefile field',
}
When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimic the behavior of browsers.
Note that if ``headers`` are supplied, the 'Content-Type' header will
be overwritten because it depends on the dynamic random boundary string
which is used to compose the body of the request. The random boundary
string can be explicitly set with the ``multipart_boundary`` parameter.
"""
if headers is None:
headers = self.headers
extra_kw = {'headers': {}}
if fields:
if 'body' in urlopen_kw:
raise TypeError('request got values for both \'fields\' and \'body\', can only specify one.')
if encode_multipart:
body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
else:
body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'
extra_kw['body'] = body
extra_kw['headers'] = {'Content-Type': content_type}
extra_kw['headers'].update(headers)
extra_kw.update(urlopen_kw)
return self.urlopen(method, url, **extra_kw)
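# A minimal usage sketch of this interface, assuming a concrete subclass that
# implements urlopen() (for example urllib3's PoolManager); the `http`
# instance and URLs below are illustrative assumptions only:
#
#   http = PoolManager()
#   http.request('GET', 'http://example.com/', fields={'q': 'test'})
#       # dispatched to request_encode_url(): fields go into the query string
#   http.request('POST', 'http://example.com/upload',
#                fields={'file': ('report.txt', 'contents of report')})
#       # dispatched to request_encode_body(): fields are multipart-encoded
#       # by default, with the Content-Type header set from the boundary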
|
apache-2.0
|
lgiommi/root
|
interpreter/llvm/src/tools/clang/bindings/python/tests/cindex/test_token_kind.py
|
97
|
1064
|
from clang.cindex import TokenKind
from nose.tools import eq_
from nose.tools import ok_
from nose.tools import raises
def test_constructor():
"""Ensure TokenKind constructor works as expected."""
t = TokenKind(5, 'foo')
eq_(t.value, 5)
eq_(t.name, 'foo')
@raises(ValueError)
def test_bad_register():
"""Ensure a duplicate value is rejected for registration."""
TokenKind.register(2, 'foo')
@raises(ValueError)
def test_unknown_value():
"""Ensure trying to fetch an unknown value raises."""
TokenKind.from_value(-1)
def test_registration():
"""Ensure that items registered appear as class attributes."""
ok_(hasattr(TokenKind, 'LITERAL'))
literal = TokenKind.LITERAL
ok_(isinstance(literal, TokenKind))
def test_from_value():
"""Ensure registered values can be obtained from from_value()."""
t = TokenKind.from_value(3)
ok_(isinstance(t, TokenKind))
eq_(t, TokenKind.LITERAL)
def test_repr():
"""Ensure repr() works."""
r = repr(TokenKind.LITERAL)
eq_(r, 'TokenKind.LITERAL')
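# A short sketch of the registry behaviour these tests exercise (the value 3
# for LITERAL is taken from test_from_value() above; the value 99 and the
# name 'EXAMPLE' are illustrative assumptions):
#
#   TokenKind.register(99, 'EXAMPLE')              # adds TokenKind.EXAMPLE
#   TokenKind.from_value(99).name                  # -> 'EXAMPLE'
#   TokenKind.from_value(3) is TokenKind.LITERAL   # -> True (same instance)
#
# register() raises ValueError for an already-used value, which is what
# test_bad_register() checks.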
|
lgpl-2.1
|
tqtran7/horizon
|
openstack_dashboard/dashboards/project/networks/forms.py
|
41
|
2431
|
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class UpdateNetwork(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"), required=False)
tenant_id = forms.CharField(widget=forms.HiddenInput)
network_id = forms.CharField(label=_("ID"),
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
admin_state = forms.ChoiceField(choices=[(True, _('UP')),
(False, _('DOWN'))],
required=False,
label=_("Admin State"))
failure_url = 'horizon:project:networks:index'
def handle(self, request, data):
try:
params = {'admin_state_up': (data['admin_state'] == 'True'),
'name': data['name']}
network = api.neutron.network_update(request,
data['network_id'],
**params)
msg = _('Network %s was successfully updated.') % data['name']
LOG.debug(msg)
messages.success(request, msg)
return network
except Exception:
msg = _('Failed to update network %s') % data['name']
LOG.info(msg)
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
|
apache-2.0
|
poulpito/Flexget
|
flexget/plugins/sites/limetorrents.py
|
6
|
3902
|
from __future__ import unicode_literals, division, absolute_import
from future.moves.urllib.parse import quote
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.utils.requests import RequestException
from flexget.utils.soup import get_soup
from flexget.utils.search import torrent_availability
from flexget.utils.tools import parse_filesize
log = logging.getLogger('limetorrents')
class Limetorrents(object):
"""
Limetorrents search plugin.
"""
schema = {
'oneOf': [
{'type': 'boolean'},
{
'type': 'object',
'properties': {
'category': {'type': 'string', 'enum': ['all', 'anime', 'applications', 'games', 'movies', 'music',
'tv', 'other'], 'default': 'all'},
'order_by': {'type': 'string', 'enum': ['date', 'seeds'], 'default': 'date'}
},
'additionalProperties': False
}
]
}
base_url = 'https://www.limetorrents.cc/'
errors = False
@plugin.internet(log)
def search(self, task, entry, config):
"""
Search for entries on Limetorrents
"""
if not isinstance(config, dict):
config = {'category': config}
order_by = ''
if isinstance(config.get('order_by'), str):
if config['order_by'] != 'date':
order_by = '{0}/1'.format(config['order_by'])
category = 'all'
if isinstance(config.get('category'), str):
category = '{0}'.format(config['category'])
entries = set()
for search_string in entry.get('search_strings', [entry['title']]):
query = 'search/{0}/{1}/{2}'.format(category, quote(search_string.encode('utf8')), order_by)
log.debug('Using search: %s; category: %s; ordering: %s', search_string, category, order_by or 'default')
try:
page = task.requests.get(self.base_url + query)
log.debug('requesting: %s', page.url)
except RequestException as e:
log.error('Limetorrents request failed: %s', e)
continue
soup = get_soup(page.content)
if soup.find('a', attrs={'class': 'csprite_dl14'}) is not None:
for link in soup.findAll('a', attrs={'class': 'csprite_dl14'}):
row = link.find_parent('tr')
info_url = str(link.get('href'))
# Get the title from the URL, since it is complete, unlike the visible title text, which gets cut off
title = str(link.next_sibling.get('href'))
title = title[:title.rfind('-torrent')].replace('-', ' ')
title = title[1:]
data = row.findAll('td', attrs={'class': 'tdnormal'})
size = str(data[1].text).replace(',', '')
seeds = int(row.find('td', attrs={'class': 'tdseed'}).text.replace(',', ''))
leeches = int(row.find('td', attrs={'class': 'tdleech'}).text.replace(',', ''))
size = parse_filesize(size)
e = Entry()
e['url'] = info_url
e['title'] = title
e['torrent_seeds'] = seeds
e['torrent_leeches'] = leeches
e['search_sort'] = torrent_availability(e['torrent_seeds'], e['torrent_leeches'])
e['content_size'] = size
entries.add(e)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(Limetorrents, 'limetorrents', interfaces=['search'], api_ver=2)
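# A plausible task configuration sketch (YAML, rendered here as comments).
# Since this is a `search` interface plugin it would typically be referenced
# from another plugin such as `discover`; the task name and the movie_list
# name below are illustrative assumptions, not part of this module:
#
#   tasks:
#     find-movies:
#       discover:
#         what:
#           - movie_list: watchlist
#         from:
#           - limetorrents:
#               category: movies
#               order_by: seeds
#
# A bare `limetorrents: yes` is also accepted, since the schema allows a
# plain boolean and then the defaults (category 'all', order by date) apply.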
|
mit
|
nycholas/ask-undrgz
|
src/ask-undrgz/django/utils/regex_helper.py
|
361
|
12079
|
"""
Functions for reversing a regular expression (used in reverse URL resolving).
Used internally by Django and not intended for external use.
This is not, and is not intended to be, a complete reg-exp decompiler. It
should be good enough for a large class of URLS, however.
"""
# Mapping of an escape character to a representative of that class. So, e.g.,
# "\w" is replaced by "x" in a reverse URL. A value of None means to ignore
# this sequence. Any missing key is mapped to itself.
ESCAPE_MAPPINGS = {
"A": None,
"b": None,
"B": None,
"d": u"0",
"D": u"x",
"s": u" ",
"S": u"x",
"w": u"x",
"W": u"!",
"Z": None,
}
class Choice(list):
"""
Used to represent multiple possibilities at this point in a pattern string.
We use a distinguished type, rather than a list, so that the usage in the
code is clear.
"""
class Group(list):
"""
Used to represent a capturing group in the pattern string.
"""
class NonCapture(list):
"""
Used to represent a non-capturing group in the pattern string.
"""
def normalize(pattern):
"""
Given a reg-exp pattern, normalizes it to a list of forms that suffice for
reverse matching. This does the following:
(1) For any repeating sections, keeps the minimum number of occurrences
permitted (this means zero for optional groups).
(2) If an optional group includes parameters, include one occurrence of
that group (along with the zero occurrence case from step (1)).
(3) Select the first (essentially an arbitrary) element from any character
class. Select an arbitrary character for any unordered class (e.g. '.'
or '\w') in the pattern.
(4) Ignore comments and any of the reg-exp flags that won't change
what we construct ("iLmsu"). "(?x)" is an error, however.
(5) Raise an error on all other non-capturing (?...) forms (e.g.
look-ahead and look-behind matches) and any disjunctive ('|')
constructs.
Django's URLs for forward resolving are either all positional arguments or
all keyword arguments. That is assumed here, as well. Although reverse
resolving can be done using positional args when keyword args are
specified, the two cannot be mixed in the same reverse() call.
"""
# Do a linear scan to work out the special features of this pattern. The
# idea is that we scan once here and collect all the information we need to
# make future decisions.
result = []
non_capturing_groups = []
consume_next = True
pattern_iter = next_char(iter(pattern))
num_args = 0
# A "while" loop is used here because later on we need to be able to peek
# at the next character and possibly go around without consuming another
# one at the top of the loop.
try:
ch, escaped = pattern_iter.next()
except StopIteration:
return zip([u''], [[]])
try:
while True:
if escaped:
result.append(ch)
elif ch == '.':
# Replace "any character" with an arbitrary representative.
result.append(u".")
elif ch == '|':
# FIXME: One day we should do this, but not in 1.0.
raise NotImplementedError
elif ch == "^":
pass
elif ch == '$':
break
elif ch == ')':
# This can only be the end of a non-capturing group, since all
# other unescaped parentheses are handled by the grouping
# section later (and the full group is handled there).
#
# We regroup everything inside the capturing group so that it
# can be quantified, if necessary.
start = non_capturing_groups.pop()
inner = NonCapture(result[start:])
result = result[:start] + [inner]
elif ch == '[':
# Replace ranges with the first character in the range.
ch, escaped = pattern_iter.next()
result.append(ch)
ch, escaped = pattern_iter.next()
while escaped or ch != ']':
ch, escaped = pattern_iter.next()
elif ch == '(':
# Some kind of group.
ch, escaped = pattern_iter.next()
if ch != '?' or escaped:
# A positional group
name = "_%d" % num_args
num_args += 1
result.append(Group(((u"%%(%s)s" % name), name)))
walk_to_end(ch, pattern_iter)
else:
ch, escaped = pattern_iter.next()
if ch in "iLmsu#":
# All of these are ignorable. Walk to the end of the
# group.
walk_to_end(ch, pattern_iter)
elif ch == ':':
# Non-capturing group
non_capturing_groups.append(len(result))
elif ch != 'P':
# Anything else, other than a named group, is something
# we cannot reverse.
raise ValueError("Non-reversible reg-exp portion: '(?%s'" % ch)
else:
ch, escaped = pattern_iter.next()
if ch != '<':
raise ValueError("Non-reversible reg-exp portion: '(?P%s'" % ch)
# We are in a named capturing group. Extract the name and
# then skip to the end.
name = []
ch, escaped = pattern_iter.next()
while ch != '>':
name.append(ch)
ch, escaped = pattern_iter.next()
param = ''.join(name)
result.append(Group(((u"%%(%s)s" % param), param)))
walk_to_end(ch, pattern_iter)
elif ch in "*?+{":
# Quantifiers affect the previous item in the result list.
count, ch = get_quantifier(ch, pattern_iter)
if ch:
# We had to look ahead, but it wasn't needed to compute the
# quantifier, so use this character next time around the
# main loop.
consume_next = False
if count == 0:
if contains(result[-1], Group):
# If we are quantifying a capturing group (or
# something containing such a group) and the minimum is
# zero, we must also handle the case of one occurrence
# being present. All the quantifiers (except {0,0},
# which we conveniently ignore) that have a 0 minimum
# also allow a single occurrence.
result[-1] = Choice([None, result[-1]])
else:
result.pop()
elif count > 1:
result.extend([result[-1]] * (count - 1))
else:
# Anything else is a literal.
result.append(ch)
if consume_next:
ch, escaped = pattern_iter.next()
else:
consume_next = True
except StopIteration:
pass
except NotImplementedError:
# A case of using the disjunctive form. No results for you!
return zip([u''], [[]])
return zip(*flatten_result(result))
def next_char(input_iter):
"""
An iterator that yields the next character from "input_iter", respecting
escape sequences. An escaped character is replaced by a representative of
its class (e.g. \w -> "x"). If the escaped character is one that is
skipped, it is not returned (the next character is returned instead).
Yields the next character, along with a boolean indicating whether it is a
raw (unescaped) character or not.
"""
for ch in input_iter:
if ch != '\\':
yield ch, False
continue
ch = input_iter.next()
representative = ESCAPE_MAPPINGS.get(ch, ch)
if representative is None:
continue
yield representative, True
def walk_to_end(ch, input_iter):
"""
The iterator is currently inside a capturing group. We want to walk to the
close of this group, skipping over any nested groups and handling escaped
parentheses correctly.
"""
if ch == '(':
nesting = 1
else:
nesting = 0
for ch, escaped in input_iter:
if escaped:
continue
elif ch == '(':
nesting += 1
elif ch == ')':
if not nesting:
return
nesting -= 1
def get_quantifier(ch, input_iter):
"""
Parse a quantifier from the input, where "ch" is the first character in the
quantifier.
Returns the minimum number of occurrences permitted by the quantifier and
either None or the next character from the input_iter if the next character
is not part of the quantifier.
"""
if ch in '*?+':
try:
ch2, escaped = input_iter.next()
except StopIteration:
ch2 = None
if ch2 == '?':
ch2 = None
if ch == '+':
return 1, ch2
return 0, ch2
quant = []
while ch != '}':
ch, escaped = input_iter.next()
quant.append(ch)
quant = quant[:-1]
values = ''.join(quant).split(',')
# Consume the trailing '?', if necessary.
try:
ch, escaped = input_iter.next()
except StopIteration:
ch = None
if ch == '?':
ch = None
return int(values[0]), ch
def contains(source, inst):
"""
Returns True if the "source" contains an instance of "inst". False,
otherwise.
"""
if isinstance(source, inst):
return True
if isinstance(source, NonCapture):
for elt in source:
if contains(elt, inst):
return True
return False
def flatten_result(source):
"""
Turns the given source sequence into a list of reg-exp possibilities and
their arguments. Returns a list of strings and a list of argument lists.
Each of the two lists will be of the same length.
"""
if source is None:
return [u''], [[]]
if isinstance(source, Group):
if source[1] is None:
params = []
else:
params = [source[1]]
return [source[0]], [params]
result = [u'']
result_args = [[]]
pos = last = 0
for pos, elt in enumerate(source):
if isinstance(elt, basestring):
continue
piece = u''.join(source[last:pos])
if isinstance(elt, Group):
piece += elt[0]
param = elt[1]
else:
param = None
last = pos + 1
for i in range(len(result)):
result[i] += piece
if param:
result_args[i].append(param)
if isinstance(elt, (Choice, NonCapture)):
if isinstance(elt, NonCapture):
elt = [elt]
inner_result, inner_args = [], []
for item in elt:
res, args = flatten_result(item)
inner_result.extend(res)
inner_args.extend(args)
new_result = []
new_args = []
for item, args in zip(result, result_args):
for i_item, i_args in zip(inner_result, inner_args):
new_result.append(item + i_item)
new_args.append(args[:] + i_args)
result = new_result
result_args = new_args
if pos >= last:
piece = u''.join(source[last:])
for i in range(len(result)):
result[i] += piece
return result, result_args
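# A short worked example of what normalize() produces, as a sketch of the
# expected behaviour of the code above (the URL pattern is illustrative):
#
#   normalize(r'^articles/(?P<year>\d{4})/$')
#   -> [(u'articles/%(year)s/', ['year'])]
#
# The named group collapses to a '%(year)s' placeholder and the literal text
# around it is preserved, which is the form reverse URL resolving needs.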
|
bsd-3-clause
|
landism/pants
|
tests/python/pants_test/cache/test_pinger.py
|
14
|
4804
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import unittest
import urlparse
from requests import RequestException
from pants.cache.pinger import BestUrlSelector, InvalidRESTfulCacheProtoError, Pinger
from pants_test.base_test import BaseTest
from pants_test.cache.delay_server import setup_delayed_server
class TestPinger(BaseTest):
# NB(gmalmquist): The tests in this file pass locally, but are decorated with expectedFailure
# because CI is usually too slow to run them before they time out.
resolution = 1
fast_delay_seconds = 0
fast_timeout_seconds = fast_delay_seconds + resolution
slow_delay_seconds = fast_timeout_seconds + resolution
slow_timeout_seconds = slow_delay_seconds + resolution
unreachable_delay_seconds = slow_timeout_seconds + 10 * resolution
message = "This test may fail occasionally if the CPU is very busy."
def setUp(self):
unreachable = setup_delayed_server(self.unreachable_delay_seconds)
slow = setup_delayed_server(self.slow_delay_seconds)
fast = setup_delayed_server(self.fast_delay_seconds)
self.servers = [unreachable, slow, fast]
self.fast_netloc = 'http://localhost:{}'.format(fast.socket.getsockname()[1])
self.slow_netloc = 'http://localhost:{}'.format(slow.socket.getsockname()[1])
self.unreachable_netloc = 'http://localhost:{}'.format(unreachable.socket.getsockname()[1])
self.https_external_netlock = 'https://github.com'
@unittest.expectedFailure
def test_pinger_times_correct(self):
test = Pinger(timeout=self.slow_timeout_seconds, tries=2)
netlocs = [self.fast_netloc, self.slow_netloc, self.unreachable_netloc]
ping_results = dict(test.pings(netlocs))
self.assertNotEqual(ping_results[self.slow_netloc], Pinger.UNREACHABLE)
self.assertLess(ping_results[self.fast_netloc], ping_results[self.slow_netloc])
self.assertEqual(ping_results[self.unreachable_netloc], Pinger.UNREACHABLE, msg=self.message)
@unittest.expectedFailure
def test_pinger_timeout_config(self):
test = Pinger(timeout=self.fast_timeout_seconds, tries=2)
netlocs = [self.fast_netloc, self.slow_netloc]
ping_results = dict(test.pings(netlocs))
self.assertLess(ping_results[self.fast_netloc], self.fast_timeout_seconds)
self.assertEqual(
ping_results[self.slow_netloc], Pinger.UNREACHABLE, msg=self.message)
@unittest.expectedFailure
def test_global_pinger_memo(self):
fast_pinger = Pinger(timeout=self.fast_timeout_seconds, tries=2)
slow_pinger = Pinger(timeout=self.slow_timeout_seconds, tries=2)
self.assertEqual(
fast_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE, msg=self.message)
self.assertNotEqual(
slow_pinger.pings([self.slow_netloc])[0][1], Pinger.UNREACHABLE, msg=self.message)
@unittest.expectedFailure
def test_https_external_pinger(self):
# NB(gmalmquist): I spent quite some time trying to spin up an HTTPS server and get it to work
# with this test, but it appears to be more trouble than it's worth. If you're feeling
# ambitious, feel free to give it a try.
pinger = Pinger(timeout=self.slow_delay_seconds, tries=2)
self.assertLess(pinger.ping(self.https_external_netlock), Pinger.UNREACHABLE)
def tearDown(self):
for server in self.servers:
server.shutdown()
class TestBestUrlSelector(BaseTest):
def setUp(self):
self.url1 = 'http://host1:123'
self.url2 = 'https://host2:456'
self.unsupported_url = 'ftp://ftpserver'
self.best_url_selector = BestUrlSelector([self.url1, self.url2], max_failures=1)
def call_url(self, expected_url, with_error=False):
try:
with self.best_url_selector.select_best_url() as url:
self.assertEquals(urlparse.urlparse(expected_url), url)
if with_error:
raise RequestException('error connecting to {}'.format(url))
except RequestException:
pass
def test_unsupported_protocol(self):
with self.assertRaises(InvalidRESTfulCacheProtoError):
BestUrlSelector([self.unsupported_url])
def test_select_next_url_after_max_consecutive_failures(self):
self.call_url(self.url1, with_error=True)
# A success call will reset the counter.
self.call_url(self.url1)
# Too many failures for url1, switch to url2.
self.call_url(self.url1, with_error=True)
self.call_url(self.url1, with_error=True)
self.call_url(self.url2)
# Too many failures for url2, switch to url1.
self.call_url(self.url2, with_error=True)
self.call_url(self.url2, with_error=True)
self.call_url(self.url1)
|
apache-2.0
|
parksandwildlife/wastd
|
wastd/observations/migrations/0017_auto_20200423_0940.py
|
1
|
1621
|
# Generated by Django 2.2.10 on 2020-04-23 01:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('observations', '0016_auto_20200305_1227'),
]
operations = [
migrations.AlterField(
model_name='observation',
name='encounter',
field=models.ForeignKey(help_text='The Encounter during which the observation was made', on_delete=django.db.models.deletion.CASCADE, related_name='encounter', to='observations.Encounter', verbose_name='Encounter'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceobservation',
name='light_sources_present',
field=models.CharField(choices=[('na', 'NA'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Light sources present during emergence'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceobservation',
name='outlier_tracks_present',
field=models.CharField(choices=[('na', 'NA'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Outlier tracks present'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceoutlierobservation',
name='outlier_group_size',
field=models.PositiveIntegerField(blank=True, help_text='', null=True, verbose_name='Number of tracks in outlier group'),
),
]
|
mit
|
todaychi/hue
|
desktop/core/ext-py/tablib-0.10.0/tablib/packages/odf/xforms.py
|
96
|
1231
|
# -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import XFORMSNS
from element import Element
# ODF 1.0 section 11.2
# XForms is designed to be embedded in another XML format.
# Autogenerated
def Model(**args):
return Element(qname = (XFORMSNS,'model'), **args)
def Instance(**args):
return Element(qname = (XFORMSNS,'instance'), **args)
def Bind(**args):
return Element(qname = (XFORMSNS,'bind'), **args)
|
apache-2.0
|
nens/turn
|
setup.py
|
1
|
1473
|
from setuptools import setup
version = '1.0.1.dev0'
long_description = '\n\n'.join([
open('README.rst').read(),
open('CREDITS.rst').read(),
open('CHANGES.rst').read(),
])
install_requires = [
'redis>=2.10.5',
]
tests_require = ["flake8", "ipdb", "ipython", "pytest", "pytest-cov"]
setup(name='turn',
version=version,
description=('A shared-resource-locking '
'queue system using python and redis.'),
long_description=long_description,
keywords=['redis', 'queue', 'resource', 'shared'],
author='Arjan Verkerk',
author_email='[email protected]',
url='https://github.com/nens/turn',
license='GPL',
packages=['turn'],
include_package_data=True,
zip_safe=False,
install_requires=install_requires,
tests_require=tests_require,
extras_require={'test': tests_require},
classifiers = [
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Python Modules',
],
entry_points={
'console_scripts': [
'turn = turn.console:main',
]},
)
|
gpl-3.0
|
qedsoftware/commcare-hq
|
corehq/apps/users/tests/test_signals.py
|
1
|
4010
|
import uuid
from django.test import SimpleTestCase
from elasticsearch.exceptions import ConnectionError
from mock import patch, MagicMock
from corehq.apps.reports.analytics.esaccessors import get_user_stubs
from corehq.elastic import doc_exists_in_es, get_es_new
from corehq.pillows.mappings.user_mapping import USER_INDEX_INFO
from corehq.util.test_utils import trap_extra_setup, mock_out_couch
from dimagi.utils.couch.undo import DELETED_SUFFIX
from ..models import CommCareUser, WebUser
# Note that you can't directly patch the signal handler, as that code has
# already been called. It's easier to patch something that the handler calls.
# Also, you need to patch the path to the function in the file where the signal
# handler uses it, not where it's actually defined. That's quite a gotcha.
from pillowtop.es_utils import initialize_index_and_mapping
@mock_out_couch()
@patch('corehq.apps.users.models.CouchUser.sync_to_django_user', new=MagicMock)
class TestUserSignals(SimpleTestCase):
@patch('corehq.apps.analytics.signals.update_hubspot_properties')
@patch('corehq.apps.callcenter.signals.sync_call_center_user_case')
@patch('corehq.apps.cachehq.signals.invalidate_document')
@patch('corehq.apps.users.signals.send_to_elasticsearch')
def test_commcareuser_save(self, send_to_es, invalidate, sync_call_center,
update_hubspot_properties):
CommCareUser().save()
self.assertTrue(send_to_es.called)
self.assertTrue(invalidate.called)
self.assertTrue(sync_call_center.called)
self.assertFalse(update_hubspot_properties.called)
@patch('corehq.apps.analytics.signals.update_hubspot_properties')
@patch('corehq.apps.callcenter.signals.sync_call_center_user_case')
@patch('corehq.apps.cachehq.signals.invalidate_document')
@patch('corehq.apps.users.signals.send_to_elasticsearch')
def test_webuser_save(self, send_to_es, invalidate, sync_call_center,
update_hubspot_properties):
WebUser().save()
self.assertTrue(send_to_es.called)
self.assertTrue(invalidate.called)
self.assertFalse(sync_call_center.called)
self.assertTrue(update_hubspot_properties.called)
@mock_out_couch()
@patch('corehq.apps.users.models.CouchUser.sync_to_django_user', new=MagicMock)
@patch('corehq.apps.analytics.signals.update_hubspot_properties')
@patch('corehq.apps.callcenter.signals.sync_call_center_user_case')
@patch('corehq.apps.cachehq.signals.invalidate_document')
class TestUserSyncToEs(SimpleTestCase):
@classmethod
def setUpClass(cls):
super(TestUserSyncToEs, cls).setUpClass()
# create the index
cls.es = get_es_new()
with trap_extra_setup(ConnectionError):
initialize_index_and_mapping(cls.es, USER_INDEX_INFO)
def test_sync_to_es_create_update_delete(self, *mocks):
domain = 'user_es_domain'
user = CommCareUser(
domain=domain,
username='user1',
_id=uuid.uuid4().hex,
is_active=True,
first_name='user1 first name',
last_name='user1 last name',
)
user.save()
self.check_user(user)
user.first_name = 'new first name'
user.save()
self.check_user(user)
# simulate retire without needing couch
user.base_doc += DELETED_SUFFIX
user.save()
self.es.indices.refresh(USER_INDEX_INFO.index)
self.assertFalse(doc_exists_in_es(USER_INDEX_INFO, user._id))
def check_user(self, user):
self.es.indices.refresh(USER_INDEX_INFO.index)
results = get_user_stubs([user._id])
self.assertEquals(len(results), 1)
self.assertEquals(results[0], {
'_id': user._id,
'username': user.username,
'is_active': True,
'first_name': user.first_name,
'last_name': user.last_name,
'doc_type': user.doc_type,
})
|
bsd-3-clause
|
ActiveState/code
|
recipes/Python/528897_poker_dice/recipe-528897.py
|
1
|
2821
|
# just a simple game of poker dice, using 5 dice - with the
# computer throwing for you - and then you choose which
# dice to keep or reuse for the next throw
import os
def clear():
os.system("clear")
clear()
print
print " Poker dice game "
px = 2
while px == 2:
print
print " The computer will help you throw your 5 dice "
print
rand = range(1,7)
import random
dice = range(5)
y =0
while y < 5:
y = y + 1
dice[y-1] = random.choice(rand)
print " The throw gives the following result .. ", dice
for i in range(len(dice)):
print " dice position No.",i + 1,"\t"," .... throws ..",dice[i]
print
howmany = input('How many dice do you want to\
throw again\nto make full house, or five of a kind etc.....>>>> ')
print
print "Input the dice position number to remove the dice\
,\nand REMEMBER to press enter each time\n(except when you choose\
a complete re-throw)"
print
tt = 0
while tt < howmany:
tt = tt + 1
if howmany == 5:
break
yy = input (' ...>>> ')
if yy == 1 and tt == 1:
del dice[0]
if yy == 2 and tt == 1:
del dice[1]
if yy == 2 and tt == 2:
del dice[0]
if yy == 3 and tt == 1:
del dice[2]
if yy == 3 and tt == 2:
del dice[1]
if yy == 3 and tt == 3:
del dice[0]
if yy == 4 and tt == 1:
del dice[3]
if yy == 4 and tt == 2:
del dice[2]
if yy == 4 and tt ==3:
del dice[1]
if yy == 4 and tt == 4:
del dice[0]
if yy == 5 and tt == 1:
del dice[4]
if yy == 5 and tt == 2:
del dice[3]
if yy == 5 and tt == 3:
del dice[2]
if yy == 5 and tt == 4:
del dice[1]
if yy == 5 and tt == 5:
del dice[0]
if howmany < 5:
print "your first throw (i.e dice kept) ... ",dice
if howmany == 5:
print "dice kept = none"
dice2 = range(howmany)
y =0
while y < howmany:
y = y + 1
dice2[y-1] = random.choice(rand)
uu = 0
while uu < howmany:
uu = uu + 1
fff = dice2[uu-1]
dice.insert(0,fff)
print
if howmany < 5:
print "The new throw(s) give you ... ",dice2
print
if howmany < 5:
for i in range(len(dice)):
print " Dice position No.",i + 1,"(b)"," ...... ",dice[i]
print
if howmany == 5:
for i in range(len(dice2)):
print " Dice position No.",i + 1,"(b)"," ...... ",dice2[i]
print
again = raw_input("Do you want to play poker dice\
again,\nenter y for yes and n for no ... ")
if again == 'y':
px = 2
if again == 'n':
px = 0
print
print "finish"
|
mit
|
jank3/django
|
django/contrib/postgres/forms/ranges.py
|
393
|
3005
|
from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange
from django import forms
from django.core import exceptions
from django.forms.widgets import MultiWidget
from django.utils.translation import ugettext_lazy as _
__all__ = ['IntegerRangeField', 'FloatRangeField', 'DateTimeRangeField', 'DateRangeField']
class BaseRangeField(forms.MultiValueField):
default_error_messages = {
'invalid': _('Enter two valid values.'),
'bound_ordering': _('The start of the range must not exceed the end of the range.'),
}
def __init__(self, **kwargs):
kwargs.setdefault('widget', RangeWidget(self.base_field.widget))
kwargs.setdefault('fields', [self.base_field(required=False), self.base_field(required=False)])
kwargs.setdefault('required', False)
kwargs.setdefault('require_all_fields', False)
super(BaseRangeField, self).__init__(**kwargs)
def prepare_value(self, value):
lower_base, upper_base = self.fields
if isinstance(value, self.range_type):
return [
lower_base.prepare_value(value.lower),
upper_base.prepare_value(value.upper),
]
if value is None:
return [
lower_base.prepare_value(None),
upper_base.prepare_value(None),
]
return value
def compress(self, values):
if not values:
return None
lower, upper = values
if lower is not None and upper is not None and lower > upper:
raise exceptions.ValidationError(
self.error_messages['bound_ordering'],
code='bound_ordering',
)
try:
range_value = self.range_type(lower, upper)
except TypeError:
raise exceptions.ValidationError(
self.error_messages['invalid'],
code='invalid',
)
else:
return range_value
class IntegerRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two whole numbers.')}
base_field = forms.IntegerField
range_type = NumericRange
class FloatRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two numbers.')}
base_field = forms.FloatField
range_type = NumericRange
class DateTimeRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid date/times.')}
base_field = forms.DateTimeField
range_type = DateTimeTZRange
class DateRangeField(BaseRangeField):
default_error_messages = {'invalid': _('Enter two valid dates.')}
base_field = forms.DateField
range_type = DateRange
class RangeWidget(MultiWidget):
def __init__(self, base_widget, attrs=None):
widgets = (base_widget, base_widget)
super(RangeWidget, self).__init__(widgets, attrs)
def decompress(self, value):
if value:
return (value.lower, value.upper)
return (None, None)
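# A brief usage sketch of the range fields above (the values are illustrative,
# not part of this module):
#
#   f = IntegerRangeField()
#   f.clean(['1', '5'])   # -> NumericRange(1, 5)
#   f.clean(['5', '1'])   # raises ValidationError (code='bound_ordering')
#   f.clean(['', ''])     # -> None (both bounds optional, field not required)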
|
bsd-3-clause
|
kailIII/geraldo
|
site/newsite/django_1_0/tests/regressiontests/file_uploads/tests.py
|
9
|
8537
|
import os
import errno
import sha
import shutil
import unittest
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.test import TestCase, client
from django.utils import simplejson
from models import FileModel, UPLOAD_ROOT, UPLOAD_TO
class FileUploadTests(TestCase):
def test_simple_upload(self):
post_data = {
'name': 'Ringo',
'file_field': open(__file__),
}
response = self.client.post('/file_uploads/upload/', post_data)
self.assertEqual(response.status_code, 200)
def test_large_upload(self):
tdir = tempfile.gettempdir()
file1 = tempfile.NamedTemporaryFile(suffix=".file1", dir=tdir)
file1.write('a' * (2 ** 21))
file1.seek(0)
file2 = tempfile.NamedTemporaryFile(suffix=".file2", dir=tdir)
file2.write('a' * (10 * 2 ** 20))
file2.seek(0)
# This file contains Chinese symbols for a name.
file3 = open(os.path.join(tdir, u'test_中文_Orl\u00e9ans.jpg'.encode('utf-8')), 'w+b')
file3.write('b' * (2 ** 10))
file3.seek(0)
post_data = {
'name': 'Ringo',
'file_field1': open(file1.name),
'file_field2': open(file2.name),
'file_unicode': file3,
}
for key in post_data.keys():
try:
post_data[key + '_hash'] = sha.new(post_data[key].read()).hexdigest()
post_data[key].seek(0)
except AttributeError:
post_data[key + '_hash'] = sha.new(post_data[key]).hexdigest()
response = self.client.post('/file_uploads/verify/', post_data)
try:
os.unlink(file3.name)
except:
pass
self.assertEqual(response.status_code, 200)
def test_dangerous_file_names(self):
"""Uploaded file names should be sanitized before ever reaching the view."""
# This test simulates possible directory traversal attacks by a
# malicious uploader. We have to do some monkeybusiness here to construct
# a malicious payload with an invalid file name (containing os.sep or
# os.pardir). This is similar to what an attacker would need to do when
# trying such an attack.
scary_file_names = [
"/tmp/hax0rd.txt", # Absolute path, *nix-style.
"C:\\Windows\\hax0rd.txt", # Absolute path, win-style.
"C:/Windows/hax0rd.txt", # Absolute path, broken-style.
"\\tmp\\hax0rd.txt", # Absolute path, broken in a different way.
"/tmp\\hax0rd.txt", # Absolute path, broken by mixing.
"subdir/hax0rd.txt", # Descendant path, *nix-style.
"subdir\\hax0rd.txt", # Descendant path, win-style.
"sub/dir\\hax0rd.txt", # Descendant path, mixed.
"../../hax0rd.txt", # Relative path, *nix-style.
"..\\..\\hax0rd.txt", # Relative path, win-style.
"../..\\hax0rd.txt" # Relative path, mixed.
]
payload = []
for i, name in enumerate(scary_file_names):
payload.extend([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
'Content-Type: application/octet-stream',
'',
'You got pwnd.'
])
payload.extend([
'--' + client.BOUNDARY + '--',
'',
])
payload = "\r\n".join(payload)
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/file_uploads/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(payload),
}
response = self.client.request(**r)
# The filenames should have been sanitized by the time it got to the view.
received = simplejson.loads(response.content)
for i, name in enumerate(scary_file_names):
got = received["file%s" % i]
self.assertEqual(got, "hax0rd.txt")
def test_filename_overflow(self):
"""File names over 256 characters (dangerous on some platforms) get fixed up."""
name = "%s.txt" % ("f"*500)
payload = "\r\n".join([
'--' + client.BOUNDARY,
'Content-Disposition: form-data; name="file"; filename="%s"' % name,
'Content-Type: application/octet-stream',
'',
'Oops.'
'--' + client.BOUNDARY + '--',
'',
])
r = {
'CONTENT_LENGTH': len(payload),
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': "/file_uploads/echo/",
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(payload),
}
got = simplejson.loads(self.client.request(**r).content)
self.assert_(len(got['file']) < 256, "Got a long file name (%s characters)." % len(got['file']))
def test_custom_upload_handler(self):
# A small file (under the 5M quota)
smallfile = tempfile.NamedTemporaryFile()
smallfile.write('a' * (2 ** 21))
# A big file (over the quota)
bigfile = tempfile.NamedTemporaryFile()
bigfile.write('a' * (10 * 2 ** 20))
# Small file posting should work.
response = self.client.post('/file_uploads/quota/', {'f': open(smallfile.name)})
got = simplejson.loads(response.content)
self.assert_('f' in got)
# Large files don't go through.
response = self.client.post("/file_uploads/quota/", {'f': open(bigfile.name)})
got = simplejson.loads(response.content)
self.assert_('f' not in got)
def test_broken_custom_upload_handler(self):
f = tempfile.NamedTemporaryFile()
f.write('a' * (2 ** 21))
# AttributeError: You cannot alter upload handlers after the upload has been processed.
self.assertRaises(
AttributeError,
self.client.post,
'/file_uploads/quota/broken/',
{'f': open(f.name)}
)
def test_fileupload_getlist(self):
file1 = tempfile.NamedTemporaryFile()
file1.write('a' * (2 ** 23))
file2 = tempfile.NamedTemporaryFile()
file2.write('a' * (2 * 2 ** 18))
file2a = tempfile.NamedTemporaryFile()
file2a.write('a' * (5 * 2 ** 20))
response = self.client.post('/file_uploads/getlist_count/', {
'file1': open(file1.name),
'field1': u'test',
'field2': u'test3',
'field3': u'test5',
'field4': u'test6',
'field5': u'test7',
'file2': (open(file2.name), open(file2a.name))
})
got = simplejson.loads(response.content)
self.assertEqual(got.get('file1'), 1)
self.assertEqual(got.get('file2'), 2)
class DirectoryCreationTests(unittest.TestCase):
"""
Tests for error handling during directory creation
via _save_FIELD_file (ticket #6450)
"""
def setUp(self):
self.obj = FileModel()
if not os.path.isdir(UPLOAD_ROOT):
os.makedirs(UPLOAD_ROOT)
def tearDown(self):
os.chmod(UPLOAD_ROOT, 0700)
shutil.rmtree(UPLOAD_ROOT)
def test_readonly_root(self):
"""Permission errors are not swallowed"""
os.chmod(UPLOAD_ROOT, 0500)
try:
self.obj.save_testfile_file('foo.txt', SimpleUploadedFile('foo.txt', 'x'))
except OSError, err:
self.assertEquals(err.errno, errno.EACCES)
except:
self.fail("OSError [Errno %s] not raised" % errno.EACCES)
def test_not_a_directory(self):
"""The correct IOError is raised when the upload directory name exists but isn't a directory"""
# Create a file with the upload directory name
fd = open(UPLOAD_TO, 'w')
fd.close()
try:
self.obj.save_testfile_file('foo.txt', SimpleUploadedFile('foo.txt', 'x'))
except IOError, err:
# The test needs to be done on a specific string as IOError
# is raised even without the patch (just not early enough)
self.assertEquals(err.args[0],
"%s exists and is not a directory" % UPLOAD_TO)
except:
self.fail("IOError not raised")
|
lgpl-3.0
|
Lh4cKg/sl4a
|
python/src/Lib/plat-mac/lib-scriptpackages/SystemEvents/Login_Items_Suite.py
|
82
|
1733
|
"""Suite Login Items Suite: Terms and Events for controlling the Login Items application
Level 1, version 1
Generated from /System/Library/CoreServices/System Events.app
AETE/AEUT resource version 1/0, language 0, script 0
"""
import aetools
import MacOS
_code = 'logi'
class Login_Items_Suite_Events:
pass
class login_item(aetools.ComponentItem):
"""login item - an item to be launched or opened at login """
want = 'logi'
class _Prop__3c_Inheritance_3e_(aetools.NProperty):
"""<Inheritance> - All of the properties of the superclass. """
which = 'c@#^'
want = 'cobj'
class _Prop_hidden(aetools.NProperty):
"""hidden - Is the Login Item hidden when launched? """
which = 'hidn'
want = 'bool'
class _Prop_kind(aetools.NProperty):
"""kind - the file type of the Login Item """
which = 'kind'
want = 'utxt'
class _Prop_name(aetools.NProperty):
"""name - the name of the Login Item """
which = 'pnam'
want = 'utxt'
class _Prop_path(aetools.NProperty):
"""path - the file system path to the Login Item """
which = 'ppth'
want = 'utxt'
login_items = login_item
import Standard_Suite
login_item._superclassnames = ['item']
login_item._privpropdict = {
'_3c_Inheritance_3e_' : _Prop__3c_Inheritance_3e_,
'hidden' : _Prop_hidden,
'kind' : _Prop_kind,
'name' : _Prop_name,
'path' : _Prop_path,
}
login_item._privelemdict = {
}
#
# Indices of types declared in this module
#
_classdeclarations = {
'logi' : login_item,
}
_propdeclarations = {
'c@#^' : _Prop__3c_Inheritance_3e_,
'hidn' : _Prop_hidden,
'kind' : _Prop_kind,
'pnam' : _Prop_name,
'ppth' : _Prop_path,
}
_compdeclarations = {
}
_enumdeclarations = {
}
|
apache-2.0
|
AgileInstitute/labs-cpp-gtest
|
SuperSet/gtest-1.7.0/test/gtest_uninitialized_test.py
|
2901
|
2480
|
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test warns the user when not initialized properly."""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_')
def Assert(condition):
if not condition:
raise AssertionError
def AssertEq(expected, actual):
if expected != actual:
print 'Expected: %s' % (expected,)
print ' Actual: %s' % (actual,)
raise AssertionError
def TestExitCodeAndOutput(command):
"""Runs the given command and verifies its exit code and output."""
# Verifies that 'command' exits with code 1.
p = gtest_test_utils.Subprocess(command)
Assert(p.exited)
AssertEq(1, p.exit_code)
Assert('InitGoogleTest' in p.output)
class GTestUninitializedTest(gtest_test_utils.TestCase):
def testExitCodeAndOutput(self):
TestExitCodeAndOutput(COMMAND)
if __name__ == '__main__':
gtest_test_utils.Main()
|
mit
|
saumishr/django
|
tests/regressiontests/localflavor/de/tests.py
|
33
|
1847
|
from django.contrib.localflavor.de.forms import (DEZipCodeField, DEStateSelect,
DEIdentityCardNumberField)
from django.test import SimpleTestCase
class DELocalFlavorTests(SimpleTestCase):
def test_DEStateSelect(self):
f = DEStateSelect()
out = u'''<select name="states">
<option value="BW">Baden-Wuerttemberg</option>
<option value="BY">Bavaria</option>
<option value="BE">Berlin</option>
<option value="BB">Brandenburg</option>
<option value="HB">Bremen</option>
<option value="HH">Hamburg</option>
<option value="HE">Hessen</option>
<option value="MV">Mecklenburg-Western Pomerania</option>
<option value="NI">Lower Saxony</option>
<option value="NW">North Rhine-Westphalia</option>
<option value="RP">Rhineland-Palatinate</option>
<option value="SL">Saarland</option>
<option value="SN">Saxony</option>
<option value="ST">Saxony-Anhalt</option>
<option value="SH">Schleswig-Holstein</option>
<option value="TH" selected="selected">Thuringia</option>
</select>'''
self.assertHTMLEqual(f.render('states', 'TH'), out)
def test_DEZipCodeField(self):
error_format = [u'Enter a zip code in the format XXXXX.']
valid = {
'99423': '99423',
}
invalid = {
' 99423': error_format,
}
self.assertFieldOutput(DEZipCodeField, valid, invalid)
def test_DEIdentityCardNumberField(self):
error_format = [u'Enter a valid German identity card number in XXXXXXXXXXX-XXXXXXX-XXXXXXX-X format.']
valid = {
'7549313035D-6004103-0903042-0': '7549313035D-6004103-0903042-0',
'9786324830D 6104243 0910271 2': '9786324830D-6104243-0910271-2',
}
invalid = {
'0434657485D-6407276-0508137-9': error_format,
}
self.assertFieldOutput(DEIdentityCardNumberField, valid, invalid)
|
bsd-3-clause
|
jeffery-do/Vizdoombot
|
doom/lib/python3.5/site-packages/scipy/special/tests/test_spence.py
|
60
|
1099
|
import numpy as np
from numpy import sqrt, log, pi
from scipy.special._testutils import FuncData
from scipy.special import spence
def test_consistency():
# Make sure the implementation of spence for real arguments
# agrees with the implementation of spence for imaginary arguments.
x = np.logspace(-30, 300, 200)
dataset = np.vstack((x + 0j, spence(x))).T
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
def test_special_points():
# Check against known values of Spence's function.
phi = (1 + sqrt(5))/2
dataset = [(1, 0),
(2, -pi**2/12),
(0.5, pi**2/12 - log(2)**2/2),
(0, pi**2/6),
(-1, pi**2/4 - 1j*pi*log(2)),
((-1 + sqrt(5))/2, pi**2/15 - log(phi)**2),
((3 - sqrt(5))/2, pi**2/10 - log(phi)**2),
(phi, -pi**2/15 + log(phi)**2/2),
# Corrected from Zagier, "The Dilogarithm Function"
((3 + sqrt(5))/2, -pi**2/10 - log(phi)**2)]
dataset = np.asarray(dataset)
FuncData(spence, dataset, 0, 1, rtol=1e-14).check()
|
mit
|
sudosurootdev/external_chromium_org
|
mojo/python/tests/bindings_serialization_deserialization_unittest.py
|
25
|
3603
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import math
import unittest
# pylint: disable=F0401
import mojo.bindings.reflection as reflection
import mojo.system
# Generated files
# pylint: disable=F0401
import sample_import_mojom
import sample_import2_mojom
import sample_service_mojom
def _NewHandle():
return mojo.system.MessagePipe().handle0
def _TestEquality(x, y):
if x == y:
return True
if type(x) != type(y):
print '\n%r != %r. Elements are not of the same type.' % (x, y)
return False
if isinstance(x, float) and math.isnan(x) and math.isnan(y):
return True
if hasattr(x, '__len__'):
if len(x) != len(y):
print '\n%r != %r. Iterables are not of the same size.' % (x, y)
return False
for (x1, y1) in zip(x, y):
if not _TestEquality(x1, y1):
return False
return True
if (hasattr(x, '__metaclass__') and
x.__metaclass__ == reflection.MojoStructType):
properties = [p for p in dir(x) if not p.startswith('_')]
for p in properties:
p1 = getattr(x, p)
p2 = getattr(y, p)
if not hasattr(p1, '__call__') and not _TestEquality(p1, p2):
print '\n%r != %r. Not equal for property %r.' % (x, y, p)
return False
return True
return False
def _NewBar():
bar_instance = sample_service_mojom.Bar()
bar_instance.alpha = 22
bar_instance.beta = 87
bar_instance.gamma = 122
bar_instance.type = sample_service_mojom.Bar.Type.BOTH
return bar_instance
def _NewFoo():
foo_instance = sample_service_mojom.Foo()
foo_instance.name = "Foo.name"
foo_instance.x = 23
foo_instance.y = -23
foo_instance.a = False
foo_instance.b = True
foo_instance.c = True
foo_instance.bar = _NewBar()
foo_instance.extra_bars = [
_NewBar(),
_NewBar(),
]
foo_instance.data = 'Hello world'
foo_instance.source = _NewHandle()
foo_instance.input_streams = [ _NewHandle() ]
foo_instance.output_streams = [ _NewHandle(), _NewHandle() ]
foo_instance.array_of_array_of_bools = [ [ True, False ], [] ]
foo_instance.multi_array_of_strings = [
[
[ "1", "2" ],
[],
[ "3", "4" ],
],
[],
]
foo_instance.array_of_bools = [ True, 0, 1, 2, 0, 0, 0, 0, 0, True ]
return foo_instance
class SerializationDeserializationTest(unittest.TestCase):
def testTestEquality(self):
self.assertFalse(_TestEquality(1, 2))
def testFooSerialization(self):
(data, _) = _NewFoo().Serialize()
self.assertTrue(len(data))
self.assertEquals(len(data) % 8, 0)
def testFooDeserialization(self):
(data, handles) = _NewFoo().Serialize()
self.assertTrue(
sample_service_mojom.Foo.Deserialize(data, handles))
def testFooSerializationDeserialization(self):
foo1 = _NewFoo()
(data, handles) = foo1.Serialize()
foo2 = sample_service_mojom.Foo.Deserialize(data, handles)
self.assertTrue(_TestEquality(foo1, foo2))
def testDefaultsTestSerializationDeserialization(self):
v1 = sample_service_mojom.DefaultsTest()
v1.a18 = []
v1.a19 = ""
v1.a21 = sample_import_mojom.Point()
v1.a22.location = sample_import_mojom.Point()
v1.a22.size = sample_import2_mojom.Size()
(data, handles) = v1.Serialize()
v2 = sample_service_mojom.DefaultsTest.Deserialize(data, handles)
self.assertTrue(_TestEquality(v1, v2))
def testFooDeserializationError(self):
with self.assertRaises(Exception):
sample_service_mojom.Foo.Deserialize("", [])
|
bsd-3-clause
|
nanobox-io/nanobox-pkgsrc-base
|
nodejs012/patches/patch-tools_gyp_pylib_gyp_generator_make.py
|
2
|
1181
|
$NetBSD: patch-tools_gyp_pylib_gyp_generator_make.py,v 1.3 2013/12/12 11:52:37 jperkin Exp $
Add support for NetBSD and DragonFly.
Ensure we use the system libtool on OSX.
--- tools/gyp/pylib/gyp/generator/make.py.orig 2015-02-06 20:04:23.000000000 +0000
+++ tools/gyp/pylib/gyp/generator/make.py
@@ -168,7 +168,7 @@ cmd_solink_module = $(LINK.$(TOOLSET)) -
LINK_COMMANDS_MAC = """\
quiet_cmd_alink = LIBTOOL-STATIC $@
-cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
+cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool /usr/bin/libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^)
quiet_cmd_link = LINK($(TOOLSET)) $@
cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS)
@@ -2008,7 +2008,7 @@ def GenerateOutput(target_list, target_d
'flock': './gyp-flock-tool flock',
'flock_index': 2,
})
- elif flavor == 'freebsd':
+ elif flavor == 'freebsd' or flavor == 'dragonflybsd' or flavor == 'netbsd':
# Note: OpenBSD has sysutils/flock. lockf seems to be FreeBSD specific.
header_params.update({
'flock': 'lockf',
|
mit
|
NiceCircuits/pcbLibraryManager
|
src/pcbLibraryManager/libraryManager/common.py
|
1
|
3365
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 23:12:28 2015
@author: piotr at nicecircuits.com
"""
import datetime
import math
def timestamp():
return datetime.datetime.now().strftime("%Y%m%d%H%M%S")
def rectangleToPolyLinePoints(position, dimensions, rotation=0.0):
# start with unit rectangle
points = [[-0.5, -0.5], [-0.5, 0.5], [0.5, 0.5], [0.5, -0.5], [-0.5, -0.5]]
return translatePoints(scalePoints(rotatePoints(points, rotation), dimensions), position)
def rectangleCorners(position, dimensions):
return [[position[0]-dimensions[0]/2, position[1] - dimensions[1]/2],\
[position[0]+dimensions[0]/2, position[1] + dimensions[1]/2],]
def polylinePointsToLines(points):
return [[points[i], points[i+1]] for i in range(len(points)-1)]
def scalePoint(point, scale):
return scalePoints([point],scale)[0]
def scalePoints(points, scale):
if not type(scale) is list:
scale = [scale, scale]
return [[x*scale[0], y*scale[1]] for x,y in points]
def rotatePoint(point, rotation):
return rotatePoints([point], rotation)[0]
def rotatePoints(points, rotation):
ret=[]
for p in points:
if p:
if len(p)==3:
(x,y,z)=p
p3d=True
else:
x,y=p
p3d=False
rho, phi = cart2pol(x, y)
phi = phi + rotation
x, y = pol2cart(rho, phi)
if p3d:
ret.append([x,y,z])
else:
ret.append([x,y])
else:
ret.append(p)
return ret
def translatePoint(point, translation):
if point:
return translatePoints([point], translation)[0]
else:
return point
def translatePoints(points, translation):
if len(points[0])==2:
return [[x+translation[0], y+translation[1]] for x,y in points]
elif len(points[0])==3:
if len(translation)==2:
translation.append(0)
return [[x+translation[0], y+translation[1], z+translation[2]] for x,y,z in points]
else:
raise ValueError("invalid points")
def mirrorPoint(point, axis):
if axis=="X":
point[0]=-point[0]
elif axis=="Y":
point[1]=-point[1]
else:
raise ValueError("unsupported mirror axis %s" % axis)
return point
def mirrorPoints(points, axis):
return [mirrorPoint(p, axis) for p in points]
def cos(x):
return math.cos(math.radians(x))
def sin(x):
return math.sin(math.radians(x))
def almostEqual(a, b, thr=0.001):
if (a > b-thr) and (a < b+thr):
return True
else:
return False
def cart2pol(x, y):
rho = math.sqrt(x**2 + y**2)
phi = math.degrees(math.atan2(y, x))
return(rho, phi)
def pol2cart(rho, phi):
x = rho * math.cos(math.radians(phi))
y = rho * math.sin(math.radians(phi))
return(x, y)
def mil(x):
"Convert mil to mm"
return x*0.0254
def mm2inch(x):
"Convert mm to inch"
return x/25.4
def isIterable(x):
return hasattr(x, '__iter__')
def isArray(x):
return not hasattr(x, "strip") and \
(hasattr(x, "__getitem__") or hasattr(x, "__iter__"))
class textAlign:
"""
"""
center, centerLeft, centerRight, topCenter, topLeft, topRight,\
bottomCenter, bottomLeft, bottomRight = range(9)
if __name__ == "__main__":
print(timestamp())
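# A small illustrative sketch of the geometry helpers above (the values are
# chosen for clarity and are not part of the original module):
#
#   rectangleToPolyLinePoints([10, 0], [2, 1], rotation=90)
#   # -> unit-square corners rotated 90 degrees, scaled to 2 x 1,
#   #    then shifted so the rectangle is centred on (10, 0)
#   almostEqual(cos(60), 0.5)   # -> True (cos/sin here take degrees)
#   mirrorPoint([3, -2], "X")   # -> [-3, -2]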
|
cc0-1.0
|
johnkit/vtk-dev
|
ThirdParty/Twisted/twisted/copyright.py
|
23
|
1466
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Copyright information for Twisted.
"""
from __future__ import division, absolute_import
from twisted import __version__ as version, version as longversion
longversion = str(longversion)
copyright="""\
Copyright (c) 2001-2014 Twisted Matrix Laboratories.
See LICENSE for details."""
disclaimer='''
Twisted, the Framework of Your Internet
%s
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
''' % (copyright,)
|
bsd-3-clause
|
2014c2g5/2014cadp
|
wsgi/static/Brython2.2.0rc0-20140913-093500/Lib/unittest/test/testmock/testhelpers.py
|
737
|
25793
|
import unittest
from unittest.mock import (
call, _Call, create_autospec, MagicMock,
Mock, ANY, _CallList, patch, PropertyMock
)
from datetime import datetime
class SomeClass(object):
def one(self, a, b):
pass
def two(self):
pass
def three(self, a=None):
pass
class AnyTest(unittest.TestCase):
def test_any(self):
self.assertEqual(ANY, object())
mock = Mock()
mock(ANY)
mock.assert_called_with(ANY)
mock = Mock()
mock(foo=ANY)
mock.assert_called_with(foo=ANY)
def test_repr(self):
self.assertEqual(repr(ANY), '<ANY>')
self.assertEqual(str(ANY), '<ANY>')
def test_any_and_datetime(self):
mock = Mock()
mock(datetime.now(), foo=datetime.now())
mock.assert_called_with(ANY, foo=ANY)
def test_any_mock_calls_comparison_order(self):
mock = Mock()
d = datetime.now()
class Foo(object):
def __eq__(self, other):
return False
def __ne__(self, other):
return True
for d in datetime.now(), Foo():
mock.reset_mock()
mock(d, foo=d, bar=d)
mock.method(d, zinga=d, alpha=d)
mock().method(a1=d, z99=d)
expected = [
call(ANY, foo=ANY, bar=ANY),
call.method(ANY, zinga=ANY, alpha=ANY),
call(), call().method(a1=ANY, z99=ANY)
]
self.assertEqual(expected, mock.mock_calls)
self.assertEqual(mock.mock_calls, expected)
class CallTest(unittest.TestCase):
def test_call_with_call(self):
kall = _Call()
self.assertEqual(kall, _Call())
self.assertEqual(kall, _Call(('',)))
self.assertEqual(kall, _Call(((),)))
self.assertEqual(kall, _Call(({},)))
self.assertEqual(kall, _Call(('', ())))
self.assertEqual(kall, _Call(('', {})))
self.assertEqual(kall, _Call(('', (), {})))
self.assertEqual(kall, _Call(('foo',)))
self.assertEqual(kall, _Call(('bar', ())))
self.assertEqual(kall, _Call(('baz', {})))
self.assertEqual(kall, _Call(('spam', (), {})))
kall = _Call(((1, 2, 3),))
self.assertEqual(kall, _Call(((1, 2, 3),)))
self.assertEqual(kall, _Call(('', (1, 2, 3))))
self.assertEqual(kall, _Call(((1, 2, 3), {})))
self.assertEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(((1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 3))))
self.assertNotEqual(kall, _Call(('', (1, 2, 3), {})))
kall = _Call(('foo', (1, 2, 4),))
self.assertNotEqual(kall, _Call(('', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('', (1, 2, 4), {})))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4))))
self.assertNotEqual(kall, _Call(('bar', (1, 2, 4), {})))
kall = _Call(({'a': 3},))
self.assertEqual(kall, _Call(('', (), {'a': 3})))
self.assertEqual(kall, _Call(('', {'a': 3})))
self.assertEqual(kall, _Call(((), {'a': 3})))
self.assertEqual(kall, _Call(({'a': 3},)))
def test_empty__Call(self):
args = _Call()
self.assertEqual(args, ())
self.assertEqual(args, ('foo',))
self.assertEqual(args, ((),))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertEqual(args, ({},))
def test_named_empty_call(self):
args = _Call(('foo', (), {}))
self.assertEqual(args, ('foo',))
self.assertEqual(args, ('foo', ()))
self.assertEqual(args, ('foo',(), {}))
self.assertEqual(args, ('foo', {}))
self.assertNotEqual(args, ((),))
self.assertNotEqual(args, ())
self.assertNotEqual(args, ({},))
self.assertNotEqual(args, ('bar',))
self.assertNotEqual(args, ('bar', ()))
self.assertNotEqual(args, ('bar', {}))
def test_call_with_args(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3),))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertEqual(args, ((1, 2, 3), {}))
def test_named_call_with_args(self):
args = _Call(('foo', (1, 2, 3), {}))
self.assertEqual(args, ('foo', (1, 2, 3)))
self.assertEqual(args, ('foo', (1, 2, 3), {}))
self.assertNotEqual(args, ((1, 2, 3),))
self.assertNotEqual(args, ((1, 2, 3), {}))
def test_call_with_kwargs(self):
args = _Call(((), dict(a=3, b=4)))
self.assertEqual(args, (dict(a=3, b=4),))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ((), dict(a=3, b=4)))
def test_named_call_with_kwargs(self):
args = _Call(('foo', (), dict(a=3, b=4)))
self.assertEqual(args, ('foo', dict(a=3, b=4)))
self.assertEqual(args, ('foo', (), dict(a=3, b=4)))
self.assertNotEqual(args, (dict(a=3, b=4),))
self.assertNotEqual(args, ((), dict(a=3, b=4)))
def test_call_with_args_call_empty_name(self):
args = _Call(((1, 2, 3), {}))
self.assertEqual(args, call(1, 2, 3))
self.assertEqual(call(1, 2, 3), args)
self.assertTrue(call(1, 2, 3) in [args])
def test_call_ne(self):
self.assertNotEqual(_Call(((1, 2, 3),)), call(1, 2))
self.assertFalse(_Call(((1, 2, 3),)) != call(1, 2, 3))
self.assertTrue(_Call(((1, 2), {})) != call(1, 2, 3))
def test_call_non_tuples(self):
kall = _Call(((1, 2, 3),))
for value in 1, None, self, int:
self.assertNotEqual(kall, value)
self.assertFalse(kall == value)
def test_repr(self):
self.assertEqual(repr(_Call()), 'call()')
self.assertEqual(repr(_Call(('foo',))), 'call.foo()')
self.assertEqual(repr(_Call(((1, 2, 3), {'a': 'b'}))),
"call(1, 2, 3, a='b')")
self.assertEqual(repr(_Call(('bar', (1, 2, 3), {'a': 'b'}))),
"call.bar(1, 2, 3, a='b')")
self.assertEqual(repr(call), 'call')
self.assertEqual(str(call), 'call')
self.assertEqual(repr(call()), 'call()')
self.assertEqual(repr(call(1)), 'call(1)')
self.assertEqual(repr(call(zz='thing')), "call(zz='thing')")
self.assertEqual(repr(call().foo), 'call().foo')
self.assertEqual(repr(call(1).foo.bar(a=3).bing),
'call().foo.bar().bing')
self.assertEqual(
repr(call().foo(1, 2, a=3)),
"call().foo(1, 2, a=3)"
)
self.assertEqual(repr(call()()), "call()()")
self.assertEqual(repr(call(1)(2)), "call()(2)")
self.assertEqual(
repr(call()().bar().baz.beep(1)),
"call()().bar().baz.beep(1)"
)
def test_call(self):
self.assertEqual(call(), ('', (), {}))
self.assertEqual(call('foo', 'bar', one=3, two=4),
('', ('foo', 'bar'), {'one': 3, 'two': 4}))
mock = Mock()
mock(1, 2, 3)
mock(a=3, b=6)
self.assertEqual(mock.call_args_list,
[call(1, 2, 3), call(a=3, b=6)])
def test_attribute_call(self):
self.assertEqual(call.foo(1), ('foo', (1,), {}))
self.assertEqual(call.bar.baz(fish='eggs'),
('bar.baz', (), {'fish': 'eggs'}))
mock = Mock()
mock.foo(1, 2 ,3)
mock.bar.baz(a=3, b=6)
self.assertEqual(mock.method_calls,
[call.foo(1, 2, 3), call.bar.baz(a=3, b=6)])
def test_extended_call(self):
result = call(1).foo(2).bar(3, a=4)
self.assertEqual(result, ('().foo().bar', (3,), dict(a=4)))
mock = MagicMock()
mock(1, 2, a=3, b=4)
self.assertEqual(mock.call_args, call(1, 2, a=3, b=4))
self.assertNotEqual(mock.call_args, call(1, 2, 3))
self.assertEqual(mock.call_args_list, [call(1, 2, a=3, b=4)])
self.assertEqual(mock.mock_calls, [call(1, 2, a=3, b=4)])
mock = MagicMock()
mock.foo(1).bar()().baz.beep(a=6)
last_call = call.foo(1).bar()().baz.beep(a=6)
self.assertEqual(mock.mock_calls[-1], last_call)
self.assertEqual(mock.mock_calls, last_call.call_list())
def test_call_list(self):
mock = MagicMock()
mock(1)
self.assertEqual(call(1).call_list(), mock.mock_calls)
mock = MagicMock()
mock(1).method(2)
self.assertEqual(call(1).method(2).call_list(),
mock.mock_calls)
mock = MagicMock()
mock(1).method(2)(3)
self.assertEqual(call(1).method(2)(3).call_list(),
mock.mock_calls)
mock = MagicMock()
int(mock(1).method(2)(3).foo.bar.baz(4)(5))
kall = call(1).method(2)(3).foo.bar.baz(4)(5).__int__()
self.assertEqual(kall.call_list(), mock.mock_calls)
def test_call_any(self):
self.assertEqual(call, ANY)
m = MagicMock()
int(m)
self.assertEqual(m.mock_calls, [ANY])
self.assertEqual([ANY], m.mock_calls)
def test_two_args_call(self):
args = _Call(((1, 2), {'a': 3}), two=True)
self.assertEqual(len(args), 2)
self.assertEqual(args[0], (1, 2))
self.assertEqual(args[1], {'a': 3})
other_args = _Call(((1, 2), {'a': 3}))
self.assertEqual(args, other_args)
class SpecSignatureTest(unittest.TestCase):
def _check_someclass_mock(self, mock):
self.assertRaises(AttributeError, getattr, mock, 'foo')
mock.one(1, 2)
mock.one.assert_called_with(1, 2)
self.assertRaises(AssertionError,
mock.one.assert_called_with, 3, 4)
self.assertRaises(TypeError, mock.one, 1)
mock.two()
mock.two.assert_called_with()
self.assertRaises(AssertionError,
mock.two.assert_called_with, 3)
self.assertRaises(TypeError, mock.two, 1)
mock.three()
mock.three.assert_called_with()
self.assertRaises(AssertionError,
mock.three.assert_called_with, 3)
self.assertRaises(TypeError, mock.three, 3, 2)
mock.three(1)
mock.three.assert_called_with(1)
mock.three(a=1)
mock.three.assert_called_with(a=1)
def test_basic(self):
for spec in (SomeClass, SomeClass()):
mock = create_autospec(spec)
self._check_someclass_mock(mock)
def test_create_autospec_return_value(self):
def f():
pass
mock = create_autospec(f, return_value='foo')
self.assertEqual(mock(), 'foo')
class Foo(object):
pass
mock = create_autospec(Foo, return_value='foo')
self.assertEqual(mock(), 'foo')
def test_autospec_reset_mock(self):
m = create_autospec(int)
int(m)
m.reset_mock()
self.assertEqual(m.__int__.call_count, 0)
def test_mocking_unbound_methods(self):
class Foo(object):
def foo(self, foo):
pass
p = patch.object(Foo, 'foo')
mock_foo = p.start()
Foo().foo(1)
mock_foo.assert_called_with(1)
def test_create_autospec_unbound_methods(self):
# see mock issue 128
# this is expected to fail until the issue is fixed
return
class Foo(object):
def foo(self):
pass
klass = create_autospec(Foo)
instance = klass()
self.assertRaises(TypeError, instance.foo, 1)
# Note: no type checking on the "self" parameter
klass.foo(1)
klass.foo.assert_called_with(1)
self.assertRaises(TypeError, klass.foo)
def test_create_autospec_keyword_arguments(self):
class Foo(object):
a = 3
m = create_autospec(Foo, a='3')
self.assertEqual(m.a, '3')
def test_create_autospec_keyword_only_arguments(self):
def foo(a, *, b=None):
pass
m = create_autospec(foo)
m(1)
m.assert_called_with(1)
self.assertRaises(TypeError, m, 1, 2)
m(2, b=3)
m.assert_called_with(2, b=3)
def test_function_as_instance_attribute(self):
obj = SomeClass()
def f(a):
pass
obj.f = f
mock = create_autospec(obj)
mock.f('bing')
mock.f.assert_called_with('bing')
def test_spec_as_list(self):
# because spec as a list of strings in the mock constructor means
# something very different we treat a list instance as the type.
mock = create_autospec([])
mock.append('foo')
mock.append.assert_called_with('foo')
self.assertRaises(AttributeError, getattr, mock, 'foo')
class Foo(object):
foo = []
mock = create_autospec(Foo)
mock.foo.append(3)
mock.foo.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.foo, 'foo')
def test_attributes(self):
class Sub(SomeClass):
attr = SomeClass()
sub_mock = create_autospec(Sub)
for mock in (sub_mock, sub_mock.attr):
self._check_someclass_mock(mock)
def test_builtin_functions_types(self):
# we could replace builtin functions / methods with a function
# with *args / **kwargs signature. Using the builtin method type
# as a spec seems to work fairly well though.
class BuiltinSubclass(list):
def bar(self, arg):
pass
sorted = sorted
attr = {}
mock = create_autospec(BuiltinSubclass)
mock.append(3)
mock.append.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.append, 'foo')
mock.bar('foo')
mock.bar.assert_called_with('foo')
self.assertRaises(TypeError, mock.bar, 'foo', 'bar')
self.assertRaises(AttributeError, getattr, mock.bar, 'foo')
mock.sorted([1, 2])
mock.sorted.assert_called_with([1, 2])
self.assertRaises(AttributeError, getattr, mock.sorted, 'foo')
mock.attr.pop(3)
mock.attr.pop.assert_called_with(3)
self.assertRaises(AttributeError, getattr, mock.attr, 'foo')
def test_method_calls(self):
class Sub(SomeClass):
attr = SomeClass()
mock = create_autospec(Sub)
mock.one(1, 2)
mock.two()
mock.three(3)
expected = [call.one(1, 2), call.two(), call.three(3)]
self.assertEqual(mock.method_calls, expected)
mock.attr.one(1, 2)
mock.attr.two()
mock.attr.three(3)
expected.extend(
[call.attr.one(1, 2), call.attr.two(), call.attr.three(3)]
)
self.assertEqual(mock.method_calls, expected)
def test_magic_methods(self):
class BuiltinSubclass(list):
attr = {}
mock = create_autospec(BuiltinSubclass)
self.assertEqual(list(mock), [])
self.assertRaises(TypeError, int, mock)
self.assertRaises(TypeError, int, mock.attr)
self.assertEqual(list(mock), [])
self.assertIsInstance(mock['foo'], MagicMock)
self.assertIsInstance(mock.attr['foo'], MagicMock)
def test_spec_set(self):
class Sub(SomeClass):
attr = SomeClass()
for spec in (Sub, Sub()):
mock = create_autospec(spec, spec_set=True)
self._check_someclass_mock(mock)
self.assertRaises(AttributeError, setattr, mock, 'foo', 'bar')
self.assertRaises(AttributeError, setattr, mock.attr, 'foo', 'bar')
def test_descriptors(self):
class Foo(object):
@classmethod
def f(cls, a, b):
pass
@staticmethod
def g(a, b):
pass
class Bar(Foo):
pass
class Baz(SomeClass, Bar):
pass
for spec in (Foo, Foo(), Bar, Bar(), Baz, Baz()):
mock = create_autospec(spec)
mock.f(1, 2)
mock.f.assert_called_once_with(1, 2)
mock.g(3, 4)
mock.g.assert_called_once_with(3, 4)
def test_recursive(self):
class A(object):
def a(self):
pass
foo = 'foo bar baz'
bar = foo
A.B = A
mock = create_autospec(A)
mock()
self.assertFalse(mock.B.called)
mock.a()
mock.B.a()
self.assertEqual(mock.method_calls, [call.a(), call.B.a()])
self.assertIs(A.foo, A.bar)
self.assertIsNot(mock.foo, mock.bar)
mock.foo.lower()
self.assertRaises(AssertionError, mock.bar.lower.assert_called_with)
def test_spec_inheritance_for_classes(self):
class Foo(object):
def a(self):
pass
class Bar(object):
def f(self):
pass
class_mock = create_autospec(Foo)
self.assertIsNot(class_mock, class_mock())
for this_mock in class_mock, class_mock():
this_mock.a()
this_mock.a.assert_called_with()
self.assertRaises(TypeError, this_mock.a, 'foo')
self.assertRaises(AttributeError, getattr, this_mock, 'b')
instance_mock = create_autospec(Foo())
instance_mock.a()
instance_mock.a.assert_called_with()
self.assertRaises(TypeError, instance_mock.a, 'foo')
self.assertRaises(AttributeError, getattr, instance_mock, 'b')
        # The return value isn't callable
self.assertRaises(TypeError, instance_mock)
instance_mock.Bar.f()
instance_mock.Bar.f.assert_called_with()
self.assertRaises(AttributeError, getattr, instance_mock.Bar, 'g')
instance_mock.Bar().f()
instance_mock.Bar().f.assert_called_with()
self.assertRaises(AttributeError, getattr, instance_mock.Bar(), 'g')
def test_inherit(self):
class Foo(object):
a = 3
Foo.Foo = Foo
# class
mock = create_autospec(Foo)
instance = mock()
self.assertRaises(AttributeError, getattr, instance, 'b')
attr_instance = mock.Foo()
self.assertRaises(AttributeError, getattr, attr_instance, 'b')
# instance
mock = create_autospec(Foo())
self.assertRaises(AttributeError, getattr, mock, 'b')
self.assertRaises(TypeError, mock)
# attribute instance
call_result = mock.Foo()
self.assertRaises(AttributeError, getattr, call_result, 'b')
def test_builtins(self):
# used to fail with infinite recursion
create_autospec(1)
create_autospec(int)
create_autospec('foo')
create_autospec(str)
create_autospec({})
create_autospec(dict)
create_autospec([])
create_autospec(list)
create_autospec(set())
create_autospec(set)
create_autospec(1.0)
create_autospec(float)
create_autospec(1j)
create_autospec(complex)
create_autospec(False)
create_autospec(True)
def test_function(self):
def f(a, b):
pass
mock = create_autospec(f)
self.assertRaises(TypeError, mock)
mock(1, 2)
mock.assert_called_with(1, 2)
f.f = f
mock = create_autospec(f)
self.assertRaises(TypeError, mock.f)
mock.f(3, 4)
mock.f.assert_called_with(3, 4)
def test_skip_attributeerrors(self):
class Raiser(object):
def __get__(self, obj, type=None):
if obj is None:
raise AttributeError('Can only be accessed via an instance')
class RaiserClass(object):
raiser = Raiser()
@staticmethod
def existing(a, b):
return a + b
s = create_autospec(RaiserClass)
self.assertRaises(TypeError, lambda x: s.existing(1, 2, 3))
s.existing(1, 2)
self.assertRaises(AttributeError, lambda: s.nonexisting)
# check we can fetch the raiser attribute and it has no spec
obj = s.raiser
obj.foo, obj.bar
def test_signature_class(self):
class Foo(object):
def __init__(self, a, b=3):
pass
mock = create_autospec(Foo)
self.assertRaises(TypeError, mock)
mock(1)
mock.assert_called_once_with(1)
mock(4, 5)
mock.assert_called_with(4, 5)
def test_class_with_no_init(self):
# this used to raise an exception
# due to trying to get a signature from object.__init__
class Foo(object):
pass
create_autospec(Foo)
def test_signature_callable(self):
class Callable(object):
def __init__(self):
pass
def __call__(self, a):
pass
mock = create_autospec(Callable)
mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
instance = mock()
self.assertRaises(TypeError, instance)
instance(a='a')
instance.assert_called_once_with(a='a')
instance('a')
instance.assert_called_with('a')
mock = create_autospec(Callable())
mock(a='a')
mock.assert_called_once_with(a='a')
self.assertRaises(TypeError, mock)
mock('a')
mock.assert_called_with('a')
def test_signature_noncallable(self):
class NonCallable(object):
def __init__(self):
pass
mock = create_autospec(NonCallable)
instance = mock()
mock.assert_called_once_with()
self.assertRaises(TypeError, mock, 'a')
self.assertRaises(TypeError, instance)
self.assertRaises(TypeError, instance, 'a')
mock = create_autospec(NonCallable())
self.assertRaises(TypeError, mock)
self.assertRaises(TypeError, mock, 'a')
def test_create_autospec_none(self):
class Foo(object):
bar = None
mock = create_autospec(Foo)
none = mock.bar
self.assertNotIsInstance(none, type(None))
none.foo()
none.foo.assert_called_once_with()
def test_autospec_functions_with_self_in_odd_place(self):
class Foo(object):
def f(a, self):
pass
a = create_autospec(Foo)
a.f(self=10)
a.f.assert_called_with(self=10)
def test_autospec_property(self):
class Foo(object):
@property
def foo(self):
return 3
foo = create_autospec(Foo)
mock_property = foo.foo
# no spec on properties
self.assertTrue(isinstance(mock_property, MagicMock))
mock_property(1, 2, 3)
mock_property.abc(4, 5, 6)
mock_property.assert_called_once_with(1, 2, 3)
mock_property.abc.assert_called_once_with(4, 5, 6)
def test_autospec_slots(self):
class Foo(object):
__slots__ = ['a']
foo = create_autospec(Foo)
mock_slot = foo.a
# no spec on slots
mock_slot(1, 2, 3)
mock_slot.abc(4, 5, 6)
mock_slot.assert_called_once_with(1, 2, 3)
mock_slot.abc.assert_called_once_with(4, 5, 6)
class TestCallList(unittest.TestCase):
def test_args_list_contains_call_list(self):
mock = Mock()
self.assertIsInstance(mock.call_args_list, _CallList)
mock(1, 2)
mock(a=3)
mock(3, 4)
mock(b=6)
for kall in call(1, 2), call(a=3), call(3, 4), call(b=6):
self.assertTrue(kall in mock.call_args_list)
calls = [call(a=3), call(3, 4)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(1, 2), call(a=3)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(3, 4), call(b=6)]
self.assertTrue(calls in mock.call_args_list)
calls = [call(3, 4)]
self.assertTrue(calls in mock.call_args_list)
self.assertFalse(call('fish') in mock.call_args_list)
self.assertFalse([call('fish')] in mock.call_args_list)
def test_call_list_str(self):
mock = Mock()
mock(1, 2)
mock.foo(a=3)
mock.foo.bar().baz('fish', cat='dog')
expected = (
"[call(1, 2),\n"
" call.foo(a=3),\n"
" call.foo.bar(),\n"
" call.foo.bar().baz('fish', cat='dog')]"
)
self.assertEqual(str(mock.mock_calls), expected)
def test_propertymock(self):
p = patch('%s.SomeClass.one' % __name__, new_callable=PropertyMock)
mock = p.start()
try:
SomeClass.one
mock.assert_called_once_with()
s = SomeClass()
s.one
mock.assert_called_with()
self.assertEqual(mock.mock_calls, [call(), call()])
s.one = 3
self.assertEqual(mock.mock_calls, [call(), call(), call(3)])
finally:
p.stop()
def test_propertymock_returnvalue(self):
m = MagicMock()
p = PropertyMock()
type(m).foo = p
returned = m.foo
p.assert_called_once_with()
self.assertIsInstance(returned, MagicMock)
self.assertNotIsInstance(returned, PropertyMock)
if __name__ == '__main__':
unittest.main()
|
gpl-3.0
|
mozilla/stoneridge
|
python/src/Lib/decimal.py
|
31
|
220812
|
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module is currently Py2.3 compatible and should be kept that way
# unless a major compelling advantage arises. IOW, 2.3 compatibility is
# strongly preferred, but not guaranteed.
# Also, this module should be kept in sync with the latest updates of
# the IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is a Py2.3 implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
www.cs.berkeley.edu/~ejr/projects/754/private/drafts/854-1987/dir.html
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of the expected Decimal('0.00') returned by decimal floating point).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678901234567890')
Decimal('1.2345E+12345678901234567892')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print dig / Decimal(3)
0.333333333
>>> getcontext().prec = 18
>>> print dig / Decimal(3)
0.333333333333333333
>>> print dig.sqrt()
1
>>> print Decimal(3).sqrt()
1.73205080756887729
>>> print Decimal(3) ** 123
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print inf
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print neginf
-Infinity
>>> print neginf + inf
NaN
>>> print neginf * inf
-Infinity
>>> print dig / 0
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print dig / 0
Traceback (most recent call last):
...
...
...
DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> print c.divide(Decimal(0), Decimal(0))
Traceback (most recent call last):
...
...
...
InvalidOperation: 0 / 0
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print c.divide(Decimal(0), Decimal(0))
NaN
>>> print c.flags[InvalidOperation]
1
>>>
"""
__all__ = [
# Two major classes
'Decimal', 'Context',
# Contexts
'DefaultContext', 'BasicContext', 'ExtendedContext',
# Exceptions
'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
# Constants for use in setting up contexts
'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',
# Functions for manipulating contexts
'setcontext', 'getcontext', 'localcontext'
]
__version__ = '1.70' # Highest version of the spec this complies with
import copy as _copy
import math as _math
import numbers as _numbers
try:
from collections import namedtuple as _namedtuple
DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
DecimalTuple = lambda *args: args
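# For example, Decimal('3.14').as_tuple() gives
# DecimalTuple(sign=0, digits=(3, 1, 4), exponent=-2) when namedtuple is
# available, and the plain tuple (0, (3, 1, 4), -2) under the fallback.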
# Rounding
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'
# Errors
class DecimalException(ArithmeticError):
"""Base exception class.
Used exceptions derive from this.
    If an exception derives from another exception besides this one (such as
    Underflow, which also derives from Inexact, Rounded and Subnormal), that
    indicates that it is only raised when the others are present.  This isn't
    actually used for anything, though.
handle -- Called when context._raise_error is called and the
trap_enabler is not set. First argument is self, second is the
context. More arguments can be given, those being after
the explanation in _raise_error (For example,
context._raise_error(NewError, '(-x)!', self._sign) would
call NewError().handle(context, self._sign).)
To define a new exception, it should be sufficient to have it derive
from DecimalException.
"""
def handle(self, context, *args):
pass
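# Illustrative sketch of the handle() protocol described above: a new
# condition could be written as
#     class NewError(DecimalException):
#         def handle(self, context, sign, *args):
#             return _NaN
# and signaled with context._raise_error(NewError, '(-x)!', sign).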
class Clamped(DecimalException):
"""Exponent of a 0 changed to fit bounds.
This occurs and signals clamped if the exponent of a result has been
altered in order to fit the constraints of a specific concrete
representation. This may occur when the exponent of a zero result would
be outside the bounds of a representation, or when a large normal
number would have an encoded exponent that cannot be represented. In
this latter case, the exponent is reduced to fit and the corresponding
number of zero digits are appended to the coefficient ("fold-down").
"""
class InvalidOperation(DecimalException):
"""An invalid operation was performed.
Various bad things cause this:
Something creates a signaling NaN
-INF + INF
0 * (+-)INF
(+-)INF / (+-)INF
x % 0
(+-)INF % x
x._rescale( non-integer )
sqrt(-x) , x > 0
0 ** 0
x ** (non-integer)
x ** (+-)INF
An operand is invalid
The result of the operation after these is a quiet positive NaN,
except when the cause is a signaling NaN, in which case the result is
also a quiet NaN, but with the original sign, and an optional
diagnostic information.
"""
def handle(self, context, *args):
if args:
ans = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
return ans._fix_nan(context)
return _NaN
class ConversionSyntax(InvalidOperation):
"""Trying to convert badly formed string.
This occurs and signals invalid-operation if an string is being
converted to a number and it does not conform to the numeric string
syntax. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
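# For example, Decimal('abc') signals ConversionSyntax; in a context with the
# InvalidOperation trap disabled the result is Decimal('NaN') rather than an
# exception.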
class DivisionByZero(DecimalException, ZeroDivisionError):
"""Division by 0.
This occurs and signals division-by-zero if division of a finite number
by zero was attempted (during a divide-integer or divide operation, or a
power operation with negative right-hand operand), and the dividend was
not zero.
The result of the operation is [sign,inf], where sign is the exclusive
or of the signs of the operands for divide, or is 1 for an odd power of
-0, for power.
"""
def handle(self, context, sign, *args):
return _SignedInfinity[sign]
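# For example, with the DivisionByZero trap disabled (as in ExtendedContext),
# Decimal(1) / Decimal(0) evaluates to Decimal('Infinity') and
# Decimal(-1) / Decimal(0) to Decimal('-Infinity'), following the sign rule
# above.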
class DivisionImpossible(InvalidOperation):
"""Cannot perform the division adequately.
This occurs and signals invalid-operation if the integer result of a
divide-integer or remainder operation had too many digits (would be
longer than precision). The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
"""Undefined result of division.
This occurs and signals invalid-operation if division by zero was
attempted (during a divide-integer, divide, or remainder operation), and
the dividend is also zero. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Inexact(DecimalException):
"""Had to round, losing information.
This occurs and signals inexact whenever the result of an operation is
not exact (that is, it needed to be rounded and any discarded digits
were non-zero), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The inexact signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) was inexact.
"""
class InvalidContext(InvalidOperation):
"""Invalid context. Unknown rounding, for example.
This occurs and signals invalid-operation if an invalid context was
detected during an operation. This can occur if contexts are not checked
on creation and either the precision exceeds the capability of the
underlying concrete representation or an unknown or unsupported rounding
was specified. These aspects of the context need only be checked when
the values are required to be used. The result is [0,qNaN].
"""
def handle(self, context, *args):
return _NaN
class Rounded(DecimalException):
"""Number got rounded (not necessarily changed during rounding).
This occurs and signals rounded whenever the result of an operation is
rounded (that is, some zero or non-zero digits were discarded from the
coefficient), or if an overflow or underflow condition occurs. The
result in all cases is unchanged.
The rounded signal may be tested (or trapped) to determine if a given
operation (or sequence of operations) caused a loss of precision.
"""
class Subnormal(DecimalException):
"""Exponent < Emin before rounding.
This occurs and signals subnormal whenever the result of a conversion or
operation is subnormal (that is, its adjusted exponent is less than
Emin, before any rounding). The result in all cases is unchanged.
The subnormal signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) yielded a subnormal result.
"""
class Overflow(Inexact, Rounded):
"""Numerical overflow.
This occurs and signals overflow if the adjusted exponent of a result
(from a conversion or from an operation that is not an attempt to divide
by zero), after rounding, would be greater than the largest value that
can be handled by the implementation (the value Emax).
The result depends on the rounding mode:
For round-half-up and round-half-even (and for round-half-down and
round-up, if implemented), the result of the operation is [sign,inf],
where sign is the sign of the intermediate result. For round-down, the
result is the largest finite number that can be represented in the
current precision, with the sign of the intermediate result. For
round-ceiling, the result is the same as for round-down if the sign of
the intermediate result is 1, or is [0,inf] otherwise. For round-floor,
the result is the same as for round-down if the sign of the intermediate
result is 0, or is [1,inf] otherwise. In all cases, Inexact and Rounded
will also be raised.
"""
def handle(self, context, sign, *args):
if context.rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
ROUND_HALF_DOWN, ROUND_UP):
return _SignedInfinity[sign]
if sign == 0:
if context.rounding == ROUND_CEILING:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
if sign == 1:
if context.rounding == ROUND_FLOOR:
return _SignedInfinity[sign]
return _dec_from_triple(sign, '9'*context.prec,
context.Emax-context.prec+1)
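# Illustrative example of the rounding-mode dependence described above,
# assuming a context with prec=3 and Emax=2 (largest finite value 9.99E+2):
# an overflowing result becomes Infinity under ROUND_HALF_EVEN but
# Decimal('9.99E+2') under ROUND_DOWN.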
class Underflow(Inexact, Rounded, Subnormal):
"""Numerical underflow with result rounded to 0.
This occurs and signals underflow if a result is inexact and the
adjusted exponent of the result would be smaller (more negative) than
the smallest value that can be handled by the implementation (the value
Emin). That is, the result is both inexact and subnormal.
The result after an underflow will be a subnormal number rounded, if
necessary, so that its exponent is not less than Etiny. This may result
in 0 with the sign of the intermediate result and an exponent of Etiny.
In all cases, Inexact, Rounded, and Subnormal will also be raised.
"""
# List of public traps and flags
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
Underflow, InvalidOperation, Subnormal]
# Map conditions (per the spec) to signals
_condition_map = {ConversionSyntax:InvalidOperation,
DivisionImpossible:InvalidOperation,
DivisionUndefined:InvalidOperation,
InvalidContext:InvalidOperation}
##### Context Functions ##################################################
# The getcontext() and setcontext() functions manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.currentThread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.
try:
import threading
except ImportError:
# Python was compiled without threads; create a mock object instead
import sys
class MockThreading(object):
def local(self, sys=sys):
return sys.modules[__name__]
threading = MockThreading()
del sys, MockThreading
try:
threading.local
except AttributeError:
# To fix reloading, force it to create a new context
# Old contexts have different exceptions in their dicts, making problems.
if hasattr(threading.currentThread(), '__decimal_context__'):
del threading.currentThread().__decimal_context__
def setcontext(context):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
threading.currentThread().__decimal_context__ = context
def getcontext():
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return threading.currentThread().__decimal_context__
except AttributeError:
context = Context()
threading.currentThread().__decimal_context__ = context
return context
else:
local = threading.local()
if hasattr(local, '__decimal_context__'):
del local.__decimal_context__
def getcontext(_local=local):
"""Returns this thread's context.
If this thread does not yet have a context, returns
a new context and sets this thread's context.
New contexts are copies of DefaultContext.
"""
try:
return _local.__decimal_context__
except AttributeError:
context = Context()
_local.__decimal_context__ = context
return context
def setcontext(context, _local=local):
"""Set this thread's context to context."""
if context in (DefaultContext, BasicContext, ExtendedContext):
context = context.copy()
context.clear_flags()
_local.__decimal_context__ = context
del threading, local # Don't contaminate the namespace
def localcontext(ctx=None):
"""Return a context manager for a copy of the supplied context
Uses a copy of the current context if no context is specified
The returned context manager creates a local decimal context
in a with statement:
def sin(x):
with localcontext() as ctx:
ctx.prec += 2
# Rest of sin calculation algorithm
# uses a precision 2 greater than normal
return +s # Convert result to normal precision
def sin(x):
with localcontext(ExtendedContext):
# Rest of sin calculation algorithm
# uses the Extended Context from the
# General Decimal Arithmetic Specification
return +s # Convert result to normal context
>>> setcontext(DefaultContext)
>>> print getcontext().prec
28
>>> with localcontext():
... ctx = getcontext()
... ctx.prec += 2
... print ctx.prec
...
30
>>> with localcontext(ExtendedContext):
... print getcontext().prec
...
9
>>> print getcontext().prec
28
"""
if ctx is None: ctx = getcontext()
return _ContextManager(ctx)
##### Decimal class #######################################################
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
"""Create a decimal point instance.
>>> Decimal('3.14') # string input
Decimal('3.14')
>>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
Decimal('3.14')
>>> Decimal(314) # int or long
Decimal('314')
>>> Decimal(Decimal(314)) # another decimal instance
Decimal('314')
>>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
Decimal('3.14')
"""
# Note that the coefficient, self._int, is actually stored as
# a string rather than as a tuple of digits. This speeds up
# the "digits to integer" and "integer to digits" conversions
# that are used in almost every arithmetic operation on
# Decimals. This is an internal detail: the as_tuple function
# and the Decimal constructor still deal with tuples of
# digits.
self = object.__new__(cls)
# From a string
# REs insist on real strings, so we can too.
if isinstance(value, basestring):
m = _parser(value.strip())
if m is None:
if context is None:
context = getcontext()
return context._raise_error(ConversionSyntax,
"Invalid literal for Decimal: %r" % value)
if m.group('sign') == "-":
self._sign = 1
else:
self._sign = 0
intpart = m.group('int')
if intpart is not None:
# finite number
fracpart = m.group('frac') or ''
exp = int(m.group('exp') or '0')
self._int = str(int(intpart+fracpart))
self._exp = exp - len(fracpart)
self._is_special = False
else:
diag = m.group('diag')
if diag is not None:
# NaN
self._int = str(int(diag or '0')).lstrip('0')
if m.group('signal'):
self._exp = 'N'
else:
self._exp = 'n'
else:
# infinity
self._int = '0'
self._exp = 'F'
self._is_special = True
return self
# From an integer
if isinstance(value, (int,long)):
if value >= 0:
self._sign = 0
else:
self._sign = 1
self._exp = 0
self._int = str(abs(value))
self._is_special = False
return self
# From another decimal
if isinstance(value, Decimal):
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
# From an internal working value
if isinstance(value, _WorkRep):
self._sign = value.sign
self._int = str(value.int)
self._exp = int(value.exp)
self._is_special = False
return self
# tuple/list conversion (possibly from as_tuple())
if isinstance(value, (list,tuple)):
if len(value) != 3:
raise ValueError('Invalid tuple size in creation of Decimal '
'from list or tuple. The list or tuple '
'should have exactly three elements.')
# process sign. The isinstance test rejects floats
if not (isinstance(value[0], (int, long)) and value[0] in (0,1)):
raise ValueError("Invalid sign. The first value in the tuple "
"should be an integer; either 0 for a "
"positive number or 1 for a negative number.")
self._sign = value[0]
if value[2] == 'F':
# infinity: value[1] is ignored
self._int = '0'
self._exp = value[2]
self._is_special = True
else:
# process and validate the digits in value[1]
digits = []
for digit in value[1]:
if isinstance(digit, (int, long)) and 0 <= digit <= 9:
# skip leading zeros
if digits or digit != 0:
digits.append(digit)
else:
raise ValueError("The second value in the tuple must "
"be composed of integers in the range "
"0 through 9.")
if value[2] in ('n', 'N'):
# NaN: digits form the diagnostic
self._int = ''.join(map(str, digits))
self._exp = value[2]
self._is_special = True
elif isinstance(value[2], (int, long)):
# finite number: digits give the coefficient
self._int = ''.join(map(str, digits or [0]))
self._exp = value[2]
self._is_special = False
else:
raise ValueError("The third value in the tuple must "
"be an integer, or one of the "
"strings 'F', 'n', 'N'.")
return self
if isinstance(value, float):
value = Decimal.from_float(value)
self._exp = value._exp
self._sign = value._sign
self._int = value._int
self._is_special = value._is_special
return self
raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, (int, long)): # handle integer inputs
return cls(f)
if _math.isinf(f) or _math.isnan(f): # raises TypeError if not a float
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
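        # d is a power of two for any finite float, so f == n/2**k
        # == (n * 5**k) / 10**k; the triple built below is therefore exact.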
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
"""Version of _check_nans used for the signaling comparisons
compare_signal, __le__, __lt__, __ge__, __gt__.
Signal InvalidOperation if either self or other is a (quiet
or signaling) NaN. Signaling NaNs take precedence over quiet
NaNs.
Return 0 if neither operand is a NaN.
"""
if context is None:
context = getcontext()
if self._is_special or other._is_special:
if self.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
self)
elif other.is_snan():
return context._raise_error(InvalidOperation,
'comparison involving sNaN',
other)
elif self.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
self)
elif other.is_qnan():
return context._raise_error(InvalidOperation,
'comparison involving NaN',
other)
return 0
def __nonzero__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
"""Compare the two non-NaN decimal instances self and other.
Returns -1 if self < other, 0 if self == other and 1
if self > other. This routine is for internal use only."""
if self._is_special or other._is_special:
self_inf = self._isinfinity()
other_inf = other._isinfinity()
if self_inf == other_inf:
return 0
elif self_inf < other_inf:
return -1
else:
return 1
# check for zeros; Decimal('0') == Decimal('-0')
if not self:
if not other:
return 0
else:
return -((-1)**other._sign)
if not other:
return (-1)**self._sign
# If different signs, neg one is less
if other._sign < self._sign:
return -1
if self._sign < other._sign:
return 1
self_adjusted = self.adjusted()
other_adjusted = other.adjusted()
if self_adjusted == other_adjusted:
self_padded = self._int + '0'*(self._exp - other._exp)
other_padded = other._int + '0'*(other._exp - self._exp)
if self_padded == other_padded:
return 0
elif self_padded < other_padded:
return -(-1)**self._sign
else:
return (-1)**self._sign
elif self_adjusted > other_adjusted:
return (-1)**self._sign
else: # self_adjusted < other_adjusted
return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return False
return self._cmp(other) == 0
def __ne__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
if self._check_nans(other, context):
return True
return self._cmp(other) != 0
def __lt__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) < 0
def __le__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) <= 0
def __gt__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) > 0
def __ge__(self, other, context=None):
other = _convert_other(other, allow_float=True)
if other is NotImplemented:
return other
ans = self._compare_check_nans(other, context)
if ans:
return False
return self._cmp(other) >= 0
def compare(self, other, context=None):
"""Compares one to another.
-1 => a < b
0 => a = b
1 => a > b
NaN => one is NaN
Like __cmp__, but returns Decimal instances.
"""
other = _convert_other(other, raiseit=True)
# Compare(NaN, NaN) = NaN
if (self._is_special or other and other._is_special):
ans = self._check_nans(other, context)
if ans:
return ans
return Decimal(self._cmp(other))
def __hash__(self):
"""x.__hash__() <==> hash(x)"""
# Decimal integers must hash the same as the ints
#
# The hash of a nonspecial noninteger Decimal must depend only
# on the value of that Decimal, and not on its representation.
# For example: hash(Decimal('100E-1')) == hash(Decimal('10')).
# Equality comparisons involving signaling nans can raise an
# exception; since equality checks are implicitly and
# unpredictably used when checking set and dict membership, we
# prevent signaling nans from being used as set elements or
# dict keys by making __hash__ raise an exception.
if self._is_special:
if self.is_snan():
raise TypeError('Cannot hash a signaling NaN value.')
elif self.is_nan():
# 0 to match hash(float('nan'))
return 0
else:
# values chosen to match hash(float('inf')) and
# hash(float('-inf')).
if self._sign:
return -271828
else:
return 314159
# In Python 2.7, we're allowing comparisons (but not
# arithmetic operations) between floats and Decimals; so if
# a Decimal instance is exactly representable as a float then
# its hash should match that of the float.
self_as_float = float(self)
if Decimal.from_float(self_as_float) == self:
return hash(self_as_float)
if self._isinteger():
op = _WorkRep(self.to_integral_value())
# to make computation feasible for Decimals with large
# exponent, we use the fact that hash(n) == hash(m) for
# any two nonzero integers n and m such that (i) n and m
# have the same sign, and (ii) n is congruent to m modulo
# 2**64-1. So we can replace hash((-1)**s*c*10**e) with
# hash((-1)**s*c*pow(10, e, 2**64-1).
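            # For example, Decimal('1E+100') is hashed via
            # pow(10, 100, 2**64-1) instead of constructing the
            # 101-digit integer 10**100.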
return hash((-1)**op.sign*op.int*pow(10, op.exp, 2**64-1))
# The value of a nonzero nonspecial Decimal instance is
# faithfully represented by the triple consisting of its sign,
# its adjusted exponent, and its coefficient with trailing
# zeros removed.
return hash((self._sign,
self._exp+len(self._int),
self._int.rstrip('0')))
def as_tuple(self):
"""Represents the number as a triple tuple.
To show the internals exactly as they are.
"""
return DecimalTuple(self._sign, tuple(map(int, self._int)), self._exp)
def __repr__(self):
"""Represents the number as an instance of Decimal."""
# Invariant: eval(repr(d)) == d
return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
"""Return string representation of the number in scientific notation.
Captures all of the information in the underlying representation.
"""
sign = ['', '-'][self._sign]
if self._is_special:
if self._exp == 'F':
return sign + 'Infinity'
elif self._exp == 'n':
return sign + 'NaN' + self._int
else: # self._exp == 'N'
return sign + 'sNaN' + self._int
# number of digits of self._int to left of decimal point
leftdigits = self._exp + len(self._int)
# dotplace is number of digits of self._int to the left of the
# decimal point in the mantissa of the output string (that is,
# after adjusting the exponent)
if self._exp <= 0 and leftdigits > -6:
# no exponent required
dotplace = leftdigits
elif not eng:
# usual scientific notation: 1 digit on left of the point
dotplace = 1
elif self._int == '0':
# engineering notation, zero
dotplace = (leftdigits + 1) % 3 - 1
else:
# engineering notation, nonzero
dotplace = (leftdigits - 1) % 3 + 1
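        # Worked example: Decimal('123E+1') has _int == '123' and _exp == 1,
        # so leftdigits == 4 and (non-engineering) dotplace == 1, giving the
        # string '1.23E+3'.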
if dotplace <= 0:
intpart = '0'
fracpart = '.' + '0'*(-dotplace) + self._int
elif dotplace >= len(self._int):
intpart = self._int+'0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace]
fracpart = '.' + self._int[dotplace:]
if leftdigits == dotplace:
exp = ''
else:
if context is None:
context = getcontext()
exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
"""Convert to engineering-type string.
Engineering notation has an exponent which is a multiple of 3, so there
are up to 3 digits left of the decimal place.
Same rules for when in exponential and when as a value as in __str__.
"""
return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
"""Returns a copy with the sign switched.
Rounds, if it has reason.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# -Decimal('0') is Decimal('0'), not Decimal('-0'), except
# in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = self.copy_negate()
return ans._fix(context)
def __pos__(self, context=None):
"""Returns a copy, unless it is a sNaN.
        Rounds the number (if it has more than precision digits)
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
if not self and context.rounding != ROUND_FLOOR:
# + (-0) = 0, except in ROUND_FLOOR rounding mode.
ans = self.copy_abs()
else:
ans = Decimal(self)
return ans._fix(context)
def __abs__(self, round=True, context=None):
"""Returns the absolute value of self.
If the keyword argument 'round' is false, do not round. The
expression self.__abs__(round=False) is equivalent to
self.copy_abs().
"""
if not round:
return self.copy_abs()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._sign:
ans = self.__neg__(context=context)
else:
ans = self.__pos__(context=context)
return ans
def __add__(self, other, context=None):
"""Returns self + other.
-INF + INF (or the reverse) cause InvalidOperation errors.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
# If both INF, same sign => same as both, opposite => error.
if self._sign != other._sign and other._isinfinity():
return context._raise_error(InvalidOperation, '-INF + INF')
return Decimal(self)
if other._isinfinity():
return Decimal(other) # Can't both be infinity here
exp = min(self._exp, other._exp)
negativezero = 0
if context.rounding == ROUND_FLOOR and self._sign != other._sign:
# If the answer is 0, the sign should be negative, in this case.
negativezero = 1
if not self and not other:
sign = min(self._sign, other._sign)
if negativezero:
sign = 1
ans = _dec_from_triple(sign, '0', exp)
ans = ans._fix(context)
return ans
if not self:
exp = max(exp, other._exp - context.prec-1)
ans = other._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
if not other:
exp = max(exp, self._exp - context.prec-1)
ans = self._rescale(exp, context.rounding)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
op1, op2 = _normalize(op1, op2, context.prec)
result = _WorkRep()
if op1.sign != op2.sign:
# Equal and opposite
if op1.int == op2.int:
ans = _dec_from_triple(negativezero, '0', exp)
ans = ans._fix(context)
return ans
if op1.int < op2.int:
op1, op2 = op2, op1
# OK, now abs(op1) > abs(op2)
if op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = op2.sign, op1.sign
else:
result.sign = 0
# So we know the sign, and op1 > 0.
elif op1.sign == 1:
result.sign = 1
op1.sign, op2.sign = (0, 0)
else:
result.sign = 0
# Now, op1 > abs(op2) > 0
if op2.sign == 0:
result.int = op1.int + op2.int
else:
result.int = op1.int - op2.int
result.exp = op1.exp
ans = Decimal(result)
ans = ans._fix(context)
return ans
__radd__ = __add__
def __sub__(self, other, context=None):
"""Return self - other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if self._is_special or other._is_special:
ans = self._check_nans(other, context=context)
if ans:
return ans
# self - other is computed as self + other.copy_negate()
return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
"""Return other - self"""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__sub__(self, context=context)
def __mul__(self, other, context=None):
"""Return self * other.
(+-) INF * 0 (or its reverse) raise InvalidOperation.
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
resultsign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if not other:
return context._raise_error(InvalidOperation, '(+-)INF * 0')
return _SignedInfinity[resultsign]
if other._isinfinity():
if not self:
return context._raise_error(InvalidOperation, '0 * (+-)INF')
return _SignedInfinity[resultsign]
resultexp = self._exp + other._exp
# Special case for multiplying by zero
if not self or not other:
ans = _dec_from_triple(resultsign, '0', resultexp)
# Fixing in case the exponent is out of bounds
ans = ans._fix(context)
return ans
# Special case for multiplying by power of 10
if self._int == '1':
ans = _dec_from_triple(resultsign, other._int, resultexp)
ans = ans._fix(context)
return ans
if other._int == '1':
ans = _dec_from_triple(resultsign, self._int, resultexp)
ans = ans._fix(context)
return ans
op1 = _WorkRep(self)
op2 = _WorkRep(other)
ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
ans = ans._fix(context)
return ans
__rmul__ = __mul__
def __truediv__(self, other, context=None):
"""Return self / other."""
other = _convert_other(other)
if other is NotImplemented:
return NotImplemented
if context is None:
context = getcontext()
sign = self._sign ^ other._sign
if self._is_special or other._is_special:
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity() and other._isinfinity():
return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')
if self._isinfinity():
return _SignedInfinity[sign]
if other._isinfinity():
context._raise_error(Clamped, 'Division by infinity')
return _dec_from_triple(sign, '0', context.Etiny())
# Special cases for zeroes
if not other:
if not self:
return context._raise_error(DivisionUndefined, '0 / 0')
return context._raise_error(DivisionByZero, 'x / 0', sign)
if not self:
exp = self._exp - other._exp
coeff = 0
else:
# OK, so neither = 0, INF or NaN
shift = len(other._int) - len(self._int) + context.prec + 1
exp = self._exp - other._exp - shift
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if shift >= 0:
coeff, remainder = divmod(op1.int * 10**shift, op2.int)
else:
coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
if remainder:
# result is not exact; adjust to ensure correct rounding
if coeff % 5 == 0:
coeff += 1
else:
# result is exact; get as close to ideal exponent as possible
ideal_exp = self._exp - other._exp
while exp < ideal_exp and coeff % 10 == 0:
coeff //= 10
exp += 1
ans = _dec_from_triple(sign, str(coeff), exp)
return ans._fix(context)
def _divide(self, other, context):
"""Return (self // other, self % other), to context.prec precision.
Assumes that neither self nor other is a NaN, that self is not
infinite and that other is nonzero.
"""
sign = self._sign ^ other._sign
if other._isinfinity():
ideal_exp = self._exp
else:
ideal_exp = min(self._exp, other._exp)
expdiff = self.adjusted() - other.adjusted()
if not self or other._isinfinity() or expdiff <= -2:
return (_dec_from_triple(sign, '0', 0),
self._rescale(ideal_exp, context.rounding))
if expdiff <= context.prec:
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
if q < 10**context.prec:
return (_dec_from_triple(sign, str(q), 0),
_dec_from_triple(self._sign, str(r), ideal_exp))
# Here the quotient is too large to be representable
ans = context._raise_error(DivisionImpossible,
'quotient too large in //, % or divmod')
return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
__div__ = __truediv__
__rdiv__ = __rtruediv__
def __divmod__(self, other, context=None):
"""
Return (self // other, self % other)
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return (ans, ans)
sign = self._sign ^ other._sign
if self._isinfinity():
if other._isinfinity():
ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
return ans, ans
else:
return (_SignedInfinity[sign],
context._raise_error(InvalidOperation, 'INF % x'))
if not other:
if not self:
ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
return ans, ans
else:
return (context._raise_error(DivisionByZero, 'x // 0', sign),
context._raise_error(InvalidOperation, 'x % 0'))
quotient, remainder = self._divide(other, context)
remainder = remainder._fix(context)
return quotient, remainder
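# An illustrative sketch: the quotient truncates toward zero and the
# remainder takes the sign of the dividend, unlike divmod on ints.
#     >>> divmod(Decimal(7), Decimal(3))
#     (Decimal('2'), Decimal('1'))
#     >>> divmod(Decimal(-7), Decimal(3))
#     (Decimal('-2'), Decimal('-1'))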
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
def remainder_near(self, other, context=None):
"""
Remainder nearest to 0: abs(remainder) <= abs(other)/2.
"""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
# self == +/-infinity -> InvalidOperation
if self._isinfinity():
return context._raise_error(InvalidOperation,
'remainder_near(infinity, x)')
# other == 0 -> either InvalidOperation or DivisionUndefined
if not other:
if self:
return context._raise_error(InvalidOperation,
'remainder_near(x, 0)')
else:
return context._raise_error(DivisionUndefined,
'remainder_near(0, 0)')
# other = +/-infinity -> remainder = self
if other._isinfinity():
ans = Decimal(self)
return ans._fix(context)
# self = 0 -> remainder = self, with ideal exponent
ideal_exponent = min(self._exp, other._exp)
if not self:
ans = _dec_from_triple(self._sign, '0', ideal_exponent)
return ans._fix(context)
# catch most cases of large or small quotient
expdiff = self.adjusted() - other.adjusted()
if expdiff >= context.prec + 1:
# expdiff >= prec+1 => abs(self/other) > 10**prec
return context._raise_error(DivisionImpossible)
if expdiff <= -2:
# expdiff <= -2 => abs(self/other) < 0.1
ans = self._rescale(ideal_exponent, context.rounding)
return ans._fix(context)
# adjust both arguments to have the same exponent, then divide
op1 = _WorkRep(self)
op2 = _WorkRep(other)
if op1.exp >= op2.exp:
op1.int *= 10**(op1.exp - op2.exp)
else:
op2.int *= 10**(op2.exp - op1.exp)
q, r = divmod(op1.int, op2.int)
# remainder is r*10**ideal_exponent; other is +/-op2.int *
# 10**ideal_exponent. Apply correction to ensure that
# abs(remainder) <= abs(other)/2
if 2*r + (q&1) > op2.int:
r -= op2.int
q += 1
if q >= 10**context.prec:
return context._raise_error(DivisionImpossible)
# result has same sign as self unless r is negative
sign = self._sign
if r < 0:
sign = 1-sign
r = -r
ans = _dec_from_triple(sign, str(r), ideal_exponent)
return ans._fix(context)
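# A usage sketch: the remainder is chosen so that its magnitude is at
# most half that of the divisor.
#     >>> Decimal(10).remainder_near(Decimal(3))
#     Decimal('1')
#     >>> Decimal(11).remainder_near(Decimal(3))
#     Decimal('-1')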
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
return float(str(self))
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
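# Conversion to int truncates toward zero; an illustrative sketch:
#     >>> int(Decimal('2.7')), int(Decimal('-2.7'))
#     (2, -2)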
def real(self):
return self
real = property(real)
def imag(self):
return Decimal(0)
imag = property(imag)
def conjugate(self):
return self
def __complex__(self):
return complex(float(self))
def __long__(self):
"""Converts to a long.
Equivalent to long(int(self))
"""
return long(self.__int__())
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if _clamp=0,
# precision-1 if _clamp=1.
max_payload_len = context.prec - context._clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
def _fix(self, context):
"""Round if it is necessary to keep self within prec precision.
Rounds and fixes the exponent. Does not raise on a sNaN.
Arguments:
self - Decimal instance
context - context used.
"""
if self._is_special:
if self._isnan():
# decapitate payload if necessary
return self._fix_nan(context)
else:
# self is +/-Infinity; return unaltered
return Decimal(self)
# if self is zero then exponent should be between Etiny and
# Emax if _clamp==0, and between Etiny and Etop if _clamp==1.
Etiny = context.Etiny()
Etop = context.Etop()
if not self:
exp_max = [context.Emax, Etop][context._clamp]
new_exp = min(max(self._exp, Etiny), exp_max)
if new_exp != self._exp:
context._raise_error(Clamped)
return _dec_from_triple(self._sign, '0', new_exp)
else:
return Decimal(self)
# exp_min is the smallest allowable exponent of the result,
# equal to max(self.adjusted()-context.prec+1, Etiny)
exp_min = len(self._int) + self._exp - context.prec
if exp_min > Etop:
# overflow: exp_min > Etop iff self.adjusted() > Emax
ans = context._raise_error(Overflow, 'above Emax', self._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
self_is_subnormal = exp_min < Etiny
if self_is_subnormal:
exp_min = Etiny
# round if self has too many digits
if self._exp < exp_min:
digits = len(self._int) + self._exp - exp_min
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp_min-1)
digits = 0
rounding_method = self._pick_rounding_function[context.rounding]
changed = rounding_method(self, digits)
coeff = self._int[:digits] or '0'
if changed > 0:
coeff = str(int(coeff)+1)
if len(coeff) > context.prec:
coeff = coeff[:-1]
exp_min += 1
# check whether the rounding pushed the exponent out of range
if exp_min > Etop:
ans = context._raise_error(Overflow, 'above Emax', self._sign)
else:
ans = _dec_from_triple(self._sign, coeff, exp_min)
# raise the appropriate signals, taking care to respect
# the precedence described in the specification
if changed and self_is_subnormal:
context._raise_error(Underflow)
if self_is_subnormal:
context._raise_error(Subnormal)
if changed:
context._raise_error(Inexact)
context._raise_error(Rounded)
if not ans:
# raise Clamped on underflow to 0
context._raise_error(Clamped)
return ans
if self_is_subnormal:
context._raise_error(Subnormal)
# fold down if _clamp == 1 and self has too few digits
if context._clamp == 1 and self._exp > Etop:
context._raise_error(Clamped)
self_padded = self._int + '0'*(self._exp - Etop)
return _dec_from_triple(self._sign, self_padded, Etop)
# here self was representable to begin with; return unchanged
return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_up(self, prec):
"""Rounds away from 0."""
return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
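# A sketch of how the rounding modes differ on a tie, via quantize
# (which looks up the rounding function in this table):
#     >>> from decimal import Decimal, ROUND_HALF_EVEN, ROUND_HALF_UP
#     >>> Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_EVEN)
#     Decimal('2')
#     >>> Decimal('2.5').quantize(Decimal('1'), rounding=ROUND_HALF_UP)
#     Decimal('3')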
def fma(self, other, third, context=None):
"""Fused multiply-add.
Returns self*other+third with no rounding of the intermediate
product self*other.
self and other are multiplied together, with no rounding of
the result. The third operand is then added to the result,
and a single final rounding is performed.
"""
other = _convert_other(other, raiseit=True)
# compute product; raise InvalidOperation if either operand is
# a signaling NaN or if the product is zero times infinity.
if self._is_special or other._is_special:
if context is None:
context = getcontext()
if self._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', self)
if other._exp == 'N':
return context._raise_error(InvalidOperation, 'sNaN', other)
if self._exp == 'n':
product = self
elif other._exp == 'n':
product = other
elif self._exp == 'F':
if not other:
return context._raise_error(InvalidOperation,
'INF * 0 in fma')
product = _SignedInfinity[self._sign ^ other._sign]
elif other._exp == 'F':
if not self:
return context._raise_error(InvalidOperation,
'0 * INF in fma')
product = _SignedInfinity[self._sign ^ other._sign]
else:
product = _dec_from_triple(self._sign ^ other._sign,
str(int(self._int) * int(other._int)),
self._exp + other._exp)
third = _convert_other(third, raiseit=True)
return product.__add__(third, context)
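# A sketch showing why the single final rounding matters (a 4-digit
# context is assumed): fma keeps the full product, while the separate
# multiply-then-add rounds twice.
#     >>> from decimal import Decimal, getcontext
#     >>> getcontext().prec = 4
#     >>> Decimal('1.111').fma(Decimal('1.111'), Decimal('0.0005'))
#     Decimal('1.235')
#     >>> Decimal('1.111') * Decimal('1.111') + Decimal('0.0005')
#     Decimal('1.234')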
def _power_modulo(self, other, modulo, context=None):
"""Three argument version of __pow__"""
# if can't convert other and modulo to Decimal, raise
# TypeError; there's no point returning NotImplemented (no
# equivalent of __rpow__ for three argument pow)
other = _convert_other(other, raiseit=True)
modulo = _convert_other(modulo, raiseit=True)
if context is None:
context = getcontext()
# deal with NaNs: if there are any sNaNs then first one wins,
# (i.e. behaviour for NaNs is identical to that of fma)
self_is_nan = self._isnan()
other_is_nan = other._isnan()
modulo_is_nan = modulo._isnan()
if self_is_nan or other_is_nan or modulo_is_nan:
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if modulo_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
modulo)
if self_is_nan:
return self._fix_nan(context)
if other_is_nan:
return other._fix_nan(context)
return modulo._fix_nan(context)
# check inputs: we apply same restrictions as Python's pow()
if not (self._isinteger() and
other._isinteger() and
modulo._isinteger()):
return context._raise_error(InvalidOperation,
'pow() 3rd argument not allowed '
'unless all arguments are integers')
if other < 0:
return context._raise_error(InvalidOperation,
'pow() 2nd argument cannot be '
'negative when 3rd argument specified')
if not modulo:
return context._raise_error(InvalidOperation,
'pow() 3rd argument cannot be 0')
# additional restriction for decimal: the modulus must be less
# than 10**prec in absolute value
if modulo.adjusted() >= context.prec:
return context._raise_error(InvalidOperation,
'insufficient precision: pow() 3rd '
'argument must not have more than '
'precision digits')
# define 0**0 == NaN, for consistency with two-argument pow
# (even though it hurts!)
if not other and not self:
return context._raise_error(InvalidOperation,
'at least one of pow() 1st argument '
'and 2nd argument must be nonzero; '
'0**0 is not defined')
# compute sign of result
if other._iseven():
sign = 0
else:
sign = self._sign
# convert modulo to a Python integer, and self and other to
# Decimal integers (i.e. force their exponents to be >= 0)
modulo = abs(int(modulo))
base = _WorkRep(self.to_integral_value())
exponent = _WorkRep(other.to_integral_value())
# compute result using integer pow()
base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
for i in xrange(exponent.exp):
base = pow(base, 10, modulo)
base = pow(base, exponent.int, modulo)
return _dec_from_triple(sign, str(base), 0)
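# Three-argument pow delegates here; an illustrative sketch with small
# integral operands, alongside the equivalent integer pow:
#     >>> pow(Decimal(7), Decimal(23), Decimal(5))
#     Decimal('3')
#     >>> pow(7, 23, 5)
#     3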
def _power_exact(self, other, p):
"""Attempt to compute self**other exactly.
Given Decimals self and other and an integer p, attempt to
compute an exact result for the power self**other, with p
digits of precision. Return None if self**other is not
exactly representable in p digits.
Assumes that elimination of special cases has already been
performed: self and other must both be nonspecial; self must
be positive and not numerically equal to 1; other must be
nonzero. For efficiency, other._exp should not be too large,
so that 10**abs(other._exp) is a feasible calculation."""
# In the comments below, we write x for the value of self and y for the
# value of other. Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
# and yc positive integers not divisible by 10.
# The main purpose of this method is to identify the *failure*
# of x**y to be exactly representable with as little effort as
# possible. So we look for cheap and easy tests that
# eliminate the possibility of x**y being exact. Only if all
# these tests are passed do we go on to actually compute x**y.
# Here's the main idea. Express y as a rational number m/n, with m and
# n relatively prime and n>0. Then for x**y to be exactly
# representable (at *any* precision), xc must be the nth power of a
# positive integer and xe must be divisible by n. If y is negative
# then additionally xc must be a power of either 2 or 5, hence a power
# of 2**n or 5**n.
#
# There's a limit to how small |y| can be: if y=m/n as above
# then:
#
# (1) if xc != 1 then for the result to be representable we
# need xc**(1/n) >= 2, and hence also xc**|y| >= 2. So
# if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
# 2**(1/|y|), hence xc**|y| < 2 and the result is not
# representable.
#
# (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1. Hence if
# |y| < 1/|xe| then the result is not representable.
#
# Note that since x is not equal to 1, at least one of (1) and
# (2) must apply. Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
# 10**-ye iff len(str(|yc|*nbits(xc))) <= -ye.
#
# There's also a limit to how large y can be, at least if it's
# positive: the normalized result will have coefficient xc**y,
# so if it's representable then xc**y < 10**p, and y <
# p/log10(xc). Hence if y*log10(xc) >= p then the result is
# not exactly representable.
# if len(str(abs(yc*xe))) <= -ye then abs(yc*xe) < 10**-ye,
# so |y| < 1/xe and the result is not representable.
# Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
# < 1/nbits(xc).
x = _WorkRep(self)
xc, xe = x.int, x.exp
while xc % 10 == 0:
xc //= 10
xe += 1
y = _WorkRep(other)
yc, ye = y.int, y.exp
while yc % 10 == 0:
yc //= 10
ye += 1
# case where xc == 1: result is 10**(xe*y), with xe*y
# required to be an integer
if xc == 1:
xe *= yc
# result is now 10**(xe * 10**ye); xe * 10**ye must be integral
while xe % 10 == 0:
xe //= 10
ye += 1
if ye < 0:
return None
exponent = xe * 10**ye
if y.sign == 1:
exponent = -exponent
# if other is a nonnegative integer, use ideal exponent
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(exponent-ideal_exponent, p-1)
else:
zeros = 0
return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)
# case where y is negative: xc must be either a power
# of 2 or a power of 5.
if y.sign == 1:
last_digit = xc % 10
if last_digit in (2,4,6,8):
# quick test for power of 2
if xc & -xc != xc:
return None
# now xc is a power of 2; e is its exponent
e = _nbits(xc)-1
# We now have:
#
# x = 2**e * 10**xe, e > 0, and y < 0.
#
# The exact result is:
#
# x**y = 5**(-e*y) * 10**(e*y + xe*y)
#
# provided that both e*y and xe*y are integers. Note that if
# 5**(-e*y) >= 10**p, then the result can't be expressed
# exactly with p digits of precision.
#
# Using the above, we can guard against large values of ye.
# 93/65 is an upper bound for log(10)/log(5), so if
#
# ye >= len(str(93*p//65))
#
# then
#
# -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
#
# so 5**(-e*y) >= 10**p, and the coefficient of the result
# can't be expressed in p digits.
# emax >= largest e such that 5**e < 10**p.
emax = p*93//65
if ye >= len(str(emax)):
return None
# Find -e*y and -xe*y; both must be integers
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 5**e
elif last_digit == 5:
# e >= log_5(xc) if xc is a power of 5; we have
# equality all the way up to xc=5**2658
e = _nbits(xc)*28//65
xc, remainder = divmod(5**e, xc)
if remainder:
return None
while xc % 5 == 0:
xc //= 5
e -= 1
# Guard against large values of ye, using the same logic as in
# the 'xc is a power of 2' branch. 10/3 is an upper bound for
# log(10)/log(2).
emax = p*10//3
if ye >= len(str(emax)):
return None
e = _decimal_lshift_exact(e * yc, ye)
xe = _decimal_lshift_exact(xe * yc, ye)
if e is None or xe is None:
return None
if e > emax:
return None
xc = 2**e
else:
return None
if xc >= 10**p:
return None
xe = -e-xe
return _dec_from_triple(0, str(xc), xe)
# now y is positive; find m and n such that y = m/n
if ye >= 0:
m, n = yc*10**ye, 1
else:
if xe != 0 and len(str(abs(yc*xe))) <= -ye:
return None
xc_bits = _nbits(xc)
if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
return None
m, n = yc, 10**(-ye)
while m % 2 == n % 2 == 0:
m //= 2
n //= 2
while m % 5 == n % 5 == 0:
m //= 5
n //= 5
# compute nth root of xc*10**xe
if n > 1:
# if 1 < xc < 2**n then xc isn't an nth power
if xc != 1 and xc_bits <= n:
return None
xe, rem = divmod(xe, n)
if rem != 0:
return None
# compute nth root of xc using Newton's method
a = 1L << -(-_nbits(xc)//n) # initial estimate
while True:
q, r = divmod(xc, a**(n-1))
if a <= q:
break
else:
a = (a*(n-1) + q)//n
if not (a == q and r == 0):
return None
xc = a
# now xc*10**xe is the nth root of the original xc*10**xe
# compute mth power of xc*10**xe
# if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
# 10**p and the result is not representable.
if xc > 1 and m > p*100//_log10_lb(xc):
return None
xc = xc**m
xe *= m
if xc > 10**p:
return None
# by this point the result *is* exactly representable
# adjust the exponent to get as close as possible to the ideal
# exponent, if necessary
str_xc = str(xc)
if other._isinteger() and other._sign == 0:
ideal_exponent = self._exp*int(other)
zeros = min(xe-ideal_exponent, p-len(str_xc))
else:
zeros = 0
return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
def __pow__(self, other, modulo=None, context=None):
"""Return self ** other [ % modulo].
With two arguments, compute self**other.
With three arguments, compute (self**other) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- other must be nonnegative
- either self or other (or both) must be nonzero
- modulo must be nonzero and must have at most p digits,
where p is the context precision.
If any of these restrictions is violated the InvalidOperation
flag is raised.
The result of pow(self, other, modulo) is identical to the
result that would be obtained by computing (self**other) %
modulo with unbounded precision, but is computed more
efficiently. It is always exact.
"""
if modulo is not None:
return self._power_modulo(other, modulo, context)
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
# either argument is a NaN => result is NaN
ans = self._check_nans(other, context)
if ans:
return ans
# 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
if not other:
if not self:
return context._raise_error(InvalidOperation, '0 ** 0')
else:
return _One
# result has sign 1 iff self._sign is 1 and other is an odd integer
result_sign = 0
if self._sign == 1:
if other._isinteger():
if not other._iseven():
result_sign = 1
else:
# -ve**noninteger = NaN
# (-0)**noninteger = 0**noninteger
if self:
return context._raise_error(InvalidOperation,
'x ** y with x negative and y not an integer')
# negate self, without doing any unwanted rounding
self = self.copy_negate()
# 0**(+ve or Inf) = 0; 0**(-ve or -Inf) = Infinity
if not self:
if other._sign == 0:
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
if self._isinfinity():
if other._sign == 0:
return _SignedInfinity[result_sign]
else:
return _dec_from_triple(result_sign, '0', 0)
# 1**other = 1, but the choice of exponent and the flags
# depend on the exponent of self, and on whether other is a
# positive integer, a negative integer, or neither
if self == _One:
if other._isinteger():
# exp = max(self._exp*max(int(other), 0),
# 1-context.prec) but evaluating int(other) directly
# is dangerous until we know other is small (other
# could be 1e999999999)
if other._sign == 1:
multiplier = 0
elif other > context.prec:
multiplier = context.prec
else:
multiplier = int(other)
exp = self._exp * multiplier
if exp < 1-context.prec:
exp = 1-context.prec
context._raise_error(Rounded)
else:
context._raise_error(Inexact)
context._raise_error(Rounded)
exp = 1-context.prec
return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)
# compute adjusted exponent of self
self_adj = self.adjusted()
# self ** infinity is infinity if self > 1, 0 if self < 1
# self ** -infinity is infinity if self < 1, 0 if self > 1
if other._isinfinity():
if (other._sign == 0) == (self_adj < 0):
return _dec_from_triple(result_sign, '0', 0)
else:
return _SignedInfinity[result_sign]
# from here on, the result always goes through the call
# to _fix at the end of this function.
ans = None
exact = False
# crude test to catch cases of extreme overflow/underflow. If
# log10(self)*other >= 10**bound and bound >= len(str(Emax))
# then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
# self**other >= 10**(Emax+1), so overflow occurs. The test
# for underflow is similar.
bound = self._log10_exp_bound() + other.adjusted()
if (self_adj >= 0) == (other._sign == 0):
# self > 1 and other +ve, or self < 1 and other -ve
# possibility of overflow
if bound >= len(str(context.Emax)):
ans = _dec_from_triple(result_sign, '1', context.Emax+1)
else:
# self > 1 and other -ve, or self < 1 and other +ve
# possibility of underflow to 0
Etiny = context.Etiny()
if bound >= len(str(-Etiny)):
ans = _dec_from_triple(result_sign, '1', Etiny-1)
# try for an exact result with precision +1
if ans is None:
ans = self._power_exact(other, context.prec + 1)
if ans is not None:
if result_sign == 1:
ans = _dec_from_triple(1, ans._int, ans._exp)
exact = True
# usual case: inexact result, x**y computed directly as exp(y*log(x))
if ans is None:
p = context.prec
x = _WorkRep(self)
xc, xe = x.int, x.exp
y = _WorkRep(other)
yc, ye = y.int, y.exp
if y.sign == 1:
yc = -yc
# compute correctly rounded result: start with precision +3,
# then increase precision until result is unambiguously roundable
extra = 3
while True:
coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(result_sign, str(coeff), exp)
# unlike exp, ln and log10, the power function respects the
# rounding mode; no need to switch to ROUND_HALF_EVEN here
# There's a difficulty here when 'other' is not an integer and
# the result is exact. In this case, the specification
# requires that the Inexact flag be raised (in spite of
# exactness), but since the result is exact _fix won't do this
# for us. (Correspondingly, the Underflow signal should also
# be raised for subnormal results.) We can't directly raise
# these signals either before or after calling _fix, since
# that would violate the precedence for signals. So we wrap
# the ._fix call in a temporary context, and reraise
# afterwards.
if exact and not other._isinteger():
# pad with zeros up to length context.prec+1 if necessary; this
# ensures that the Rounded signal will be raised.
if len(ans._int) <= context.prec:
expdiff = context.prec + 1 - len(ans._int)
ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
ans._exp-expdiff)
# create a copy of the current context, with cleared flags/traps
newcontext = context.copy()
newcontext.clear_flags()
for exception in _signals:
newcontext.traps[exception] = 0
# round in the new context
ans = ans._fix(newcontext)
# raise Inexact, and if necessary, Underflow
newcontext._raise_error(Inexact)
if newcontext.flags[Subnormal]:
newcontext._raise_error(Underflow)
# propagate signals to the original context; _fix could
# have raised any of Overflow, Underflow, Subnormal,
# Inexact, Rounded, Clamped. Overflow needs the correct
# arguments. Note that the order of the exceptions is
# important here.
if newcontext.flags[Overflow]:
context._raise_error(Overflow, 'above Emax', ans._sign)
for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
if newcontext.flags[exception]:
context._raise_error(exception)
else:
ans = ans._fix(context)
return ans
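# A usage sketch (a 6-digit context is assumed): non-integer powers are
# correctly rounded, and exact powers of ten take the exact path above.
#     >>> from decimal import Decimal, getcontext
#     >>> getcontext().prec = 6
#     >>> Decimal(2) ** Decimal('0.5')
#     Decimal('1.41421')
#     >>> Decimal(10) ** Decimal(-3)
#     Decimal('0.001')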
def __rpow__(self, other, context=None):
"""Swaps self/other and returns __pow__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__pow__(self, context=context)
def normalize(self, context=None):
"""Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
dup = self._fix(context)
if dup._isinfinity():
return dup
if not dup:
return _dec_from_triple(dup._sign, '0', 0)
exp_max = [context.Emax, context.Etop()][context._clamp]
end = len(dup._int)
exp = dup._exp
while dup._int[end-1] == '0' and exp < exp_max:
exp += 1
end -= 1
return _dec_from_triple(dup._sign, dup._int[:end], exp)
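# An illustrative sketch: trailing zeros are stripped, and a zero result
# collapses to exponent 0.
#     >>> Decimal('120.00').normalize()
#     Decimal('1.2E+2')
#     >>> Decimal('0.00').normalize()
#     Decimal('0')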
def quantize(self, exp, rounding=None, context=None, watchexp=True):
"""Quantize self so its exponent is the same as that of exp.
Similar to self._rescale(exp._exp) but with error checking.
"""
exp = _convert_other(exp, raiseit=True)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special or exp._is_special:
ans = self._check_nans(exp, context)
if ans:
return ans
if exp._isinfinity() or self._isinfinity():
if exp._isinfinity() and self._isinfinity():
return Decimal(self) # if both are inf, it is OK
return context._raise_error(InvalidOperation,
'quantize with one INF')
# if we're not watching exponents, do a simple rescale
if not watchexp:
ans = self._rescale(exp._exp, rounding)
# raise Inexact and Rounded where appropriate
if ans._exp > self._exp:
context._raise_error(Rounded)
if ans != self:
context._raise_error(Inexact)
return ans
# exp._exp should be between Etiny and Emax
if not (context.Etiny() <= exp._exp <= context.Emax):
return context._raise_error(InvalidOperation,
'target exponent out of bounds in quantize')
if not self:
ans = _dec_from_triple(self._sign, '0', exp._exp)
return ans._fix(context)
self_adjusted = self.adjusted()
if self_adjusted > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if self_adjusted - exp._exp + 1 > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
ans = self._rescale(exp._exp, rounding)
if ans.adjusted() > context.Emax:
return context._raise_error(InvalidOperation,
'exponent of quantize result too large for current context')
if len(ans._int) > context.prec:
return context._raise_error(InvalidOperation,
'quantize result has too many digits for current context')
# raise appropriate flags
if ans and ans.adjusted() < context.Emin:
context._raise_error(Subnormal)
if ans._exp > self._exp:
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
# call to fix takes care of any necessary folddown, and
# signals Clamped if necessary
ans = ans._fix(context)
return ans
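# A usage sketch: the result copies the exponent of the second operand,
# rounding with the context's mode unless one is given.
#     >>> Decimal('1.41421356').quantize(Decimal('1.000'))
#     Decimal('1.414')
#     >>> Decimal('1.5').quantize(Decimal('1'))
#     Decimal('2')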
def same_quantum(self, other):
"""Return True if self and other have the same exponent; otherwise
return False.
If either operand is a special value, the following rules are used:
* return True if both operands are infinities
* return True if both operands are NaNs
* otherwise, return False.
"""
other = _convert_other(other, raiseit=True)
if self._is_special or other._is_special:
return (self.is_nan() and other.is_nan() or
self.is_infinite() and other.is_infinite())
return self._exp == other._exp
def _rescale(self, exp, rounding):
"""Rescale self so that the exponent is exp, either by padding with zeros
or by truncating digits, using the given rounding mode.
Specials are returned without change. This operation is
quiet: it raises no flags, and uses no information from the
context.
exp = exp to scale to (an integer)
rounding = rounding mode
"""
if self._is_special:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', exp)
if self._exp >= exp:
# pad answer with zeros if necessary
return _dec_from_triple(self._sign,
self._int + '0'*(self._exp - exp), exp)
# too many digits; round and lose data. If self.adjusted() <
# exp-1, replace self by 10**(exp-1) before rounding
digits = len(self._int) + self._exp - exp
if digits < 0:
self = _dec_from_triple(self._sign, '1', exp-1)
digits = 0
this_function = self._pick_rounding_function[rounding]
changed = this_function(self, digits)
coeff = self._int[:digits] or '0'
if changed == 1:
coeff = str(int(coeff)+1)
return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
"""Round a nonzero, nonspecial Decimal to a fixed number of
significant figures, using the given rounding mode.
Infinities, NaNs and zeros are returned unaltered.
This operation is quiet: it raises no flags, and uses no
information from the context.
"""
if places <= 0:
raise ValueError("argument should be at least 1 in _round")
if self._is_special or not self:
return Decimal(self)
ans = self._rescale(self.adjusted()+1-places, rounding)
# it can happen that the rescale alters the adjusted exponent;
# for example when rounding 99.97 to 3 significant figures.
# When this happens we end up with an extra 0 at the end of
# the number; a second rescale fixes this.
if ans.adjusted() != self.adjusted():
ans = ans._rescale(ans.adjusted()+1-places, rounding)
return ans
def to_integral_exact(self, rounding=None, context=None):
"""Rounds to a nearby integer.
If no rounding mode is specified, take the rounding mode from
the context. This method raises the Rounded and Inexact flags
when appropriate.
See also: to_integral_value, which does exactly the same as
this method except that it doesn't raise Inexact or Rounded.
"""
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
if not self:
return _dec_from_triple(self._sign, '0', 0)
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
ans = self._rescale(0, rounding)
if ans != self:
context._raise_error(Inexact)
context._raise_error(Rounded)
return ans
def to_integral_value(self, rounding=None, context=None):
"""Rounds to the nearest integer, without raising inexact, rounded."""
if context is None:
context = getcontext()
if rounding is None:
rounding = context.rounding
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
return Decimal(self)
if self._exp >= 0:
return Decimal(self)
else:
return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
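# A sketch of the integral-rounding entry points: to_integral_exact
# signals Inexact/Rounded, to_integral_value does not; both give the
# same numerical result.
#     >>> from decimal import Decimal, ROUND_HALF_UP
#     >>> Decimal('2.67').to_integral_value()
#     Decimal('3')
#     >>> Decimal('-2.5').to_integral_value(rounding=ROUND_HALF_UP)
#     Decimal('-3')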
def sqrt(self, context=None):
"""Return the square root of self."""
if context is None:
context = getcontext()
if self._is_special:
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() and self._sign == 0:
return Decimal(self)
if not self:
# exponent = self._exp // 2. sqrt(-0) = -0
ans = _dec_from_triple(self._sign, '0', self._exp // 2)
return ans._fix(context)
if self._sign == 1:
return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
# At this point self represents a positive number. Let p be
# the desired precision and express self in the form c*100**e
# with c a positive real number and e an integer, c and e
# being chosen so that 100**(p-1) <= c < 100**p. Then the
# (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
# <= sqrt(c) < 10**p, so the closest representable Decimal at
# precision p is n*10**e where n = round_half_even(sqrt(c)),
# the closest integer to sqrt(c) with the even integer chosen
# in the case of a tie.
#
# To ensure correct rounding in all cases, we use the
# following trick: we compute the square root to an extra
# place (precision p+1 instead of precision p), rounding down.
# Then, if the result is inexact and its last digit is 0 or 5,
# we increase the last digit to 1 or 6 respectively; if it's
# exact we leave the last digit alone. Now the final round to
# p places (or fewer in the case of underflow) will round
# correctly and raise the appropriate flags.
# use an extra digit of precision
prec = context.prec+1
# write argument in the form c*100**e where e = self._exp//2
# is the 'ideal' exponent, to be used if the square root is
# exactly representable. l is the number of 'digits' of c in
# base 100, so that 100**(l-1) <= c < 100**l.
op = _WorkRep(self)
e = op.exp >> 1
if op.exp & 1:
c = op.int * 10
l = (len(self._int) >> 1) + 1
else:
c = op.int
l = len(self._int)+1 >> 1
# rescale so that c has exactly prec base 100 'digits'
shift = prec-l
if shift >= 0:
c *= 100**shift
exact = True
else:
c, remainder = divmod(c, 100**-shift)
exact = not remainder
e -= shift
# find n = floor(sqrt(c)) using Newton's method
n = 10**prec
while True:
q = c//n
if n <= q:
break
else:
n = n + q >> 1
exact = exact and n*n == c
if exact:
# result is exact; rescale to use ideal exponent e
if shift >= 0:
# assert n % 10**shift == 0
n //= 10**shift
else:
n *= 10**-shift
e += shift
else:
# result is not exact; fix last digit as described above
if n % 5 == 0:
n += 1
ans = _dec_from_triple(0, str(n), e)
# round, and fit to current context
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
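# An illustrative sketch (a 6-digit context is assumed); exact square
# roots are reported at the ideal exponent self._exp // 2.
#     >>> from decimal import Decimal, getcontext
#     >>> getcontext().prec = 6
#     >>> Decimal(2).sqrt()
#     Decimal('1.41421')
#     >>> Decimal('100').sqrt()
#     Decimal('10')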
def max(self, other, context=None):
"""Returns the larger value.
Like max(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then
# the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
# If both operands are finite and equal in numerical value
# then an ordering is applied:
#
# If the signs differ then max returns the operand with the
# positive sign and min returns the operand with the negative sign
#
# If the signs are the same then the exponent is used to select
# the result. This is exactly the ordering used in compare_total.
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min(self, other, context=None):
"""Returns the smaller value.
Like min(self, other) except if one is not a number, returns
NaN (and signals if one is sNaN). Also rounds.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then
# the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self._cmp(other)
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
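# A sketch of max/min with a quiet NaN operand: the non-NaN operand is
# returned (after rounding to the current context).
#     >>> Decimal(2).max(Decimal('NaN'))
#     Decimal('2')
#     >>> Decimal(1).min(Decimal(2))
#     Decimal('1')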
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self, context=None):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
"""Compares self to the other operand numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
"""
other = _convert_other(other, raiseit = True)
ans = self._compare_check_nans(other, context)
if ans:
return ans
return self.compare(other, context=context)
def compare_total(self, other):
"""Compares self to other using the abstract representations.
This is not like the standard compare, which uses the numerical
values. Note that a total ordering is defined for all possible abstract
representations.
"""
other = _convert_other(other, raiseit=True)
# if one is negative and the other is positive, it's easy
if self._sign and not other._sign:
return _NegativeOne
if not self._sign and other._sign:
return _One
sign = self._sign
# let's handle both NaN types
self_nan = self._isnan()
other_nan = other._isnan()
if self_nan or other_nan:
if self_nan == other_nan:
# compare payloads as though they're integers
self_key = len(self._int), self._int
other_key = len(other._int), other._int
if self_key < other_key:
if sign:
return _One
else:
return _NegativeOne
if self_key > other_key:
if sign:
return _NegativeOne
else:
return _One
return _Zero
if sign:
if self_nan == 1:
return _NegativeOne
if other_nan == 1:
return _One
if self_nan == 2:
return _NegativeOne
if other_nan == 2:
return _One
else:
if self_nan == 1:
return _One
if other_nan == 1:
return _NegativeOne
if self_nan == 2:
return _One
if other_nan == 2:
return _NegativeOne
if self < other:
return _NegativeOne
if self > other:
return _One
if self._exp < other._exp:
if sign:
return _One
else:
return _NegativeOne
if self._exp > other._exp:
if sign:
return _NegativeOne
else:
return _One
return _Zero
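# An illustrative sketch: numerically equal values with different
# exponents are still ordered by the total ordering.
#     >>> Decimal('12.0').compare_total(Decimal('12'))
#     Decimal('-1')
#     >>> Decimal('12').compare_total(Decimal('12'))
#     Decimal('0')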
def compare_total_mag(self, other):
"""Compares self to other using abstract repr., ignoring sign.
Like compare_total, but with the operands' signs ignored and assumed to be 0.
"""
other = _convert_other(other, raiseit=True)
s = self.copy_abs()
o = other.copy_abs()
return s.compare_total(o)
def copy_abs(self):
"""Returns a copy with the sign set to 0. """
return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
"""Returns a copy with the sign inverted."""
if self._sign:
return _dec_from_triple(0, self._int, self._exp, self._is_special)
else:
return _dec_from_triple(1, self._int, self._exp, self._is_special)
def copy_sign(self, other):
"""Returns self with the sign of other."""
other = _convert_other(other, raiseit=True)
return _dec_from_triple(other._sign, self._int,
self._exp, self._is_special)
def exp(self, context=None):
"""Returns e ** self."""
if context is None:
context = getcontext()
# exp(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# exp(-Infinity) = 0
if self._isinfinity() == -1:
return _Zero
# exp(0) = 1
if not self:
return _One
# exp(Infinity) = Infinity
if self._isinfinity() == 1:
return Decimal(self)
# the result is now guaranteed to be inexact (the true
# mathematical result is transcendental). There's no need to
# raise Rounded and Inexact here---they'll always be raised as
# a result of the call to _fix.
p = context.prec
adj = self.adjusted()
# we only need to do any computation for quite a small range
# of adjusted exponents---for example, -29 <= adj <= 10 for
# the default context. For smaller exponent the result is
# indistinguishable from 1 at the given precision, while for
# larger exponent the result either overflows or underflows.
if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
# overflow
ans = _dec_from_triple(0, '1', context.Emax+1)
elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
# underflow to 0
ans = _dec_from_triple(0, '1', context.Etiny()-1)
elif self._sign == 0 and adj < -p:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
elif self._sign == 1 and adj < -p-1:
# p+1 digits; final round will raise correct flags
ans = _dec_from_triple(0, '9'*(p+1), -p-1)
# general case
else:
op = _WorkRep(self)
c, e = op.int, op.exp
if op.sign == 1:
c = -c
# compute correctly rounded result: increase precision by
# 3 digits at a time until we get an unambiguously
# roundable result
extra = 3
while True:
coeff, exp = _dexp(c, e, p+extra)
if coeff % (5*10**(len(str(coeff))-p-1)):
break
extra += 3
ans = _dec_from_triple(0, str(coeff), exp)
# at this stage, ans should round correctly with *any*
# rounding mode, not just with ROUND_HALF_EVEN
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
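# A usage sketch (a 6-digit context is assumed):
#     >>> from decimal import Decimal, getcontext
#     >>> getcontext().prec = 6
#     >>> Decimal(1).exp()
#     Decimal('2.71828')
#     >>> Decimal(0).exp()
#     Decimal('1')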
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.ln().
In other words, compute r such that self.ln() >= 10**r. Assumes
that self is finite and positive and that self != 1.
"""
# for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
adj = self._exp + len(self._int) - 1
if adj >= 1:
# argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
return len(str(adj*23//10)) - 1
if adj <= -2:
# argument <= 0.1
return len(str((-1-adj)*23//10)) - 1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(c)
return len(num) - len(den) - (num < den)
# adj == -1, 0.1 <= self < 1
return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
"""Returns the natural (base e) logarithm of self."""
if context is None:
context = getcontext()
# ln(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# ln(0.0) == -Infinity
if not self:
return _NegativeInfinity
# ln(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# ln(1.0) == 0.0
if self == _One:
return _Zero
# ln(negative) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'ln of a negative value')
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision by 3
# until we get an unambiguously roundable result
places = p - self._ln_exp_bound() + 2 # at least p+3 places
while True:
coeff = _dlog(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
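# A usage sketch (a 6-digit context is assumed):
#     >>> from decimal import Decimal, getcontext
#     >>> getcontext().prec = 6
#     >>> Decimal(10).ln()
#     Decimal('2.30259')
#     >>> Decimal(1).ln()
#     Decimal('0')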
def _log10_exp_bound(self):
"""Compute a lower bound for the adjusted exponent of self.log10().
In other words, find r such that self.log10() >= 10**r.
Assumes that self is finite and positive and that self != 1.
"""
# For x >= 10 or x < 0.1 we only need a bound on the integer
# part of log10(self), and this comes directly from the
# exponent of x. For 0.1 <= x <= 10 we use the inequalities
# 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
# (1-1/x)/2.31 > 0. If x < 1 then |log10(x)| > (1-x)/2.31 > 0
adj = self._exp + len(self._int) - 1
if adj >= 1:
# self >= 10
return len(str(adj))-1
if adj <= -2:
# self < 0.1
return len(str(-1-adj))-1
op = _WorkRep(self)
c, e = op.int, op.exp
if adj == 0:
# 1 < self < 10
num = str(c-10**-e)
den = str(231*c)
return len(num) - len(den) - (num < den) + 2
# adj == -1, 0.1 <= self < 1
num = str(10**-e-c)
return len(num) + e - (num < "231") - 1
def log10(self, context=None):
"""Returns the base 10 logarithm of self."""
if context is None:
context = getcontext()
# log10(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
# log10(0.0) == -Infinity
if not self:
return _NegativeInfinity
# log10(Infinity) = Infinity
if self._isinfinity() == 1:
return _Infinity
# log10(negative or -Infinity) raises InvalidOperation
if self._sign == 1:
return context._raise_error(InvalidOperation,
'log10 of a negative value')
# log10(10**n) = n
if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
# answer may need rounding
ans = Decimal(self._exp + len(self._int) - 1)
else:
# result is irrational, so necessarily inexact
op = _WorkRep(self)
c, e = op.int, op.exp
p = context.prec
# correctly rounded result: repeatedly increase precision
# until result is unambiguously roundable
places = p-self._log10_exp_bound()+2
while True:
coeff = _dlog10(c, e, places)
# assert len(str(abs(coeff)))-p >= 1
if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
break
places += 3
ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
context = context._shallow_copy()
rounding = context._set_rounding(ROUND_HALF_EVEN)
ans = ans._fix(context)
context.rounding = rounding
return ans
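# A usage sketch (a 6-digit context is assumed); exact powers of ten
# short-circuit to an integral result.
#     >>> from decimal import Decimal, getcontext
#     >>> getcontext().prec = 6
#     >>> Decimal('1000').log10()
#     Decimal('3')
#     >>> Decimal(2).log10()
#     Decimal('0.301030')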
def logb(self, context=None):
""" Returns the exponent of the magnitude of self's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of self (as though it were truncated
to a single digit while maintaining the value of that digit and
without limiting the resulting exponent).
"""
# logb(NaN) = NaN
ans = self._check_nans(context=context)
if ans:
return ans
if context is None:
context = getcontext()
# logb(+/-Inf) = +Inf
if self._isinfinity():
return _Infinity
# logb(0) = -Inf, DivisionByZero
if not self:
return context._raise_error(DivisionByZero, 'logb(0)', 1)
# otherwise, simply return the adjusted exponent of self, as a
# Decimal. Note that no attempt is made to fit the result
# into the current context.
ans = Decimal(self.adjusted())
return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
To be logical, self must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits are all
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
"""Applies an 'and' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)&int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_invert(self, context=None):
"""Invert all its digits."""
if context is None:
context = getcontext()
return self.logical_xor(_dec_from_triple(0,'1'*context.prec,0),
context)
def logical_or(self, other, context=None):
"""Applies an 'or' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)|int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
def logical_xor(self, other, context=None):
"""Applies an 'xor' operation between self and other's digits."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
if not self._islogical() or not other._islogical():
return context._raise_error(InvalidOperation)
# fill to context.prec
(opa, opb) = self._fill_logical(context, self._int, other._int)
# make the operation, and clean starting zeroes
result = "".join([str(int(a)^int(b)) for a,b in zip(opa,opb)])
return _dec_from_triple(0, result.lstrip('0') or '0', 0)
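# A sketch of the digit-wise logical operations (operands must be made
# of 0 and 1 digits, with sign 0 and exponent 0):
#     >>> Decimal('1100').logical_and(Decimal('1010'))
#     Decimal('1000')
#     >>> Decimal('1100').logical_xor(Decimal('1010'))
#     Decimal('110')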
def max_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then
# the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = other
else:
ans = self
return ans._fix(context)
def min_mag(self, other, context=None):
"""Compares the values numerically with their sign ignored."""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
if self._is_special or other._is_special:
# If one operand is a quiet NaN and the other is a number, then
# the number is always returned
sn = self._isnan()
on = other._isnan()
if sn or on:
if on == 1 and sn == 0:
return self._fix(context)
if sn == 1 and on == 0:
return other._fix(context)
return self._check_nans(other, context)
c = self.copy_abs()._cmp(other.copy_abs())
if c == 0:
c = self.compare_total(other)
if c == -1:
ans = self
else:
ans = other
return ans._fix(context)
def next_minus(self, context=None):
"""Returns the largest representable number smaller than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == -1:
return _NegativeInfinity
if self._isinfinity() == 1:
return _dec_from_triple(0, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_FLOOR)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_plus(self, context=None):
"""Returns the smallest representable number larger than itself."""
if context is None:
context = getcontext()
ans = self._check_nans(context=context)
if ans:
return ans
if self._isinfinity() == 1:
return _Infinity
if self._isinfinity() == -1:
return _dec_from_triple(1, '9'*context.prec, context.Etop())
context = context.copy()
context._set_rounding(ROUND_CEILING)
context._ignore_all_flags()
new_self = self._fix(context)
if new_self != self:
return new_self
return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
context)
def next_toward(self, other, context=None):
"""Returns the number closest to self, in the direction towards other.
The result is the closest representable number to self
(excluding self) that is in the direction towards other,
unless both have the same value. If the two operands are
numerically equal, then the result is a copy of self with the
sign set to be the same as the sign of other.
"""
other = _convert_other(other, raiseit=True)
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
comparison = self._cmp(other)
if comparison == 0:
return self.copy_sign(other)
if comparison == -1:
ans = self.next_plus(context)
else: # comparison == 1
ans = self.next_minus(context)
# decide which flags to raise using value of ans
if ans._isinfinity():
context._raise_error(Overflow,
'Infinite result from next_toward',
ans._sign)
context._raise_error(Inexact)
context._raise_error(Rounded)
elif ans.adjusted() < context.Emin:
context._raise_error(Underflow)
context._raise_error(Subnormal)
context._raise_error(Inexact)
context._raise_error(Rounded)
# if precision == 1 then we don't raise Clamped for a
# result 0E-Etiny.
if not ans:
context._raise_error(Clamped)
return ans
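# An illustrative sketch of the three 'next' operations (a 3-digit
# context with the default exponent limits is assumed):
#     >>> from decimal import Decimal, getcontext
#     >>> getcontext().prec = 3
#     >>> Decimal('1.00').next_plus()
#     Decimal('1.01')
#     >>> Decimal('1.00').next_minus()
#     Decimal('0.999')
#     >>> Decimal('1.00').next_toward(Decimal(-10))
#     Decimal('0.999')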
def number_class(self, context=None):
"""Returns an indication of the class of self.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
"""
if self.is_snan():
return "sNaN"
if self.is_qnan():
return "NaN"
inf = self._isinfinity()
if inf == 1:
return "+Infinity"
if inf == -1:
return "-Infinity"
if self.is_zero():
if self._sign:
return "-Zero"
else:
return "+Zero"
if context is None:
context = getcontext()
if self.is_subnormal(context=context):
if self._sign:
return "-Subnormal"
else:
return "+Subnormal"
# just a normal, regular, boring number, :)
if self._sign:
return "-Normal"
else:
return "+Normal"
def radix(self):
"""Just returns 10, as this is Decimal, :)"""
return Decimal(10)
def rotate(self, other, context=None):
"""Returns a rotated copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's rotate!
rotated = rotdig[torot:] + rotdig[:torot]
return _dec_from_triple(self._sign,
rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
"""Returns self operand after adding the second value to its exp."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
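# The specification bounds the allowed exponent adjustment to
# +/- 2*(Emax + prec).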
liminf = -2 * (context.Emax + context.prec)
limsup = 2 * (context.Emax + context.prec)
if not (liminf <= int(other) <= limsup):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
d = d._fix(context)
return d
def shift(self, other, context=None):
"""Returns a shifted copy of self, value-of-other times."""
if context is None:
context = getcontext()
other = _convert_other(other, raiseit=True)
ans = self._check_nans(other, context)
if ans:
return ans
if other._exp != 0:
return context._raise_error(InvalidOperation)
if not (-context.prec <= int(other) <= context.prec):
return context._raise_error(InvalidOperation)
if self._isinfinity():
return Decimal(self)
# get values, pad if necessary
torot = int(other)
rotdig = self._int
topad = context.prec - len(rotdig)
if topad > 0:
rotdig = '0'*topad + rotdig
elif topad < 0:
rotdig = rotdig[-topad:]
# let's shift!
if torot < 0:
shifted = rotdig[:torot]
else:
shifted = rotdig + '0'*torot
shifted = shifted[-context.prec:]
return _dec_from_triple(self._sign,
shifted.lstrip('0') or '0', self._exp)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
# PEP 3101 support. The _localeconv keyword argument should be
# considered private: it's provided for ease of testing only.
def __format__(self, specifier, context=None, _localeconv=None):
"""Format a Decimal instance according to the given specifier.
The specifier should be a standard format specifier, with the
form described in PEP 3101. Formatting types 'e', 'E', 'f',
'F', 'g', 'G', 'n' and '%' are supported. If the formatting
type is omitted it defaults to 'g' or 'G', depending on the
value of context.capitals.
"""
# Note: PEP 3101 says that if the type is not present then
# there should be at least one digit after the decimal point.
# We take the liberty of ignoring this requirement for
# Decimal---it's presumably there to make sure that
# format(float, '') behaves similarly to str(float).
if context is None:
context = getcontext()
spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
# special values don't care about the type or precision
if self._is_special:
sign = _format_sign(self._sign, spec)
body = str(self.copy_abs())
return _format_align(sign, body, spec)
# a type of None defaults to 'g' or 'G', depending on context
if spec['type'] is None:
spec['type'] = ['g', 'G'][context.capitals]
# if type is '%', adjust exponent of self accordingly
if spec['type'] == '%':
self = _dec_from_triple(self._sign, self._int, self._exp+2)
# round if necessary, taking rounding mode from the context
rounding = context.rounding
precision = spec['precision']
if precision is not None:
if spec['type'] in 'eE':
self = self._round(precision+1, rounding)
elif spec['type'] in 'fF%':
self = self._rescale(-precision, rounding)
elif spec['type'] in 'gG' and len(self._int) > precision:
self = self._round(precision, rounding)
# special case: zeros with a positive exponent can't be
# represented in fixed point; rescale them to 0e0.
if not self and self._exp > 0 and spec['type'] in 'fF%':
self = self._rescale(0, rounding)
# figure out placement of the decimal point
leftdigits = self._exp + len(self._int)
if spec['type'] in 'eE':
if not self and precision is not None:
dotplace = 1 - precision
else:
dotplace = 1
elif spec['type'] in 'fF%':
dotplace = leftdigits
elif spec['type'] in 'gG':
if self._exp <= 0 and leftdigits > -6:
dotplace = leftdigits
else:
dotplace = 1
# find digits before and after decimal point, and get exponent
if dotplace < 0:
intpart = '0'
fracpart = '0'*(-dotplace) + self._int
elif dotplace > len(self._int):
intpart = self._int + '0'*(dotplace-len(self._int))
fracpart = ''
else:
intpart = self._int[:dotplace] or '0'
fracpart = self._int[dotplace:]
exp = leftdigits-dotplace
# done with the decimal-specific stuff; hand over the rest
# of the formatting to the _format_number function
return _format_number(self._sign, intpart, fracpart, exp, spec)
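# Illustrative results with the default context (ROUND_HALF_EVEN):
#     format(Decimal('1234.5678'), '.2f')  ->  '1234.57'
#     format(Decimal('1234.5678'), '.3e')  ->  '1.235e+3'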
def _dec_from_triple(sign, coefficient, exponent, special=False):
"""Create a decimal instance directly, without any validation,
normalization (e.g. removal of leading zeros) or argument
conversion.
This function is for *internal use only*.
"""
self = object.__new__(Decimal)
self._sign = sign
self._int = coefficient
self._exp = exponent
self._is_special = special
return self
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats).
_numbers.Number.register(Decimal)
##### Context class #######################################################
class _ContextManager(object):
"""Context manager class to support localcontext().
Sets a copy of the supplied context in __enter__() and restores
the previous decimal context in __exit__()
"""
def __init__(self, new_context):
self.new_context = new_context.copy()
def __enter__(self):
self.saved_context = getcontext()
setcontext(self.new_context)
return self.new_context
def __exit__(self, t, v, tb):
setcontext(self.saved_context)
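# Typical use (illustrative sketch; assumes the module-level localcontext()
# helper, defined elsewhere in this file, wraps _ContextManager):
#
#     with localcontext(Context(prec=5)) as ctx:
#         ...          # arithmetic here uses the temporary 5-digit context
#     # the previous context is restored automatically on exit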
class Context(object):
"""Contains the context for a Decimal instance.
Contains:
prec - precision (for use in rounding, division, square roots..)
rounding - rounding type (how you round)
traps - If traps[exception] = 1, then the exception is
raised when it is caused. Otherwise, a value is
substituted in.
flags - When an exception is caused, flags[exception] is set.
(Whether or not the trap_enabler is set)
Should be reset by user of Decimal instance.
Emin - Minimum exponent
Emax - Maximum exponent
capitals - If 1, 1*10^1 is printed as 1E+1.
If 0, printed as 1e1
_clamp - If 1, change exponents if too high (Default 0)
"""
def __init__(self, prec=None, rounding=None,
traps=None, flags=None,
Emin=None, Emax=None,
capitals=None, _clamp=0,
_ignored_flags=None):
# Set defaults; for everything except flags and _ignored_flags,
# inherit from DefaultContext.
try:
dc = DefaultContext
except NameError:
pass
self.prec = prec if prec is not None else dc.prec
self.rounding = rounding if rounding is not None else dc.rounding
self.Emin = Emin if Emin is not None else dc.Emin
self.Emax = Emax if Emax is not None else dc.Emax
self.capitals = capitals if capitals is not None else dc.capitals
self._clamp = _clamp if _clamp is not None else dc._clamp
if _ignored_flags is None:
self._ignored_flags = []
else:
self._ignored_flags = _ignored_flags
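# traps and flags may each be given either as a dict mapping signals to
# 0/1 values, or as a list/set of the signals that should be enabled.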
if traps is None:
self.traps = dc.traps.copy()
elif not isinstance(traps, dict):
self.traps = dict((s, int(s in traps)) for s in _signals)
else:
self.traps = traps
if flags is None:
self.flags = dict.fromkeys(_signals, 0)
elif not isinstance(flags, dict):
self.flags = dict((s, int(s in flags)) for s in _signals)
else:
self.flags = flags
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.traps,
self.flags, self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.traps.copy(),
self.flags.copy(), self.Emin, self.Emax,
self.capitals, self._clamp, self._ignored_flags)
return nc
__copy__ = copy
def _raise_error(self, condition, explanation = None, *args):
"""Handles an error
If the flag is in _ignored_flags, returns the default response.
Otherwise, it sets the flag, then, if the corresponding
trap_enabler is set, it reraises the exception. Otherwise, it returns
the default value after setting the flag.
"""
error = _condition_map.get(condition, condition)
if error in self._ignored_flags:
# Don't touch the flag
return error().handle(self, *args)
self.flags[error] = 1
if not self.traps[error]:
# The errors define how to handle themselves.
return condition().handle(self, *args)
# Errors should only be risked on copies of the context
# self._ignored_flags = []
raise error(explanation)
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate; this way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding = type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, basestring) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
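# A NaN's diagnostic payload must fit in the context precision
# (one digit less when the clamp flag is set).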
if d._isnan() and len(d._int) > self.prec - self._clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
return a.canonical(context=self)
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print c.flags[InvalidOperation]
1
>>> c.flags[InvalidOperation] = 0
>>> print c.flags[InvalidOperation]
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print c.flags[InvalidOperation]
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which use their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with the sign of each operand ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__div__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
a = _convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
Decimal('-10')
>>> ExtendedContext.max_mag(1, -2)
Decimal('-2')
>>> ExtendedContext.max_mag(Decimal(1), -2)
Decimal('-2')
>>> ExtendedContext.max_mag(1, Decimal(-2))
Decimal('-2')
"""
a = _convert_other(a, raiseit=True)
return a.max_mag(b, context=self)
def min(self, a, b):
"""min compares two values numerically and returns the minimum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the minimum (closer to negative
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.min(Decimal('3'), Decimal('2'))
Decimal('2')
>>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
Decimal('-10')
>>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
Decimal('1.0')
>>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.min(1, 2)
Decimal('1')
>>> ExtendedContext.min(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.min(1, Decimal(29))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min(b, context=self)
def min_mag(self, a, b):
"""Compares the values numerically with their sign ignored.
>>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
Decimal('-2')
>>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
Decimal('-3')
>>> ExtendedContext.min_mag(1, -2)
Decimal('1')
>>> ExtendedContext.min_mag(Decimal(1), -2)
Decimal('1')
>>> ExtendedContext.min_mag(1, Decimal(-2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.min_mag(b, context=self)
def minus(self, a):
"""Minus corresponds to unary prefix minus in Python.
The operation is evaluated using the same rules as subtract; the
operation minus(a) is calculated as subtract('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.minus(Decimal('1.3'))
Decimal('-1.3')
>>> ExtendedContext.minus(Decimal('-1.3'))
Decimal('1.3')
>>> ExtendedContext.minus(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__neg__(context=self)
def multiply(self, a, b):
"""multiply multiplies two operands.
If either operand is a special value then the general rules apply.
Otherwise, the operands are multiplied together
('long multiplication'), resulting in a number which may be as long as
the sum of the lengths of the two operands.
>>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
Decimal('3.60')
>>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
Decimal('21')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
Decimal('0.72')
>>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
Decimal('-0.0')
>>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
Decimal('4.28135971E+11')
>>> ExtendedContext.multiply(7, 7)
Decimal('49')
>>> ExtendedContext.multiply(Decimal(7), 7)
Decimal('49')
>>> ExtendedContext.multiply(7, Decimal(7))
Decimal('49')
"""
a = _convert_other(a, raiseit=True)
r = a.__mul__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def next_minus(self, a):
"""Returns the largest representable number smaller than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_minus(Decimal('1'))
Decimal('0.999999999')
>>> c.next_minus(Decimal('1E-1007'))
Decimal('0E-1007')
>>> ExtendedContext.next_minus(Decimal('-1.00000003'))
Decimal('-1.00000004')
>>> c.next_minus(Decimal('Infinity'))
Decimal('9.99999999E+999')
>>> c.next_minus(1)
Decimal('0.999999999')
"""
a = _convert_other(a, raiseit=True)
return a.next_minus(context=self)
def next_plus(self, a):
"""Returns the smallest representable number larger than a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> ExtendedContext.next_plus(Decimal('1'))
Decimal('1.00000001')
>>> c.next_plus(Decimal('-1E-1007'))
Decimal('-0E-1007')
>>> ExtendedContext.next_plus(Decimal('-1.00000003'))
Decimal('-1.00000002')
>>> c.next_plus(Decimal('-Infinity'))
Decimal('-9.99999999E+999')
>>> c.next_plus(1)
Decimal('1.00000001')
"""
a = _convert_other(a, raiseit=True)
return a.next_plus(context=self)
def next_toward(self, a, b):
"""Returns the number closest to a, in direction towards b.
The result is the closest representable number from the first
operand (but not the first operand) that is in the direction
towards the second operand, unless the operands have the same
value.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.next_toward(Decimal('1'), Decimal('2'))
Decimal('1.00000001')
>>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
Decimal('-0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
Decimal('-1.00000002')
>>> c.next_toward(Decimal('1'), Decimal('0'))
Decimal('0.999999999')
>>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
Decimal('0E-1007')
>>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
Decimal('-1.00000004')
>>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
Decimal('-0.00')
>>> c.next_toward(0, 1)
Decimal('1E-1007')
>>> c.next_toward(Decimal(0), 1)
Decimal('1E-1007')
>>> c.next_toward(0, Decimal(1))
Decimal('1E-1007')
"""
a = _convert_other(a, raiseit=True)
return a.next_toward(b, context=self)
def normalize(self, a):
"""normalize reduces an operand to its simplest form.
Essentially a plus operation with all trailing zeros removed from the
result.
>>> ExtendedContext.normalize(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.normalize(Decimal('-2.0'))
Decimal('-2')
>>> ExtendedContext.normalize(Decimal('1.200'))
Decimal('1.2')
>>> ExtendedContext.normalize(Decimal('-120'))
Decimal('-1.2E+2')
>>> ExtendedContext.normalize(Decimal('120.00'))
Decimal('1.2E+2')
>>> ExtendedContext.normalize(Decimal('0.00'))
Decimal('0')
>>> ExtendedContext.normalize(6)
Decimal('6')
"""
a = _convert_other(a, raiseit=True)
return a.normalize(context=self)
def number_class(self, a):
"""Returns an indication of the class of the operand.
The class is one of the following strings:
sNaN
NaN
-Infinity
-Normal
-Subnormal
-Zero
+Zero
+Subnormal
+Normal
+Infinity
>>> c = Context(ExtendedContext)
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.number_class(Decimal('Infinity'))
'+Infinity'
>>> c.number_class(Decimal('1E-10'))
'+Normal'
>>> c.number_class(Decimal('2.50'))
'+Normal'
>>> c.number_class(Decimal('0.1E-999'))
'+Subnormal'
>>> c.number_class(Decimal('0'))
'+Zero'
>>> c.number_class(Decimal('-0'))
'-Zero'
>>> c.number_class(Decimal('-0.1E-999'))
'-Subnormal'
>>> c.number_class(Decimal('-1E-10'))
'-Normal'
>>> c.number_class(Decimal('-2.50'))
'-Normal'
>>> c.number_class(Decimal('-Infinity'))
'-Infinity'
>>> c.number_class(Decimal('NaN'))
'NaN'
>>> c.number_class(Decimal('-NaN'))
'NaN'
>>> c.number_class(Decimal('sNaN'))
'sNaN'
>>> c.number_class(123)
'+Normal'
"""
a = _convert_other(a, raiseit=True)
return a.number_class(context=self)
def plus(self, a):
"""Plus corresponds to unary prefix plus in Python.
The operation is evaluated using the same rules as add; the
operation plus(a) is calculated as add('0', a) where the '0'
has the same exponent as the operand.
>>> ExtendedContext.plus(Decimal('1.3'))
Decimal('1.3')
>>> ExtendedContext.plus(Decimal('-1.3'))
Decimal('-1.3')
>>> ExtendedContext.plus(-1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.__pos__(context=self)
def power(self, a, b, modulo=None):
"""Raises a to the power of b, to modulo if given.
With two arguments, compute a**b. If a is negative then b
must be integral. The result will be inexact unless b is
integral and the result is finite and can be expressed exactly
in 'precision' digits.
With three arguments, compute (a**b) % modulo. For the
three argument form, the following restrictions on the
arguments hold:
- all three arguments must be integral
- b must be nonnegative
- at least one of a or b must be nonzero
- modulo must be nonzero and have at most 'precision' digits
The result of pow(a, b, modulo) is identical to the result
that would be obtained by computing (a**b) % modulo with
unbounded precision, but is computed more efficiently. It is
always exact.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.power(Decimal('2'), Decimal('3'))
Decimal('8')
>>> c.power(Decimal('-2'), Decimal('3'))
Decimal('-8')
>>> c.power(Decimal('2'), Decimal('-3'))
Decimal('0.125')
>>> c.power(Decimal('1.7'), Decimal('8'))
Decimal('69.7575744')
>>> c.power(Decimal('10'), Decimal('0.301029996'))
Decimal('2.00000000')
>>> c.power(Decimal('Infinity'), Decimal('-1'))
Decimal('0')
>>> c.power(Decimal('Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('Infinity'), Decimal('1'))
Decimal('Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('-1'))
Decimal('-0')
>>> c.power(Decimal('-Infinity'), Decimal('0'))
Decimal('1')
>>> c.power(Decimal('-Infinity'), Decimal('1'))
Decimal('-Infinity')
>>> c.power(Decimal('-Infinity'), Decimal('2'))
Decimal('Infinity')
>>> c.power(Decimal('0'), Decimal('0'))
Decimal('NaN')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
Decimal('11')
>>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
Decimal('-11')
>>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
Decimal('1')
>>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
Decimal('11')
>>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
Decimal('11729830')
>>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
Decimal('-0')
>>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
Decimal('1')
>>> ExtendedContext.power(7, 7)
Decimal('823543')
>>> ExtendedContext.power(Decimal(7), 7)
Decimal('823543')
>>> ExtendedContext.power(7, Decimal(7), 2)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__pow__(b, modulo, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def quantize(self, a, b):
"""Returns a value equal to 'a' (rounded), having the exponent of 'b'.
The coefficient of the result is derived from that of the left-hand
operand. It may be rounded using the current rounding setting (if the
exponent is being increased), multiplied by a positive power of ten (if
the exponent is being decreased), or is unchanged (if the exponent is
already equal to that of the right-hand operand).
Unlike other operations, if the length of the coefficient after the
quantize operation would be greater than precision then an Invalid
operation condition is raised. This guarantees that, unless there is
an error condition, the exponent of the result of a quantize is always
equal to that of the right-hand operand.
Also unlike other operations, quantize will never raise Underflow, even
if the result is subnormal and inexact.
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
Decimal('2.170')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
Decimal('2.17')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
Decimal('2.2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
Decimal('2')
>>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
Decimal('0E+1')
>>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
Decimal('-Infinity')
>>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
Decimal('-0')
>>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
Decimal('-0E+5')
>>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
Decimal('NaN')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
Decimal('217.0')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
Decimal('217')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
Decimal('2.2E+2')
>>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
Decimal('2E+2')
>>> ExtendedContext.quantize(1, 2)
Decimal('1')
>>> ExtendedContext.quantize(Decimal(1), 2)
Decimal('1')
>>> ExtendedContext.quantize(1, Decimal(2))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.quantize(b, context=self)
def radix(self):
"""Just returns 10, as this is Decimal, :)
>>> ExtendedContext.radix()
Decimal('10')
"""
return Decimal(10)
def remainder(self, a, b):
"""Returns the remainder from integer division.
The result is the residue of the dividend after the operation of
calculating integer division as described for divide-integer, rounded
to precision digits if necessary. The sign of the result, if
non-zero, is the same as that of the original dividend.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
Decimal('2.1')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
Decimal('1.0')
>>> ExtendedContext.remainder(22, 6)
Decimal('4')
>>> ExtendedContext.remainder(Decimal(22), 6)
Decimal('4')
>>> ExtendedContext.remainder(22, Decimal(6))
Decimal('4')
"""
a = _convert_other(a, raiseit=True)
r = a.__mod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def remainder_near(self, a, b):
"""Returns to be "a - b * n", where n is the integer nearest the exact
value of "x / b" (if two integers are equally near then the even one
is chosen). If the result is equal to 0 then its sign will be the
sign of a.
This operation will fail under the same conditions as integer division
(that is, if integer division on the same two operands would fail, the
remainder cannot be calculated).
>>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
Decimal('-0.9')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
Decimal('-2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
Decimal('1')
>>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
Decimal('0.2')
>>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
Decimal('0.1')
>>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
Decimal('-0.3')
>>> ExtendedContext.remainder_near(3, 11)
Decimal('3')
>>> ExtendedContext.remainder_near(Decimal(3), 11)
Decimal('3')
>>> ExtendedContext.remainder_near(3, Decimal(11))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
return a.remainder_near(b, context=self)
def rotate(self, a, b):
"""Returns a rotated copy of a, b times.
The coefficient of the result is a rotated copy of the digits in
the coefficient of the first operand. The number of places of
rotation is taken from the absolute value of the second operand,
with the rotation being to the left if the second operand is
positive or to the right otherwise.
>>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
Decimal('400000003')
>>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
Decimal('12')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
Decimal('891234567')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
Decimal('345678912')
>>> ExtendedContext.rotate(1333333, 1)
Decimal('13333330')
>>> ExtendedContext.rotate(Decimal(1333333), 1)
Decimal('13333330')
>>> ExtendedContext.rotate(1333333, Decimal(1))
Decimal('13333330')
"""
a = _convert_other(a, raiseit=True)
return a.rotate(b, context=self)
def same_quantum(self, a, b):
"""Returns True if the two operands have the same exponent.
The result is never affected by either the sign or the coefficient of
either operand.
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
False
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
True
>>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
False
>>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
True
>>> ExtendedContext.same_quantum(10000, -1)
True
>>> ExtendedContext.same_quantum(Decimal(10000), -1)
True
>>> ExtendedContext.same_quantum(10000, Decimal(-1))
True
"""
a = _convert_other(a, raiseit=True)
return a.same_quantum(b)
def scaleb(self, a, b):
"""Returns the first operand after adding the second value to its exp.
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
Decimal('0.0750')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
Decimal('7.50')
>>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
Decimal('7.50E+3')
>>> ExtendedContext.scaleb(1, 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(Decimal(1), 4)
Decimal('1E+4')
>>> ExtendedContext.scaleb(1, Decimal(4))
Decimal('1E+4')
"""
a = _convert_other(a, raiseit=True)
return a.scaleb(b, context=self)
def shift(self, a, b):
"""Returns a shifted copy of a, b times.
The coefficient of the result is a shifted copy of the digits
in the coefficient of the first operand. The number of places
to shift is taken from the absolute value of the second operand,
with the shift being to the left if the second operand is
positive or to the right otherwise. Digits shifted into the
coefficient are zeros.
>>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
Decimal('400000000')
>>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
Decimal('0')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
Decimal('1234567')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
Decimal('123456789')
>>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
Decimal('345678900')
>>> ExtendedContext.shift(88888888, 2)
Decimal('888888800')
>>> ExtendedContext.shift(Decimal(88888888), 2)
Decimal('888888800')
>>> ExtendedContext.shift(88888888, Decimal(2))
Decimal('888888800')
"""
a = _convert_other(a, raiseit=True)
return a.shift(b, context=self)
def sqrt(self, a):
"""Square root of a non-negative number to context precision.
If the result must be inexact, it is rounded using the round-half-even
algorithm.
>>> ExtendedContext.sqrt(Decimal('0'))
Decimal('0')
>>> ExtendedContext.sqrt(Decimal('-0'))
Decimal('-0')
>>> ExtendedContext.sqrt(Decimal('0.39'))
Decimal('0.624499800')
>>> ExtendedContext.sqrt(Decimal('100'))
Decimal('10')
>>> ExtendedContext.sqrt(Decimal('1'))
Decimal('1')
>>> ExtendedContext.sqrt(Decimal('1.0'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('1.00'))
Decimal('1.0')
>>> ExtendedContext.sqrt(Decimal('7'))
Decimal('2.64575131')
>>> ExtendedContext.sqrt(Decimal('10'))
Decimal('3.16227766')
>>> ExtendedContext.sqrt(2)
Decimal('1.41421356')
>>> ExtendedContext.prec
9
"""
a = _convert_other(a, raiseit=True)
return a.sqrt(context=self)
def subtract(self, a, b):
"""Return the difference between the two operands.
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
Decimal('0.23')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
Decimal('0.00')
>>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
Decimal('-0.77')
>>> ExtendedContext.subtract(8, 5)
Decimal('3')
>>> ExtendedContext.subtract(Decimal(8), 5)
Decimal('3')
>>> ExtendedContext.subtract(8, Decimal(5))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__sub__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def to_eng_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.to_eng_string(context=self)
def to_sci_string(self, a):
"""Converts a number to a string, using scientific notation.
The operation is not affected by the context.
"""
a = _convert_other(a, raiseit=True)
return a.__str__(context=self)
def to_integral_exact(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting; Inexact and Rounded flags
are allowed in this operation. The rounding mode is taken from the
context.
>>> ExtendedContext.to_integral_exact(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_exact(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_exact(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_exact(context=self)
def to_integral_value(self, a):
"""Rounds to an integer.
When the operand has a negative exponent, the result is the same
as using the quantize() operation using the given operand as the
left-hand-operand, 1E+0 as the right-hand-operand, and the precision
of the operand as the precision setting, except that no flags will
be set. The rounding mode is taken from the context.
>>> ExtendedContext.to_integral_value(Decimal('2.1'))
Decimal('2')
>>> ExtendedContext.to_integral_value(Decimal('100'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('100.0'))
Decimal('100')
>>> ExtendedContext.to_integral_value(Decimal('101.5'))
Decimal('102')
>>> ExtendedContext.to_integral_value(Decimal('-101.5'))
Decimal('-102')
>>> ExtendedContext.to_integral_value(Decimal('10E+5'))
Decimal('1.0E+6')
>>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
Decimal('7.89E+77')
>>> ExtendedContext.to_integral_value(Decimal('-Inf'))
Decimal('-Infinity')
"""
a = _convert_other(a, raiseit=True)
return a.to_integral_value(context=self)
# The method name changed, but we also provide the old one for compatibility.
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int or long
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####
# This function from Tim Peters was taken from here:
# http://mail.python.org/pipermail/python-list/1999-July/007758.html
# The correction table is embedded in the function definition for speed;
# the function deliberately avoids math.log so that the computation never
# touches floats.
def _nbits(n, correction = {
'0': 4, '1': 3, '2': 2, '3': 2,
'4': 1, '5': 1, '6': 1, '7': 1,
'8': 0, '9': 0, 'a': 0, 'b': 0,
'c': 0, 'd': 0, 'e': 0, 'f': 0}):
"""Number of bits in binary representation of the positive integer n,
or 0 if n == 0.
"""
if n < 0:
raise ValueError("The argument to _nbits should be nonnegative.")
hex_n = "%x" % n
return 4*len(hex_n) - correction[hex_n[0]]
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
b, a = a, (a - (-n//a)) >> 1
return a
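# Hedged examples (not in the original source): _sqrt_nearest(121, 1) converges to
# 11, and _sqrt_nearest(10, 3) returns 3, the integer closest to sqrt(10) ~= 3.16.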
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1L << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
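# Hedged examples (not in the original source) of the round-half-to-even behaviour:
# _div_nearest(5, 2) == 2 (2.5 ties down to the even quotient), while
# _div_nearest(7, 2) == 4 (3.5 ties up to the even quotient).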
def _ilog(x, M, L = 8):
"""Integer approximation to M*log(x/M), with absolute error boundable
in terms only of x/M.
Given positive integers x and M, return an integer approximation to
M * log(x/M). For L = 8 and 0.1 <= x/M <= 10 the difference
between the approximation and the exact result is at most 22. For
L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15. In
both cases these are upper bounds on the error; it will usually be
much smaller."""
# The basic algorithm is the following: let log1p be the function
# log1p(x) = log(1+x). Then log(x/M) = log1p((x-M)/M). We use
# the reduction
#
# log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
#
# repeatedly until the argument to log1p is small (< 2**-L in
# absolute value). For small y we can use the Taylor series
# expansion
#
# log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
#
# truncating at T such that y**T is small enough. The whole
# computation is carried out in a form of fixed-point arithmetic,
# with a real number z being represented by an integer
# approximation to z*M. To avoid loss of precision, the y below
# is actually an integer approximation to 2**R*y*M, where R is the
# number of reductions performed so far.
y = x-M
# argument reduction; R = number of reductions performed
R = 0
while (R <= L and long(abs(y)) << L-R >= M or
R > L and abs(y) >> R-L >= M):
y = _div_nearest(long(M*y) << 1,
M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
R += 1
# Taylor series with T terms
T = -int(-10*len(str(M))//(3*L))
yshift = _rshift_nearest(y, R)
w = _div_nearest(M, T)
for k in xrange(T-1, 0, -1):
w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
return _div_nearest(w*y, M)
def _dlog10(c, e, p):
"""Given integers c, e and p with c > 0, p >= 0, compute an integer
approximation to 10**p * log10(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# increase precision by 2; compensate for this by dividing
# final result by 100
p += 2
# write c*10**e as d*10**f with either:
# f >= 0 and 1 <= d <= 10, or
# f <= 0 and 0.1 <= d <= 1.
# Thus for c*10**e close to 1, f = 0
l = len(str(c))
f = e+l - (e+l >= 1)
if p > 0:
M = 10**p
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k)
log_d = _ilog(c, M) # error < 5 + 22 = 27
log_10 = _log10_digits(p) # error < 1
log_d = _div_nearest(log_d*M, log_10)
log_tenpower = f*M # exact
else:
log_d = 0 # error < 2.31
log_tenpower = _div_nearest(f, 10**-p) # error < 0.5
return _div_nearest(log_tenpower+log_d, 100)
def _dlog(c, e, p):
"""Given integers c, e and p with c > 0, compute an integer
approximation to 10**p * log(c*10**e), with an absolute error of
at most 1. Assumes that c*10**e is not exactly 1."""
# Increase precision by 2. The precision increase is compensated
# for at the end with a division by 100.
p += 2
# rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
# or f <= 0 and 0.1 <= d <= 1. Then we can compute 10**p * log(c*10**e)
# as 10**p * log(d) + 10**p*f * log(10).
l = len(str(c))
f = e+l - (e+l >= 1)
# compute approximation to 10**p*log(d), with error < 27
if p > 0:
k = e+p-f
if k >= 0:
c *= 10**k
else:
c = _div_nearest(c, 10**-k) # error of <= 0.5 in c
# _ilog magnifies existing error in c by a factor of at most 10
log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
else:
# p <= 0: just approximate the whole thing by 0; error < 2.31
log_d = 0
# compute approximation to f*10**p*log(10), with error < 11.
if f:
extra = len(str(abs(f)))-1
if p + extra >= 0:
# error in f * _log10_digits(p+extra) < |f| * 1 = |f|
# after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
else:
f_log_ten = 0
else:
f_log_ten = 0
# error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
return _div_nearest(f_log_ten + log_d, 100)
class _Log10Memoize(object):
"""Class to compute, store, and allow retrieval of, digits of the
constant log(10) = 2.302585.... This constant is needed by
Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
def __init__(self):
self.digits = "23025850929940456840179914546843642076011014886"
def getdigits(self, p):
"""Given an integer p >= 0, return floor(10**p)*log(10).
For example, self.getdigits(3) returns 2302.
"""
# digits are stored as a string, for quick conversion to
# integer in the case that we've already computed enough
# digits; the stored digits should always be correct
# (truncated, not rounded to nearest).
if p < 0:
raise ValueError("p should be nonnegative")
if p >= len(self.digits):
# compute p+3, p+6, p+9, ... digits; continue until at
# least one of the extra digits is nonzero
extra = 3
while True:
# compute p+extra digits, correct to within 1ulp
M = 10**(p+extra+2)
digits = str(_div_nearest(_ilog(10*M, M), 100))
if digits[-extra:] != '0'*extra:
break
extra += 3
# keep all reliable digits so far; remove trailing zeros
# and next nonzero digit
self.digits = digits.rstrip('0')[:-1]
return int(self.digits[:p+1])
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
"""Given integers x and M, M > 0, such that x/M is small in absolute
value, compute an integer approximation to M*exp(x/M). For 0 <=
x/M <= 2.4, the absolute error in the result is bounded by 60 (and
is usually much smaller)."""
# Algorithm: to compute exp(z) for a real number z, first divide z
# by a suitable power R of 2 so that |z/2**R| < 2**-L. Then
# compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
# series
#
# expm1(x) = x + x**2/2! + x**3/3! + ...
#
# Now use the identity
#
# expm1(2x) = expm1(x)*(expm1(x)+2)
#
# R times to compute the sequence expm1(z/2**R),
# expm1(z/2**(R-1)), ... , exp(z/2), exp(z).
# Find R such that x/2**R/M <= 2**-L
R = _nbits((long(x)<<L)//M)
# Taylor series. (2**L)**T > M
T = -int(-10*len(str(M))//(3*L))
y = _div_nearest(x, T)
Mshift = long(M)<<R
for i in xrange(T-1, 0, -1):
y = _div_nearest(x*(Mshift + y), Mshift * i)
# Expansion
for k in xrange(R-1, -1, -1):
Mshift = long(M)<<(k+2)
y = _div_nearest(y*(y+Mshift), Mshift)
return M+y
def _dexp(c, e, p):
"""Compute an approximation to exp(c*10**e), with p decimal places of
precision.
Returns integers d, f such that:
10**(p-1) <= d <= 10**p, and
(d-1)*10**f < exp(c*10**e) < (d+1)*10**f
In other words, d*10**f is an approximation to exp(c*10**e) with p
digits of precision, and with an error in d of at most 1. This is
almost, but not quite, the same as the error being < 1ulp: when d
= 10**(p-1) the error could be up to 10 ulp."""
# we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
p += 2
# compute log(10) with extra precision = adjusted exponent of c*10**e
extra = max(0, e + len(str(c)) - 1)
q = p + extra
# compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
# rounding down
shift = e+q
if shift >= 0:
cshift = c*10**shift
else:
cshift = c//10**-shift
quot, rem = divmod(cshift, _log10_digits(q))
# reduce remainder back to original precision
rem = _div_nearest(rem, 10**extra)
# error in result of _iexp < 120; error after division < 0.62
return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
def _dpower(xc, xe, yc, ye, p):
"""Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
y = yc*10**ye, compute x**y. Returns a pair of integers (c, e) such that:
10**(p-1) <= c <= 10**p, and
(c-1)*10**e < x**y < (c+1)*10**e
in other words, c*10**e is an approximation to x**y with p digits
of precision, and with an error in c of at most 1. (This is
almost, but not quite, the same as the error being < 1ulp: when c
== 10**(p-1) we can only guarantee error < 10ulp.)
We assume that: x is positive and not equal to 1, and y is nonzero.
"""
# Find b such that 10**(b-1) <= |y| <= 10**b
b = len(str(abs(yc))) + ye
# log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
lxc = _dlog(xc, xe, p+b+1)
# compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
shift = ye-b
if shift >= 0:
pc = lxc*yc*10**shift
else:
pc = _div_nearest(lxc*yc, 10**-shift)
if pc == 0:
# we prefer a result that isn't exactly 1; this makes it
# easier to compute a correctly rounded result in __pow__
if ((len(str(xc)) + xe >= 1) == (yc > 0)): # if x**y > 1:
coeff, exp = 10**(p-1)+1, 1-p
else:
coeff, exp = 10**p-1, -p
else:
coeff, exp = _dexp(pc, -(p+1), p+1)
coeff = _div_nearest(coeff, 10)
exp += 1
return coeff, exp
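# Hedged illustration (not in the original source), derived only from the documented
# guarantee above: _dpower(2, 0, 3, 0, 3) computes 2**3 = 8 to three digits, and the
# constraints 10**2 <= c <= 10**3 and (c-1)*10**e < 8 < (c+1)*10**e force the result
# (c, e) == (800, -2).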
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
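# Hedged examples (not in the original source): _log10_lb(7) == 84 while
# 100*log10(7) is about 84.5, and _log10_lb(1000) == 300 exactly.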
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
"""Convert other to Decimal.
Verifies that it's ok to use in an implicit construction.
If allow_float is true, allow conversion from float; this
is used in the comparison methods (__eq__ and friends).
"""
if isinstance(other, Decimal):
return other
if isinstance(other, (int, long)):
return Decimal(other)
if allow_float and isinstance(other, float):
return Decimal.from_float(other)
if raiseit:
raise TypeError("Unable to convert %s to Decimal" % other)
return NotImplemented
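# Hedged examples (not in the original source): _convert_other(5) returns
# Decimal(5), _convert_other(1.5) returns NotImplemented unless allow_float is
# true, and _convert_other("1.5", raiseit=True) raises TypeError because strings
# are never accepted for implicit construction here.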
##### Setup Specific Contexts ############################################
# The default context prototype used by Context(); it is mutable, so that
# new contexts can have different default values
DefaultContext = Context(
prec=28, rounding=ROUND_HALF_EVEN,
traps=[DivisionByZero, Overflow, InvalidOperation],
flags=[],
Emax=999999999,
Emin=-999999999,
capitals=1
)
# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
prec=9, rounding=ROUND_HALF_UP,
traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
flags=[],
)
ExtendedContext = Context(
prec=9, rounding=ROUND_HALF_EVEN,
traps=[],
flags=[],
)
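# Hedged note (not in the original source): because DefaultContext is the mutable
# prototype consulted by Context(), assigning e.g. DefaultContext.prec = 50 makes
# subsequently constructed Context() objects default to prec == 50 unless a prec
# argument is passed explicitly; BasicContext and ExtendedContext should be left
# untouched so results stay reproducible across implementations of the spec.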
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
import re
_parser = re.compile(r""" # A numeric string consists of:
# \s*
(?P<sign>[-+])? # an optional sign, followed by either...
(
(?=\d|\.\d) # ...a number (with at least one digit)
(?P<int>\d*) # having a (possibly empty) integer part
(\.(?P<frac>\d*))? # followed by an optional fractional part
(E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
|
Inf(inity)? # ...an infinity, or...
|
(?P<signal>s)? # ...an (optionally signaling)
NaN # NaN
(?P<diag>\d*) # with (possibly empty) diagnostic info.
)
# \s*
\Z
""", re.VERBOSE | re.IGNORECASE | re.UNICODE).match
_all_zeros = re.compile('0*$').match
_exact_half = re.compile('50*$').match
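# Hedged example (not in the original source) of what the parser captures:
# m = _parser('-12.34e5') gives m.group('sign') == '-', m.group('int') == '12',
# m.group('frac') == '34' and m.group('exp') == '5', while _parser('sNaN123')
# matches with m.group('signal') == 's' and m.group('diag') == '123'.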
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][0][minimumwidth][,][.precision][type]
_parse_format_specifier_regex = re.compile(r"""\A
(?:
(?P<fill>.)?
(?P<align>[<>=^])
)?
(?P<sign>[-+ ])?
(?P<zeropad>0)?
(?P<minimumwidth>(?!0)\d+)?
(?P<thousands_sep>,)?
(?:\.(?P<precision>0|(?!0)\d+))?
(?P<type>[eEfFgGn%])?
\Z
""", re.VERBOSE)
del re
# The locale module is only needed for the 'n' format specifier. The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
import locale as _locale
except ImportError:
pass
def _parse_format_specifier(format_spec, _localeconv=None):
"""Parse and validate a format specifier.
Turns a standard numeric format specifier into a dict, with the
following entries:
fill: fill character to pad field to minimum width
align: alignment type, either '<', '>', '=' or '^'
sign: either '+', '-' or ' '
minimumwidth: nonnegative integer giving minimum width
zeropad: boolean, indicating whether to pad with zeros
thousands_sep: string to use as thousands separator, or ''
grouping: grouping for thousands separators, in format
used by localeconv
decimal_point: string to use for decimal point
precision: nonnegative integer giving precision, or None
type: one of the characters 'eEfFgG%', or None
unicode: boolean (always True for Python 3.x)
"""
m = _parse_format_specifier_regex.match(format_spec)
if m is None:
raise ValueError("Invalid format specifier: " + format_spec)
# get the dictionary
format_dict = m.groupdict()
# zeropad; defaults for fill and alignment. If zero padding
# is requested, the fill and align fields should be absent.
fill = format_dict['fill']
align = format_dict['align']
format_dict['zeropad'] = (format_dict['zeropad'] is not None)
if format_dict['zeropad']:
if fill is not None:
raise ValueError("Fill character conflicts with '0'"
" in format specifier: " + format_spec)
if align is not None:
raise ValueError("Alignment conflicts with '0' in "
"format specifier: " + format_spec)
format_dict['fill'] = fill or ' '
# PEP 3101 originally specified that the default alignment should
# be left; it was later agreed that right-aligned makes more sense
# for numeric types. See http://bugs.python.org/issue6857.
format_dict['align'] = align or '>'
# default sign handling: '-' for negative, '' for positive
if format_dict['sign'] is None:
format_dict['sign'] = '-'
# minimumwidth defaults to 0; precision remains None if not given
format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
if format_dict['precision'] is not None:
format_dict['precision'] = int(format_dict['precision'])
# if format type is 'g' or 'G' then a precision of 0 makes little
# sense; convert it to 1. Same if format type is unspecified.
if format_dict['precision'] == 0:
if format_dict['type'] is None or format_dict['type'] in 'gG':
format_dict['precision'] = 1
# determine thousands separator, grouping, and decimal separator, and
# add appropriate entries to format_dict
if format_dict['type'] == 'n':
# apart from separators, 'n' behaves just like 'g'
format_dict['type'] = 'g'
if _localeconv is None:
_localeconv = _locale.localeconv()
if format_dict['thousands_sep'] is not None:
raise ValueError("Explicit thousands separator conflicts with "
"'n' type in format specifier: " + format_spec)
format_dict['thousands_sep'] = _localeconv['thousands_sep']
format_dict['grouping'] = _localeconv['grouping']
format_dict['decimal_point'] = _localeconv['decimal_point']
else:
if format_dict['thousands_sep'] is None:
format_dict['thousands_sep'] = ''
format_dict['grouping'] = [3, 0]
format_dict['decimal_point'] = '.'
# record whether return type should be str or unicode
format_dict['unicode'] = isinstance(format_spec, unicode)
return format_dict
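# Hedged example (not in the original source): _parse_format_specifier('>10.2f')
# yields a dict with align '>', fill ' ', sign '-', zeropad False, minimumwidth 10,
# precision 2, type 'f', an empty thousands_sep and decimal_point '.'.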
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
Also converts result to unicode if necessary.
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
# make sure that result is unicode if necessary
if spec['unicode']:
result = unicode(result)
return result
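# Hedged example (not in the original source): with spec {'minimumwidth': 8,
# 'fill': '*', 'align': '>', 'unicode': False}, _format_align('-', '1.23', spec)
# pads on the left and returns '***-1.23'.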
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
# (2) a nonempty list of positive integers + [0], or
# (3) a list of positive integers + [locale.CHAR_MAX]
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
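# Hedged examples (not in the original source): _group_lengths([]) == [],
# _group_lengths([3, 0]) yields 3, 3, 3, ... indefinitely, and
# _group_lengths([3, 2, 0]) yields 3 followed by repeating 2s, matching the
# localeconv() grouping convention.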
def _insert_thousands_sep(digits, spec, min_width=1):
"""Insert thousands separators into a digit string.
spec is a dictionary whose keys should include 'thousands_sep' and
'grouping'; typically it's the result of parsing the format
specifier using _parse_format_specifier.
The min_width keyword argument gives the minimum length of the
result, which will be padded on the left with zeros if necessary.
If necessary, the zero padding adds an extra '0' on the left to
avoid a leading thousands separator. For example, inserting
commas every three digits in '123456', with min_width=8, gives
'0,123,456', even though that has length 9.
"""
sep = spec['thousands_sep']
grouping = spec['grouping']
groups = []
for l in _group_lengths(grouping):
if l <= 0:
raise ValueError("group length should be positive")
# max(..., 1) forces at least 1 digit to the left of a separator
l = min(max(len(digits), min_width, 1), l)
groups.append('0'*(l - len(digits)) + digits[-l:])
digits = digits[:-l]
min_width -= l
if not digits and min_width <= 0:
break
min_width -= len(sep)
else:
l = max(len(digits), min_width, 1)
groups.append('0'*(l - len(digits)) + digits[-l:])
return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
"""Format a number, given the following data:
is_negative: true if the number is negative, else false
intpart: string of digits that must appear before the decimal point
fracpart: string of digits that must come after the point
exp: exponent, as an integer
spec: dictionary resulting from parsing the format specifier
This function uses the information in spec to:
insert separators (decimal separator and thousands separators)
format the sign
format the exponent
add trailing '%' for the '%' type
zero-pad if necessary
fill and align if necessary
"""
sign = _format_sign(is_negative, spec)
if fracpart:
fracpart = spec['decimal_point'] + fracpart
if exp != 0 or spec['type'] in 'eE':
echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
fracpart += "{0}{1:+}".format(echar, exp)
if spec['type'] == '%':
fracpart += '%'
if spec['zeropad']:
min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
else:
min_width = 0
intpart = _insert_thousands_sep(intpart, spec, min_width)
return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################
# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)
# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)
if __name__ == '__main__':
import doctest, sys
doctest.testmod(sys.modules[__name__])
|
mpl-2.0
|
dsparrow27/zoocore
|
zoo/libs/plugin/plugin.py
|
1
|
1836
|
import inspect
import time
from zoo.libs.utils import env
class Plugin(object):
"""Base plugin class that all plugins inherent from. The client should subclass this to provide a standard
interface when needed.
.. code-block:: python
class CustomPlugin(Plugin):
id = "CustomPlugin.example"
def execute(self):
print "executed plugin: {}".format(self.id)
"""
id = ""
def __init__(self, manager=None):
self.manager = manager
self.stats = PluginStats(self)
class PluginStats(object):
def __init__(self, plugin):
self.plugin = plugin
self.id = self.plugin.id
self.startTime = 0.0
self.endTime = 0.0
self.executionTime = 0.0
self.info = {}
self._init()
def _init(self):
"""Initializes some basic info about the plugin and the use environment
Internal use only.
"""
self.info.update({"name": self.plugin.__class__.__name__,
"creator": self.plugin.creator,
"module": self.plugin.__class__.__module__,
"filepath": inspect.getfile(self.plugin.__class__),
"id": self.id,
"application": env.application()
})
self.info.update(env.machineInfo())
def start(self):
self.startTime = time.time()
def finish(self, tb=None):
"""Called when the plugin has finish executing.
:param tb:
:type tb:
"""
self.endTime = time.time()
self.executionTime = self.endTime - self.startTime
self.info["executionTime"] = self.executionTime
self.info["lastUsed"] = self.endTime
if tb:
self.info["traceback"] = tb
|
gpl-3.0
|
GbalsaC/bitnamiP
|
common/djangoapps/student/tests/test_password_policy.py
|
113
|
12723
|
# -*- coding: utf-8 -*-
"""
This test file verifies proper password policy enforcement, which is an optional feature.
"""
import json
from django.test import TestCase
from django.test.client import RequestFactory
from django.core.urlresolvers import reverse
from django.contrib.auth.models import AnonymousUser
from django.utils.importlib import import_module
from django.test.utils import override_settings
from django.conf import settings
from mock import patch
from edxmako.tests import mako_middleware_process_request
from external_auth.models import ExternalAuthMap
from student.views import create_account
@patch.dict("django.conf.settings.FEATURES", {'ENFORCE_PASSWORD_POLICY': True})
class TestPasswordPolicy(TestCase):
"""
Go through some password policy tests to make sure things are properly working
"""
def setUp(self):
super(TestPasswordPolicy, self).setUp()
self.url = reverse('create_account')
self.request_factory = RequestFactory()
self.url_params = {
'username': 'username',
'email': '[email protected]',
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
@override_settings(PASSWORD_MIN_LENGTH=6)
def test_password_length_too_short(self):
self.url_params['password'] = 'aaa'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Invalid Length (must be 6 characters or more)",
)
@override_settings(PASSWORD_MIN_LENGTH=6)
def test_password_length_long_enough(self):
self.url_params['password'] = 'ThisIsALongerPassword'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(PASSWORD_MAX_LENGTH=12)
def test_password_length_too_long(self):
self.url_params['password'] = 'ThisPasswordIsWayTooLong'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Invalid Length (must be 12 characters or less)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'UPPER': 3})
def test_password_not_enough_uppercase(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more uppercase characters)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'UPPER': 3})
def test_password_enough_uppercase(self):
self.url_params['password'] = 'ThisShouldPass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'LOWER': 3})
def test_password_not_enough_lowercase(self):
self.url_params['password'] = 'THISSHOULDFAIL'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more lowercase characters)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'LOWER': 3})
def test_password_enough_lowercase(self):
self.url_params['password'] = 'ThisShouldPass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'DIGITS': 3})
def test_not_enough_digits(self):
self.url_params['password'] = 'thishasnodigits'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more digits)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'DIGITS': 3})
def test_enough_digits(self):
self.url_params['password'] = 'Th1sSh0uldPa88'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'PUNCTUATION': 3})
def test_not_enough_punctuations(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more punctuation characters)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'PUNCTUATION': 3})
def test_enough_punctuations(self):
self.url_params['password'] = 'Th!sSh.uldPa$*'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'WORDS': 3})
def test_not_enough_words(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Must be more complex (must contain 3 or more unique words)",
)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {'WORDS': 3})
def test_enough_words(self):
self.url_params['password'] = u'this should pass'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {
'PUNCTUATION': 3,
'WORDS': 3,
'DIGITS': 3,
'LOWER': 3,
'UPPER': 3,
})
def test_multiple_errors_fail(self):
self.url_params['password'] = 'thisshouldfail'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
errstring = (
"Password: Must be more complex ("
"must contain 3 or more uppercase characters, "
"must contain 3 or more digits, "
"must contain 3 or more punctuation characters, "
"must contain 3 or more unique words"
")"
)
self.assertEqual(obj['value'], errstring)
@patch.dict("django.conf.settings.PASSWORD_COMPLEXITY", {
'PUNCTUATION': 3,
'WORDS': 3,
'DIGITS': 3,
'LOWER': 3,
'UPPER': 3,
})
def test_multiple_errors_pass(self):
self.url_params['password'] = u'tH1s Sh0u!d P3#$'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
@override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
def test_dictionary_similarity_fail1(self):
self.url_params['password'] = 'foo'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Too similar to a restricted dictionary word.",
)
@override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
@override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
def test_dictionary_similarity_fail2(self):
self.url_params['password'] = 'bar'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Too similar to a restricted dictionary word.",
)
@override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
@override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
def test_dictionary_similarity_fail3(self):
self.url_params['password'] = 'fo0'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Password: Too similar to a restricted dictionary word.",
)
@override_settings(PASSWORD_DICTIONARY=['foo', 'bar'])
@override_settings(PASSWORD_DICTIONARY_EDIT_DISTANCE_THRESHOLD=1)
def test_dictionary_similarity_pass(self):
self.url_params['password'] = 'this_is_ok'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
def test_with_unicode(self):
self.url_params['password'] = u'四節比分和七年前'
response = self.client.post(self.url, self.url_params)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
@override_settings(PASSWORD_MIN_LENGTH=6, SESSION_ENGINE='django.contrib.sessions.backends.cache')
def test_ext_auth_password_length_too_short(self):
"""
Tests that even if password policy is enforced, ext_auth registrations aren't subject to it
"""
self.url_params['password'] = 'aaa' # shouldn't pass validation
request = self.request_factory.post(self.url, self.url_params)
# now indicate we are doing ext_auth by setting 'ExternalAuthMap' in the session.
request.session = import_module(settings.SESSION_ENGINE).SessionStore() # empty session
extauth = ExternalAuthMap(external_id='[email protected]',
external_email='[email protected]',
internal_password=self.url_params['password'],
external_domain='shib:https://idp.stanford.edu/')
request.session['ExternalAuthMap'] = extauth
request.user = AnonymousUser()
mako_middleware_process_request(request)
response = create_account(request)
self.assertEqual(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
class TestUsernamePasswordNonmatch(TestCase):
"""
Test that registration username and password fields differ
"""
def setUp(self):
super(TestUsernamePasswordNonmatch, self).setUp()
self.url = reverse('create_account')
self.url_params = {
'username': 'username',
'email': '[email protected]',
'name': 'username',
'terms_of_service': 'true',
'honor_code': 'true',
}
def test_with_username_password_match(self):
self.url_params['username'] = "foobar"
self.url_params['password'] = "foobar"
response = self.client.post(self.url, self.url_params)
self.assertEquals(response.status_code, 400)
obj = json.loads(response.content)
self.assertEqual(
obj['value'],
"Username and password fields cannot match",
)
def test_with_username_password_nonmatch(self):
self.url_params['username'] = "foobar"
self.url_params['password'] = "nonmatch"
response = self.client.post(self.url, self.url_params)
self.assertEquals(response.status_code, 200)
obj = json.loads(response.content)
self.assertTrue(obj['success'])
|
agpl-3.0
|
shakamunyi/neutron-dvr
|
neutron/plugins/cisco/db/n1kv_models_v2.py
|
6
|
7207
|
# Copyright 2013 Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Abhishek Raut, Cisco Systems Inc.
# @author: Rudrajit Tapadar, Cisco Systems Inc.
import sqlalchemy as sa
from sqlalchemy import sql
from neutron.db import model_base
from neutron.db import models_v2
from neutron.openstack.common import log as logging
from neutron.plugins.cisco.common import cisco_constants
LOG = logging.getLogger(__name__)
class N1kvVlanAllocation(model_base.BASEV2):
"""Represents allocation state of vlan_id on physical network."""
__tablename__ = 'cisco_n1kv_vlan_allocations'
physical_network = sa.Column(sa.String(64),
nullable=False,
primary_key=True)
vlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sql.false())
network_profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id',
ondelete="CASCADE"),
nullable=False)
class N1kvVxlanAllocation(model_base.BASEV2):
"""Represents allocation state of vxlan_id."""
__tablename__ = 'cisco_n1kv_vxlan_allocations'
vxlan_id = sa.Column(sa.Integer, nullable=False, primary_key=True,
autoincrement=False)
allocated = sa.Column(sa.Boolean, nullable=False, default=False,
server_default=sql.false())
network_profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id',
ondelete="CASCADE"),
nullable=False)
class N1kvPortBinding(model_base.BASEV2):
"""Represents binding of ports to policy profile."""
__tablename__ = 'cisco_n1kv_port_bindings'
port_id = sa.Column(sa.String(36),
sa.ForeignKey('ports.id', ondelete="CASCADE"),
primary_key=True)
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_policy_profiles.id'))
class N1kvNetworkBinding(model_base.BASEV2):
"""Represents binding of virtual network to physical realization."""
__tablename__ = 'cisco_n1kv_network_bindings'
network_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id', ondelete="CASCADE"),
primary_key=True)
network_type = sa.Column(sa.String(32), nullable=False)
physical_network = sa.Column(sa.String(64))
segmentation_id = sa.Column(sa.Integer)
multicast_ip = sa.Column(sa.String(32))
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_network_profiles.id'))
class N1kVmNetwork(model_base.BASEV2):
"""Represents VM Network information."""
__tablename__ = 'cisco_n1kv_vmnetworks'
name = sa.Column(sa.String(80), primary_key=True)
profile_id = sa.Column(sa.String(36),
sa.ForeignKey('cisco_policy_profiles.id'))
network_id = sa.Column(sa.String(36))
port_count = sa.Column(sa.Integer)
class NetworkProfile(model_base.BASEV2, models_v2.HasId):
"""
Nexus1000V Network Profiles
segment_type - VLAN, OVERLAY, TRUNK, MULTI_SEGMENT
sub_type - TRUNK_VLAN, TRUNK_VXLAN, native_vxlan, enhanced_vxlan
segment_range - '<integer>-<integer>'
multicast_ip_index - <integer>
multicast_ip_range - '<ip>-<ip>'
physical_network - Name for the physical network
"""
__tablename__ = 'cisco_network_profiles'
name = sa.Column(sa.String(255))
segment_type = sa.Column(sa.Enum(cisco_constants.NETWORK_TYPE_VLAN,
cisco_constants.NETWORK_TYPE_OVERLAY,
cisco_constants.NETWORK_TYPE_TRUNK,
cisco_constants.
NETWORK_TYPE_MULTI_SEGMENT,
name='segment_type'),
nullable=False)
sub_type = sa.Column(sa.String(255))
segment_range = sa.Column(sa.String(255))
multicast_ip_index = sa.Column(sa.Integer, default=0,
server_default='0')
multicast_ip_range = sa.Column(sa.String(255))
physical_network = sa.Column(sa.String(255))
class PolicyProfile(model_base.BASEV2):
"""
Nexus1000V Policy Profiles
Both 'id' and 'name' come from the Nexus1000V switch
"""
__tablename__ = 'cisco_policy_profiles'
id = sa.Column(sa.String(36), primary_key=True)
name = sa.Column(sa.String(255))
class ProfileBinding(model_base.BASEV2):
"""
Represents a binding of Network Profile
or Policy Profile to tenant_id
"""
__tablename__ = 'cisco_n1kv_profile_bindings'
profile_type = sa.Column(sa.Enum(cisco_constants.NETWORK,
cisco_constants.POLICY,
name='profile_type'))
tenant_id = sa.Column(sa.String(36),
primary_key=True,
default=cisco_constants.TENANT_ID_NOT_SET,
server_default=cisco_constants.TENANT_ID_NOT_SET)
profile_id = sa.Column(sa.String(36), primary_key=True)
class N1kvTrunkSegmentBinding(model_base.BASEV2):
"""Represents binding of segments in trunk networks."""
__tablename__ = 'cisco_n1kv_trunk_segments'
trunk_segment_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id',
ondelete="CASCADE"),
primary_key=True)
segment_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
dot1qtag = sa.Column(sa.String(36), nullable=False, primary_key=True)
class N1kvMultiSegmentNetworkBinding(model_base.BASEV2):
"""Represents binding of segments in multi-segment networks."""
__tablename__ = 'cisco_n1kv_multi_segments'
multi_segment_id = sa.Column(sa.String(36),
sa.ForeignKey('networks.id',
ondelete="CASCADE"),
primary_key=True)
segment1_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
segment2_id = sa.Column(sa.String(36), nullable=False, primary_key=True)
encap_profile_name = sa.Column(sa.String(36))
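# Hedged illustration (not part of the original module): following the NetworkProfile
# docstring above, a VLAN profile row might carry values along the lines of
# name='sample-vlan-profile', segment_type=cisco_constants.NETWORK_TYPE_VLAN,
# segment_range='100-200' and physical_network='physnet1', while OVERLAY profiles
# additionally populate multicast_ip_range (e.g. '224.0.1.1-224.0.1.10').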
|
apache-2.0
|
double-y/django
|
django/core/mail/backends/console.py
|
696
|
1477
|
"""
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
from django.utils import six
class EmailBackend(BaseEmailBackend):
def __init__(self, *args, **kwargs):
self.stream = kwargs.pop('stream', sys.stdout)
self._lock = threading.RLock()
super(EmailBackend, self).__init__(*args, **kwargs)
def write_message(self, message):
msg = message.message()
msg_data = msg.as_bytes()
if six.PY3:
charset = msg.get_charset().get_output_charset() if msg.get_charset() else 'utf-8'
msg_data = msg_data.decode(charset)
self.stream.write('%s\n' % msg_data)
self.stream.write('-' * 79)
self.stream.write('\n')
def send_messages(self, email_messages):
"""Write all messages to the stream in a thread-safe way."""
if not email_messages:
return
msg_count = 0
with self._lock:
try:
stream_created = self.open()
for message in email_messages:
self.write_message(message)
self.stream.flush() # flush after each message
msg_count += 1
if stream_created:
self.close()
except Exception:
if not self.fail_silently:
raise
return msg_count
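# Hedged usage sketch (not part of the original module): selecting this backend in
# settings, e.g. EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend',
# makes django.core.mail.send_mail(...) write the rendered message to the configured
# stream (sys.stdout by default) instead of actually sending it.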
|
bsd-3-clause
|
bcoca/ansible
|
test/units/modules/test_systemd.py
|
35
|
1801
|
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat import unittest
from ansible.modules.systemd import parse_systemctl_show
class ParseSystemctlShowTestCase(unittest.TestCase):
def test_simple(self):
lines = [
'Type=simple',
'Restart=no',
'Requires=system.slice sysinit.target',
'Description=Blah blah blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Restart': 'no',
'Requires': 'system.slice sysinit.target',
'Description': 'Blah blah blah',
})
def test_multiline_exec(self):
# This was taken from a real service that specified "ExecStart=/bin/echo foo\nbar"
lines = [
'Type=simple',
'ExecStart={ path=/bin/echo ; argv[]=/bin/echo foo',
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description=blah',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'ExecStart': '{ path=/bin/echo ; argv[]=/bin/echo foo\n'
'bar ; ignore_errors=no ; start_time=[n/a] ; stop_time=[n/a] ; pid=0 ; code=(null) ; status=0/0 }',
'Description': 'blah',
})
def test_single_line_with_brace(self):
lines = [
'Type=simple',
'Description={ this is confusing',
'Restart=no',
]
parsed = parse_systemctl_show(lines)
self.assertEqual(parsed, {
'Type': 'simple',
'Description': '{ this is confusing',
'Restart': 'no',
})
|
gpl-3.0
|
Big-B702/python-for-android
|
python-build/python-libs/gdata/tests/gdata_tests/calendar_test.py
|
87
|
38211
|
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = '[email protected] (Jeff Scudder)'
import unittest
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
from gdata import test_data
import gdata.calendar
class CalendarFeedTest(unittest.TestCase):
def setUp(self):
self.calendar_feed = gdata.calendar.CalendarListFeedFromString(
test_data.CALENDAR_FEED)
def testEntryCount(self):
# Assert the number of items in the feed of calendars
self.assertEquals(len(self.calendar_feed.entry),2)
def testToAndFromString(self):
# Assert the appropriate type for each entry
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry, gdata.calendar.CalendarListEntry),
'Entry must be an instance of CalendarListEntry')
# Regenerate feed from xml text
new_calendar_feed = (
gdata.calendar.CalendarListFeedFromString(str(self.calendar_feed)))
for an_entry in new_calendar_feed.entry:
self.assert_(isinstance(an_entry, gdata.calendar.CalendarListEntry),
'Entry in regenerated feed must be an instance of CalendarListEntry')
def testAuthor(self):
"""Tests the existence of a <atom:author> and verifies the name and email"""
# Assert that each element in the feed author list is an atom.Author
for an_author in self.calendar_feed.author:
self.assert_(isinstance(an_author, atom.Author),
"Calendar feed <atom:author> element must be an instance of " +
"atom.Author: %s" % an_author)
# Assert the feed author name is as expected
self.assertEquals(self.calendar_feed.author[0].name.text, 'GData Ops Demo')
# Assert the feed author name is as expected
self.assertEquals(self.calendar_feed.author[0].email.text,
'[email protected]')
# Assert one of the values for an entry author
self.assertEquals(self.calendar_feed.entry[0].author[0].name.text,
'GData Ops Demo')
self.assertEquals(self.calendar_feed.entry[0].author[0].email.text,
'[email protected]')
def testId(self):
"""Tests the existence of a <atom:id> in the feed and entries
and verifies the value"""
# Assert the feed id exists and is an atom.Id
self.assert_(isinstance(self.calendar_feed.id, atom.Id),
"Calendar feed <atom:id> element must be an instance of atom.Id: %s" % (
self.calendar_feed.id))
# Assert the feed id value is as expected
self.assertEquals(self.calendar_feed.id.text,
'http://www.google.com/calendar/feeds/default')
# Assert that each entry has an id which is an atom.Id
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.id, atom.Id),
"Calendar entry <atom:id> element must be an instance of " +
"atom.Id: %s" % an_entry.id)
# Assert one of the values for an id
self.assertEquals(self.calendar_feed.entry[1].id.text,
'http://www.google.com/calendar/feeds/default/' +
'jnh21ovnjgfph21h32gvms2758%40group.calendar.google.com')
def testPublished(self):
"""Tests the existence of a <atom:published> in the entries
and verifies the value"""
# Assert that each entry has a published value which is an atom.Published
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.published, atom.Published),
"Calendar entry <atom:published> element must be an instance of " +
"atom.Published: %s" % an_entry.published)
# Assert one of the values for published is as expected
self.assertEquals(self.calendar_feed.entry[1].published.text,
'2007-03-20T22:48:57.837Z')
def testUpdated(self):
"""Tests the existence of a <atom:updated> in the feed and the entries
and verifies the value"""
# Assert that the feed updated element exists and is an atom.Updated
self.assert_(isinstance(self.calendar_feed.updated, atom.Updated),
"Calendar feed <atom:updated> element must be an instance of " +
"atom.Updated: %s" % self.calendar_feed.updated)
# Assert that each entry has a updated value which is an atom.Updated
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.updated, atom.Updated),
"Calendar entry <atom:updated> element must be an instance of" +
"atom.Updated: %s" % an_entry.updated)
# Assert the feed updated value is as expected
self.assertEquals(self.calendar_feed.updated.text,
'2007-03-20T22:48:57.833Z')
# Assert one of the values for updated
self.assertEquals(self.calendar_feed.entry[0].updated.text,
'2007-03-20T22:48:52.000Z')
def testTitle(self):
"""Tests the existence of a <atom:title> in the feed and the entries and
verifies the value"""
# Assert that the feed title element exists and is an atom.Title
self.assert_(isinstance(self.calendar_feed.title, atom.Title),
"Calendar feed <atom:title> element must be an instance of " +
"atom.Title: %s" % self.calendar_feed.title)
# Assert that each entry has a title value which is an atom.Title
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.title, atom.Title),
"Calendar entry <atom:title> element must be an instance of " +
"atom.Title: %s" % an_entry.title)
# Assert the feed title value is as expected
self.assertEquals(self.calendar_feed.title.text,
'GData Ops Demo\'s Calendar List')
# Assert one of the values for title
self.assertEquals(self.calendar_feed.entry[0].title.text, 'GData Ops Demo')
def testColor(self):
"""Tests the existence of a <gCal:color> and verifies the value"""
# Assert the color is present and is a gdata.calendar.Color
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.color, gdata.calendar.Color),
"Calendar feed <gCal:color> element must be an instance of " +
"gdata.calendar.Color: %s" % an_entry.color)
# Assert the color value is as expected
self.assertEquals(self.calendar_feed.entry[0].color.value, '#2952A3')
def testAccessLevel(self):
"""Tests the existence of a <gCal:accesslevel> element and verifies the
value"""
# Assert the access_level is present and is a gdata.calendar.AccessLevel
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.access_level, gdata.calendar.AccessLevel),
"Calendar feed <gCal:accesslevel> element must be an instance of " +
"gdata.calendar.AccessLevel: %s" % an_entry.access_level)
# Assert the access_level value is as expected
self.assertEquals(self.calendar_feed.entry[0].access_level.value, 'owner')
def testTimezone(self):
"""Tests the existence of a <gCal:timezone> element and verifies the
value"""
# Assert the timezone is present and is a gdata.calendar.Timezone
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.timezone, gdata.calendar.Timezone),
"Calendar feed <gCal:timezone> element must be an instance of " +
"gdata.calendar.Timezone: %s" % an_entry.timezone)
# Assert the timezone value is as expected
self.assertEquals(self.calendar_feed.entry[0].timezone.value,
'America/Los_Angeles')
def testHidden(self):
"""Tests the existence of a <gCal:hidden> element and verifies the
value"""
# Assert the hidden is present and is a gdata.calendar.Hidden
for an_entry in self.calendar_feed.entry:
self.assert_(isinstance(an_entry.hidden, gdata.calendar.Hidden),
"Calendar feed <gCal:hidden> element must be an instance of " +
"gdata.calendar.Hidden: %s" % an_entry.hidden)
# Assert the hidden value is as expected
self.assertEquals(self.calendar_feed.entry[0].hidden.value, 'false')
def testOpenSearch(self):
"""Tests the existence of <openSearch:startIndex>"""
# Assert that the elements exist and are the appropriate type
self.assert_(isinstance(self.calendar_feed.start_index, gdata.StartIndex),
"Calendar feed <openSearch:startIndex> element must be an " +
"instance of gdata.StartIndex: %s" % self.calendar_feed.start_index)
# Assert the values for each openSearch element are as expected
self.assertEquals(self.calendar_feed.start_index.text, '1')
def testGenerator(self):
"""Tests the existence of <atom:generator> and verifies the value"""
# Assert that the element exists and is of the appropriate type
self.assert_(isinstance(self.calendar_feed.generator, atom.Generator),
"Calendar feed <atom:generator> element must be an instance of " +
"atom.Generator: %s" % self.calendar_feed.generator)
# Assert the generator version, uri and text are as expected
self.assertEquals(self.calendar_feed.generator.text, 'Google Calendar')
self.assertEquals(self.calendar_feed.generator.version, '1.0')
self.assertEquals(self.calendar_feed.generator.uri,
'http://www.google.com/calendar')
def testEntryLink(self):
"""Makes sure entry links in the private composite feed are parsed."""
entry = gdata.calendar.CalendarEventEntryFromString(
test_data.RECURRENCE_EXCEPTION_ENTRY)
self.assert_(isinstance(entry.recurrence_exception, list))
self.assert_(isinstance(entry.recurrence_exception[0].entry_link,
gdata.EntryLink))
self.assert_(isinstance(entry.recurrence_exception[0].entry_link.entry,
gdata.calendar.CalendarEventEntry))
self.assertEquals(
entry.recurrence_exception[0].entry_link.entry.author[0].name.text,
'gdata ops')
def testSequence(self):
entry = gdata.calendar.CalendarEventEntry(
sequence=gdata.calendar.Sequence(value='1'))
entry2 = gdata.calendar.CalendarEventEntryFromString(str(entry))
self.assertEqual(entry.sequence.value, entry2.sequence.value)
entry = gdata.calendar.CalendarEventEntryFromString(
'<entry xmlns="%s"><sequence xmlns="%s" value="7" /></entry>' % (
atom.ATOM_NAMESPACE, gdata.calendar.GCAL_NAMESPACE))
self.assertEqual(entry.sequence.value, '7')
def testOriginalEntry(self):
"""Make sure original entry in the private composite feed are parsed."""
entry = gdata.calendar.CalendarEventEntryFromString(
test_data.RECURRENCE_EXCEPTION_ENTRY)
self.assertEquals(
entry.recurrence_exception[0].entry_link.entry.original_event.id,
'i7lgfj69mjqjgnodklif3vbm7g')
class CalendarFeedTestRegenerated(CalendarFeedTest):
def setUp(self):
old_calendar_feed = (
gdata.calendar.CalendarListFeedFromString(test_data.CALENDAR_FEED))
self.calendar_feed = (
gdata.calendar.CalendarListFeedFromString(str(old_calendar_feed)))
tree = ElementTree.fromstring(str(old_calendar_feed))
class CalendarEventFeedTest(unittest.TestCase):
def setUp(self):
self.calendar_event_feed = (
gdata.calendar.CalendarEventFeedFromString(
test_data.CALENDAR_FULL_EVENT_FEED))
def testEntryCount(self):
# Assert the number of items in the feed of events
self.assertEquals(len(self.calendar_event_feed.entry),11)
def testToAndFromString(self):
# Assert the appropriate type for each entry
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry, gdata.calendar.CalendarEventEntry),
"Entry must be an instance of a CalendarEventEntry")
# Regenerate feed from xml text
new_calendar_event_feed = gdata.calendar.CalendarEventFeedFromString(
str(self.calendar_event_feed))
for an_entry in new_calendar_event_feed.entry:
self.assert_(isinstance(an_entry, gdata.calendar.CalendarEventEntry),
"Entry in regenerated feed must be an instance of CalendarEventEntry")
def testAuthor(self):
"""Tests the existence of a <atom:author> and verifies the name and email"""
# Assert that each element in the feed author list is an atom.Author
for an_author in self.calendar_event_feed.author:
self.assert_(isinstance(an_author, atom.Author),
"Calendar event feed <atom:author> element must be an instance of " +
"atom.Author: %s" % an_author)
# Assert the feed author name is as expected
self.assertEquals(self.calendar_event_feed.author[0].name.text,
'GData Ops Demo')
# Assert the feed author name is as expected
self.assertEquals(self.calendar_event_feed.author[0].email.text,
'[email protected]')
# Assert one of the values for an entry author
self.assertEquals(self.calendar_event_feed.entry[0].author[0].name.text,
'GData Ops Demo')
self.assertEquals(self.calendar_event_feed.entry[0].author[0].email.text,
'[email protected]')
def testId(self):
"""Tests the existence of a <atom:id> in the feed and entries and
verifies the value"""
# Assert the feed id exists and is an atom.Id
self.assert_(isinstance(self.calendar_event_feed.id, atom.Id),
"Calendar event feed <atom:id> element must be an instance of " +
"atom.Id: %s" % self.calendar_event_feed.id)
# Assert the feed id value is as expected
self.assertEquals(self.calendar_event_feed.id.text,
'http://www.google.com/calendar/feeds/default/private/full')
# Assert that each entry has an id which is an atom.Id
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry.id, atom.Id),
"Calendar event entry <atom:id> element must be an " +
"instance of atom.Id: %s" % an_entry.id)
# Assert one of the values for an id
self.assertEquals(self.calendar_event_feed.entry[1].id.text,
'http://www.google.com/calendar/feeds/default/private/full/' +
'2qt3ao5hbaq7m9igr5ak9esjo0')
def testPublished(self):
"""Tests the existence of a <atom:published> in the entries and
verifies the value"""
# Assert that each entry has a published value which is an atom.Published
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry.published, atom.Published),
"Calendar event entry <atom:published> element must be an instance " +
"of atom.Published: %s" % an_entry.published)
# Assert one of the values for published is as expected
self.assertEquals(self.calendar_event_feed.entry[1].published.text,
'2007-03-20T21:26:04.000Z')
def testUpdated(self):
"""Tests the existence of a <atom:updated> in the feed and the entries and
verifies the value"""
# Assert that the feed updated element exists and is an atom.Updated
self.assert_(isinstance(self.calendar_event_feed.updated, atom.Updated),
"Calendar feed <atom:updated> element must be an instance of " +
"atom.Updated: %s" % self.calendar_event_feed.updated)
# Assert that each entry has a updated value which is an atom.Updated
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry.updated, atom.Updated),
"Calendar event entry <atom:updated> element must be an instance " +
"of atom.Updated: %s" % an_entry.updated)
# Assert the feed updated value is as expected
self.assertEquals(self.calendar_event_feed.updated.text,
'2007-03-20T21:29:57.000Z')
# Assert one of the values for updated
self.assertEquals(self.calendar_event_feed.entry[3].updated.text,
'2007-03-20T21:25:46.000Z')
def testTitle(self):
"""Tests the existence of a <atom:title> in the feed and the entries
and verifies the value"""
# Assert that the feed title element exists and is an atom.Title
self.assert_(isinstance(self.calendar_event_feed.title, atom.Title),
"Calendar feed <atom:title> element must be an instance of " +
"atom.Title: %s" % self.calendar_event_feed.title)
# Assert that each entry has a title value which is an atom.Title
for an_entry in self.calendar_event_feed.entry:
self.assert_(isinstance(an_entry.title, atom.Title),
"Calendar event entry <atom:title> element must be an instance of " +
"atom.Title: %s" % an_entry.title)
# Assert the feed title value is as expected
self.assertEquals(self.calendar_event_feed.title.text, 'GData Ops Demo')
# Assert one of the values for title
self.assertEquals(self.calendar_event_feed.entry[0].title.text,
'test deleted')
def testPostLink(self):
"""Tests the existence of a <atom:link> with a rel='...#post'
and verifies the value"""
# Assert that each link in the feed is an atom.Link
for a_link in self.calendar_event_feed.link:
self.assert_(isinstance(a_link, atom.Link),
"Calendar event entry <atom:link> element must be an instance of " +
"atom.Link: %s" % a_link)
# Assert post link exists
self.assert_(self.calendar_event_feed.GetPostLink() is not None)
# Assert the post link value is as expected
self.assertEquals(self.calendar_event_feed.GetPostLink().href,
'http://www.google.com/calendar/feeds/default/private/full')
def testEditLink(self):
"""Tests the existence of a <atom:link> with a rel='edit' in each entry
and verifies the value"""
# Assert that each link in the feed is an atom.Link
for a_link in self.calendar_event_feed.link:
self.assert_(isinstance(a_link, atom.Link),
"Calendar event entry <atom:link> element must be an instance of " +
"atom.Link: %s" % a_link)
# Assert edit link exists
for a_entry in self.calendar_event_feed.entry:
self.assert_(a_entry.GetEditLink() is not None)
# Assert the edit link value is as expected
self.assertEquals(self.calendar_event_feed.entry[0].GetEditLink().href,
'http://www.google.com/calendar/feeds/default/private/full/o99flmgm' +
'kfkfrr8u745ghr3100/63310109397')
self.assertEquals(self.calendar_event_feed.entry[0].GetEditLink().type,
'application/atom+xml')
def testOpenSearch(self):
"""Tests the existence of <openSearch:totalResults>,
<openSearch:startIndex>, <openSearch:itemsPerPage>"""
# Assert that the elements exist and are the appropriate type
self.assert_(isinstance(self.calendar_event_feed.total_results,
gdata.TotalResults),
"Calendar event feed <openSearch:totalResults> element must be an " +
"instance of gdata.TotalResults: %s" % (
self.calendar_event_feed.total_results))
self.assert_(
isinstance(self.calendar_event_feed.start_index, gdata.StartIndex),
"Calendar event feed <openSearch:startIndex> element must be an " +
"instance of gdata.StartIndex: %s" % (
self.calendar_event_feed.start_index))
self.assert_(
isinstance(self.calendar_event_feed.items_per_page, gdata.ItemsPerPage),
"Calendar event feed <openSearch:itemsPerPage> element must be an " +
"instance of gdata.ItemsPerPage: %s" % (
self.calendar_event_feed.items_per_page))
# Assert the values for each openSearch element are as expected
self.assertEquals(self.calendar_event_feed.total_results.text, '10')
self.assertEquals(self.calendar_event_feed.start_index.text, '1')
self.assertEquals(self.calendar_event_feed.items_per_page.text, '25')
def testGenerator(self):
"""Tests the existence of <atom:generator> and verifies the value"""
# Assert that the element exists and is of the appropriate type
self.assert_(isinstance(self.calendar_event_feed.generator, atom.Generator),
"Calendar event feed <atom:generator> element must be an instance " +
"of atom.Generator: %s" % self.calendar_event_feed.generator)
# Assert the generator version, uri and text are as expected
self.assertEquals(self.calendar_event_feed.generator.text,
'Google Calendar')
self.assertEquals(self.calendar_event_feed.generator.version, '1.0')
self.assertEquals(self.calendar_event_feed.generator.uri,
'http://www.google.com/calendar')
def testCategory(self):
"""Tests the existence of <atom:category> and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for a_category in self.calendar_event_feed.category:
self.assert_(isinstance(a_category, atom.Category),
"Calendar event feed <atom:category> element must be an instance " +
"of atom.Category: %s" % a_category)
self.assertEquals(a_category.scheme,
'http://schemas.google.com/g/2005#kind')
self.assertEquals(a_category.term,
'http://schemas.google.com/g/2005#event')
for an_event in self.calendar_event_feed.entry:
for a_category in an_event.category:
self.assert_(isinstance(a_category, atom.Category),
"Calendar event feed entry <atom:category> element must be an " +
"instance of atom.Category: %s" % a_category)
self.assertEquals(a_category.scheme,
'http://schemas.google.com/g/2005#kind')
self.assertEquals(a_category.term,
'http://schemas.google.com/g/2005#event')
def testSendEventNotifications(self):
"""Test the existence of <gCal:sendEventNotifications>
and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(isinstance(an_event.send_event_notifications,
gdata.calendar.SendEventNotifications),
("Calendar event feed entry <gCal:sendEventNotifications> element " +
"must be an instance of gdata.calendar.SendEventNotifications: %s") % (
an_event.send_event_notifications,))
# Assert the <gCal:sendEventNotifications> are as expected
self.assertEquals(
self.calendar_event_feed.entry[0].send_event_notifications.value,
'false')
self.assertEquals(
self.calendar_event_feed.entry[2].send_event_notifications.value,
'true')
def testQuickAdd(self):
"""Test the existence of <gCal:quickadd>
and verifies the value"""
entry = gdata.calendar.CalendarEventEntry()
entry.quick_add = gdata.calendar.QuickAdd(value='true')
unmarshalled_entry = entry.ToString()
tag = '{%s}quickadd' % (gdata.calendar.GCAL_NAMESPACE)
marshalled_entry = ElementTree.fromstring(unmarshalled_entry).find(tag)
    self.assertEquals(marshalled_entry.attrib['value'], 'true')
    self.assertEquals(marshalled_entry.tag, tag)
def testEventStatus(self):
"""Test the existence of <gd:eventStatus>
and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(isinstance(an_event.event_status,
gdata.calendar.EventStatus),
("Calendar event feed entry <gd:eventStatus> element " +
"must be an instance of gdata.calendar.EventStatus: %s") % (
an_event.event_status,))
# Assert the <gd:eventStatus> are as expected
self.assertEquals(
self.calendar_event_feed.entry[0].event_status.value,
'CANCELED')
self.assertEquals(
self.calendar_event_feed.entry[1].event_status.value,
'CONFIRMED')
def testComments(self):
"""Tests the existence of <atom:comments> and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(an_event.comments is None or isinstance(an_event.comments,
gdata.calendar.Comments),
("Calendar event feed entry <gd:comments> element " +
"must be an instance of gdata.calendar.Comments: %s") % (
an_event.comments,))
def testVisibility(self):
"""Test the existence of <gd:visibility> and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(isinstance(an_event.visibility,
gdata.calendar.Visibility),
("Calendar event feed entry <gd:visibility> element " +
"must be an instance of gdata.calendar.Visibility: %s") % (
an_event.visibility,))
# Assert the <gd:visibility> are as expected
self.assertEquals(
self.calendar_event_feed.entry[0].visibility.value,
'DEFAULT')
self.assertEquals(
self.calendar_event_feed.entry[1].visibility.value,
'PRIVATE')
self.assertEquals(
self.calendar_event_feed.entry[2].visibility.value,
'PUBLIC')
def testTransparency(self):
"""Test the existence of <gd:transparency> and verifies the value"""
# Assert that the element exists and is of the appropriate type and value
for an_event in self.calendar_event_feed.entry:
self.assert_(isinstance(an_event.transparency,
gdata.calendar.Transparency),
("Calendar event feed entry <gd:transparency> element " +
"must be an instance of gdata.calendar.Transparency: %s") % (
an_event.transparency,))
# Assert the <gd:transparency> are as expected
self.assertEquals(
self.calendar_event_feed.entry[0].transparency.value,
'OPAQUE')
self.assertEquals(
self.calendar_event_feed.entry[1].transparency.value,
'OPAQUE')
self.assertEquals(
self.calendar_event_feed.entry[2].transparency.value,
'OPAQUE')
    # TODO: TEST VALUES OF TRANSPARENCY OTHER THAN OPAQUE
def testWhere(self):
"""Tests the existence of a <gd:where> in the entries
and verifies the value"""
# Assert that each entry has a where value which is an gdata.calendar.Where
for an_entry in self.calendar_event_feed.entry:
for a_where in an_entry.where:
self.assert_(isinstance(a_where, gdata.calendar.Where),
"Calendar event entry <gd:where> element must be an instance of " +
"gdata.calendar.Where: %s" % a_where)
# Assert one of the values for where is as expected
self.assertEquals(self.calendar_event_feed.entry[1].where[0].value_string,
'Dolores Park with Kim')
def testWhenAndReminder(self):
"""Tests the existence of a <gd:when> and <gd:reminder> in the entries
and verifies the values"""
# Assert that each entry's when value is a gdata.calendar.When
# Assert that each reminder is a gdata.calendar.Reminder
for an_entry in self.calendar_event_feed.entry:
for a_when in an_entry.when:
self.assert_(isinstance(a_when, gdata.calendar.When),
"Calendar event entry <gd:when> element must be an instance " +
"of gdata.calendar.When: %s" % a_when)
for a_reminder in a_when.reminder:
self.assert_(isinstance(a_reminder, gdata.calendar.Reminder),
"Calendar event entry <gd:reminder> element must be an " +
"instance of gdata.calendar.Reminder: %s" % a_reminder)
# Assert one of the values for when is as expected
self.assertEquals(self.calendar_event_feed.entry[0].when[0].start_time,
'2007-03-23T12:00:00.000-07:00')
self.assertEquals(self.calendar_event_feed.entry[0].when[0].end_time,
'2007-03-23T13:00:00.000-07:00')
# Assert the reminder child of when is as expected
self.assertEquals(
self.calendar_event_feed.entry[0].when[0].reminder[0].minutes, '10')
self.assertEquals(
self.calendar_event_feed.entry[1].when[0].reminder[0].minutes, '20')
def testBatchRequestParsing(self):
batch_request = gdata.calendar.CalendarEventFeedFromString(
test_data.CALENDAR_BATCH_REQUEST)
self.assertEquals(len(batch_request.entry), 4)
# Iterate over the batch request entries and match the operation with
# the batch id. These values are hard coded to match the test data.
for entry in batch_request.entry:
if entry.batch_id.text == '1':
self.assertEquals(entry.batch_operation.type, 'insert')
if entry.batch_id.text == '2':
self.assertEquals(entry.batch_operation.type, 'query')
if entry.batch_id.text == '3':
self.assertEquals(entry.batch_operation.type, 'update')
self.assertEquals(entry.title.text, 'Event updated via batch')
if entry.batch_id.text == '4':
self.assertEquals(entry.batch_operation.type, 'delete')
self.assertEquals(entry.id.text,
'http://www.google.com/calendar/feeds/default/'
'private/full/d8qbg9egk1n6lhsgq1sjbqffqc')
self.assertEquals(entry.GetEditLink().href,
'http://www.google.com/calendar/feeds/default/'
'private/full/d8qbg9egk1n6lhsgq1sjbqffqc/'
'63326018324')
def testBatchResponseParsing(self):
batch_response = gdata.calendar.CalendarEventFeedFromString(
test_data.CALENDAR_BATCH_RESPONSE)
self.assertEquals(len(batch_response.entry), 4)
for entry in batch_response.entry:
if entry.batch_id.text == '1':
self.assertEquals(entry.batch_operation.type, 'insert')
self.assertEquals(entry.batch_status.code, '201')
self.assertEquals(entry.batch_status.reason, 'Created')
self.assertEquals(entry.id.text, 'http://www.google.com/calendar/'
'feeds/default/private/full/'
'n9ug78gd9tv53ppn4hdjvk68ek')
if entry.batch_id.text == '2':
self.assertEquals(entry.batch_operation.type, 'query')
if entry.batch_id.text == '3':
self.assertEquals(entry.batch_operation.type, 'update')
if entry.batch_id.text == '4':
self.assertEquals(entry.batch_operation.type, 'delete')
self.assertEquals(entry.id.text, 'http://www.google.com/calendar/'
'feeds/default/private/full/'
'd8qbg9egk1n6lhsgq1sjbqffqc')
# TODO add reminder tests for absolute_time and hours/seconds (if possible)
# TODO test recurrence and recurrenceexception
# TODO test originalEvent
class CalendarWebContentTest(unittest.TestCase):
def setUp(self):
self.calendar_event_feed = (
gdata.calendar.CalendarEventFeedFromString(
test_data.CALENDAR_FULL_EVENT_FEED))
def testAddSimpleWebContentEventEntry(self):
"""Verifies that we can add a web content link to an event entry."""
title = "Al Einstein's Birthday!"
href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
type = 'image/jpeg'
url = 'http://gdata.ops.demo.googlepages.com/einstein.jpg'
width = '300'
height = '225'
# Create a web content event
event = gdata.calendar.CalendarEventEntry()
web_content = gdata.calendar.WebContent(url=url, width=width, height=height)
web_content_link = gdata.calendar.WebContentLink(title=title,
href=href, link_type=type, web_content=web_content)
event.link.append(web_content_link)
# Verify the web content link exists and contains the expected data
web_content_link = event.GetWebContentLink()
self.assertValidWebContentLink(title, href, type, web_content_link)
# Verify the web content element exists and contains the expected data
web_content_element = web_content_link.web_content
self.assertValidSimpleWebContent(url, width, height, web_content_element)
def testAddWebContentGadgetEventEntry(self):
"""Verifies that we can add a web content gadget link to an event entry."""
title = "Date and Time Gadget"
href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
url = 'http://google.com/ig/modules/datetime.xml'
type = 'application/x-google-gadgets+xml'
width = '300'
height = '200'
pref_name = 'color'
pref_value = 'green'
# Create a web content event
event = gdata.calendar.CalendarEventEntry()
web_content = gdata.calendar.WebContent(url=url, width=width, height=height)
web_content.gadget_pref.append(
gdata.calendar.WebContentGadgetPref(name=pref_name, value=pref_value))
web_content_link = gdata.calendar.WebContentLink(title=title,
href=href, web_content=web_content, link_type=type)
event.link.append(web_content_link)
# Verify the web content link exists and contains the expected data
web_content_link = event.GetWebContentLink()
self.assertValidWebContentLink(title, href, type, web_content_link)
# Verify the web content element exists and contains the expected data
web_content_element = web_content_link.web_content
self.assertValidWebContentGadget(url, width, height,
pref_name, pref_value, web_content_element)
def testFromXmlToSimpleWebContent(self):
"""Verifies that we can read a web content link from an event entry."""
# Expected values (from test_data.py file)
title = 'World Cup'
href = 'http://www.google.com/calendar/images/google-holiday.gif'
type = 'image/gif'
url = 'http://www.google.com/logos/worldcup06.gif'
width = '276'
height = '120'
# Note: The tenth event entry contains web content
web_content_event = self.calendar_event_feed.entry[9]
# Verify the web content link exists and contains the expected data
web_content_link = web_content_event.GetWebContentLink()
self.assertValidWebContentLink(title, href, type, web_content_link)
# Verify the web content element exists and contains the expected data
web_content_element = web_content_link.web_content
self.assertValidSimpleWebContent(url, width, height, web_content_element)
def testFromXmlToWebContentGadget(self):
"""Verifies that we can read a web content link from an event entry."""
# Expected values (from test_data.py file)
title = 'Date and Time Gadget'
href = 'http://gdata.ops.demo.googlepages.com/birthdayicon.gif'
url = 'http://google.com/ig/modules/datetime.xml'
type = 'application/x-google-gadgets+xml'
width = '300'
height = '136'
pref_name = 'color'
pref_value = 'green'
# Note: The eleventh event entry contains web content
web_content_event = self.calendar_event_feed.entry[10]
# Verify the web content link exists and contains the expected data
web_content_link = web_content_event.GetWebContentLink()
self.assertValidWebContentLink(title, href, type, web_content_link)
# Verify the web content element exists and contains the expected data
web_content_element = web_content_link.web_content
self.assertValidWebContentGadget(url, width, height, pref_name,
pref_value, web_content_element)
def assertValidWebContentLink(self, expected_title=None, expected_href=None,
expected_type=None, web_content_link=None):
"""Asserts that the web content link is the correct type and contains the
expected values"""
self.assert_(isinstance(web_content_link, gdata.calendar.WebContentLink),
"Web content link element must be an " +
"instance of gdata.calendar.WebContentLink: %s" % web_content_link)
expected_rel = '%s/%s' % (gdata.calendar.GCAL_NAMESPACE, 'webContent')
self.assertEquals(expected_rel, web_content_link.rel)
self.assertEqual(expected_title, web_content_link.title)
self.assertEqual(expected_href, web_content_link.href)
self.assertEqual(expected_type, web_content_link.type)
def assertValidSimpleWebContent(self, expected_url=None, expected_width=None,
expected_height=None, web_content_element=None):
"""Asserts that the web content element is the correct type and contains
the expected values"""
self.assert_(isinstance(web_content_element, gdata.calendar.WebContent),
"Calendar event entry <gCal:webContent> element must be an " +
"instance of gdata.calendar.WebContent: %s" % web_content_element)
self.assertEquals(expected_width, web_content_element.width)
self.assertEquals(expected_height, web_content_element.height)
self.assertEquals(expected_url, web_content_element.url)
def assertValidWebContentGadget(self, expected_url=None, expected_width=None,
expected_height=None, expected_pref_name=None, expected_pref_value=None,
web_content_element=None):
"""Asserts that the web content element is the correct type and contains
the expected values"""
self.assert_(isinstance(web_content_element, gdata.calendar.WebContent),
"Calendar event entry <gCal:webContent> element must be an " +
"instance of gdata.calendar.WebContent: %s" % web_content_element)
self.assertEquals(expected_width, web_content_element.width)
self.assertEquals(expected_height, web_content_element.height)
self.assertEquals(expected_url, web_content_element.url)
self.assertEquals(expected_pref_name,
web_content_element.gadget_pref[0].name)
self.assertEquals(expected_pref_value,
web_content_element.gadget_pref[0].value)
class ExtendedPropertyTest(unittest.TestCase):
def testExtendedPropertyToAndFromXml(self):
ep = gdata.calendar.ExtendedProperty(name='test')
ep.value = 'val'
xml_string = ep.ToString()
ep2 = gdata.ExtendedPropertyFromString(xml_string)
self.assertEquals(ep.name, ep2.name)
self.assertEquals(ep.value, ep2.value)
if __name__ == '__main__':
unittest.main()
|
apache-2.0
|
laszlocsomor/tensorflow
|
tensorflow/python/estimator/canned/optimizers.py
|
73
|
2924
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods related to optimizers used in canned_estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.training import adagrad
from tensorflow.python.training import adam
from tensorflow.python.training import ftrl
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import rmsprop
_OPTIMIZER_CLS_NAMES = {
'Adagrad': adagrad.AdagradOptimizer,
'Adam': adam.AdamOptimizer,
'Ftrl': ftrl.FtrlOptimizer,
'RMSProp': rmsprop.RMSPropOptimizer,
'SGD': gradient_descent.GradientDescentOptimizer,
}
def get_optimizer_instance(opt, learning_rate=None):
"""Returns an optimizer instance.
Supports the following types for the given `opt`:
* An `Optimizer` instance: Returns the given `opt`.
* A string: Creates an `Optimizer` subclass with the given `learning_rate`.
Supported strings:
* 'Adagrad': Returns an `AdagradOptimizer`.
* 'Adam': Returns an `AdamOptimizer`.
* 'Ftrl': Returns an `FtrlOptimizer`.
* 'RMSProp': Returns an `RMSPropOptimizer`.
* 'SGD': Returns a `GradientDescentOptimizer`.
Args:
opt: An `Optimizer` instance, or string, as discussed above.
learning_rate: A float. Only used if `opt` is a string.
Returns:
An `Optimizer` instance.
Raises:
ValueError: If `opt` is an unsupported string.
ValueError: If `opt` is a supported string but `learning_rate` was not
specified.
ValueError: If `opt` is none of the above types.
"""
if isinstance(opt, six.string_types):
if opt in six.iterkeys(_OPTIMIZER_CLS_NAMES):
if not learning_rate:
raise ValueError('learning_rate must be specified when opt is string.')
return _OPTIMIZER_CLS_NAMES[opt](learning_rate=learning_rate)
raise ValueError(
'Unsupported optimizer name: {}. Supported names are: {}'.format(
opt, tuple(sorted(six.iterkeys(_OPTIMIZER_CLS_NAMES)))))
if not isinstance(opt, optimizer_lib.Optimizer):
raise ValueError(
'The given object is not an Optimizer instance. Given: {}'.format(opt))
return opt
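# Illustrative usage sketch (not part of the original module): get_optimizer_instance
# accepts either one of the string names listed above or an existing Optimizer
# instance. The learning rate below is an arbitrary placeholder value.
if __name__ == '__main__':
  sgd = get_optimizer_instance('SGD', learning_rate=0.05)
  same = get_optimizer_instance(sgd)  # an Optimizer instance is returned unchanged
  print(type(sgd).__name__, sgd is same)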
|
apache-2.0
|
mitsuhiko/requests
|
requests/structures.py
|
46
|
1728
|
# -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
class CaseInsensitiveDict(dict):
"""Case-insensitive Dictionary
For example, ``headers['content-encoding']`` will return the
value of a ``'Content-Encoding'`` response header."""
@property
def lower_keys(self):
if not hasattr(self, '_lower_keys') or not self._lower_keys:
self._lower_keys = dict((k.lower(), k) for k in list(self.keys()))
return self._lower_keys
def _clear_lower_keys(self):
if hasattr(self, '_lower_keys'):
self._lower_keys.clear()
def __setitem__(self, key, value):
dict.__setitem__(self, key, value)
self._clear_lower_keys()
def __delitem__(self, key):
dict.__delitem__(self, key)
        self._clear_lower_keys()
def __contains__(self, key):
return key.lower() in self.lower_keys
def __getitem__(self, key):
# We allow fall-through here, so values default to None
if key in self:
return dict.__getitem__(self, self.lower_keys[key.lower()])
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
class LookupDict(dict):
"""Dictionary lookup object."""
def __init__(self, name=None):
self.name = name
super(LookupDict, self).__init__()
def __repr__(self):
return '<lookup \'%s\'>' % (self.name)
def __getitem__(self, key):
# We allow fall-through here, so values default to None
return self.__dict__.get(key, None)
def get(self, key, default=None):
return self.__dict__.get(key, default)
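# Illustrative usage sketch (not part of the original module): the case-insensitive
# lookups described in the CaseInsensitiveDict docstring, using hypothetical header names.
if __name__ == '__main__':
    headers = CaseInsensitiveDict()
    headers['Content-Encoding'] = 'gzip'
    print(headers['content-encoding'])  # 'gzip'
    print(headers.get('X-Missing'))     # None, via the fall-through default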
|
isc
|
kaikai581/NOvA
|
RecoValidation/hadd_reco_validation.py
|
1
|
1974
|
#
# This script makes it easy to hadd the grid output of the RecoValidation jobs.
# This script uses the same configuration file as the submit_reco_validation.py script.
#
from ConfigParser import *
import argparse, os
#
# command line argument parsing
#
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config_file', type=str, help='configuration file name', default='submit_reco_validation.cfg')
args = parser.parse_args()
config_file = args.config_file
#
# read in the configuration file
#
parser = SafeConfigParser()
parser.read(config_file)
sam_datasets = [x.strip() for x in parser.get('sam', 'defnames').split(',')]
nfiles = parser.get('sam', 'number_of_files')
working_dir = parser.get('working folder', 'path')
# grid config variables
jobname = parser.get('grid configuration', 'jobname')
fcl = parser.get('grid configuration', 'c')
dest = parser.get('grid configuration', 'dest')
testrel = parser.get('grid configuration', 'testrel')
tag = parser.get('grid configuration', 'tag')
histTier = parser.get('grid configuration', 'histTier')
maxopt = parser.get('grid configuration', 'maxopt')
group = parser.get('grid configuration', 'G')
njobs = parser.get('grid configuration', 'njobs')
files_per_job = parser.get('grid configuration', 'files_per_job')
opportunistic = parser.get('grid configuration', 'opportunistic')
copyOut = parser.get('grid configuration', 'copyOut')
print_jobsub = parser.get('grid configuration', 'print_jobsub')
expected_lifetime = parser.get('grid configuration', 'expected_lifetime')
#
# Copy files to the working directory and hadd them.
#
for defname in sam_datasets:
grid_copy_dir = os.path.join(working_dir, defname, 'hadded_output', 'grid_copy')
os.system('mkdir -p ' + grid_copy_dir)
os.system('cp ' + os.path.join(dest, '*.root') + ' ' + grid_copy_dir)
os.system('. ~/.bashrc;setup_nova;hadd -T -f -k ' + os.path.join(grid_copy_dir,'../hadded_output.root') + ' ' + os.path.join(grid_copy_dir,'*.root'))
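#
# Illustrative configuration sketch (not part of the original script): the section and
# key names below are the ones read via parser.get() above; every value is a placeholder.
#
# [sam]
# defnames = my_dataset_a, my_dataset_b
# number_of_files = 10
#
# [working folder]
# path = /path/to/working_dir
#
# [grid configuration]
# jobname = reco_validation
# dest = /path/to/grid_output
# (remaining keys: c, testrel, tag, histTier, maxopt, G, njobs, files_per_job,
#  opportunistic, copyOut, print_jobsub, expected_lifetime)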
|
gpl-2.0
|
txemi/ansible
|
contrib/inventory/serf.py
|
395
|
3032
|
#!/usr/bin/env python
# (c) 2015, Marc Abramowitz <[email protected]>
#
# This file is part of Ansible.
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Dynamic inventory script which lets you use nodes discovered by Serf
# (https://serfdom.io/).
#
# Requires the `serfclient` Python module from
# https://pypi.python.org/pypi/serfclient
#
# Environment variables
# ---------------------
# - `SERF_RPC_ADDR`
# - `SERF_RPC_AUTH`
#
# These variables are described at https://www.serfdom.io/docs/commands/members.html#_rpc_addr
import argparse
import collections
import os
import sys
# https://pypi.python.org/pypi/serfclient
from serfclient import SerfClient, EnvironmentConfig
try:
import json
except ImportError:
import simplejson as json
_key = 'serf'
def _serf_client():
env = EnvironmentConfig()
return SerfClient(host=env.host, port=env.port, rpc_auth=env.auth_key)
def get_serf_members_data():
return _serf_client().members().body['Members']
def get_nodes(data):
return [node['Name'] for node in data]
def get_groups(data):
groups = collections.defaultdict(list)
for node in data:
for key, value in node['Tags'].items():
groups[value].append(node['Name'])
return groups
def get_meta(data):
meta = {'hostvars': {}}
for node in data:
meta['hostvars'][node['Name']] = node['Tags']
return meta
def print_list():
data = get_serf_members_data()
nodes = get_nodes(data)
groups = get_groups(data)
meta = get_meta(data)
inventory_data = {_key: nodes, '_meta': meta}
inventory_data.update(groups)
print(json.dumps(inventory_data))
def print_host(host):
data = get_serf_members_data()
meta = get_meta(data)
print(json.dumps(meta['hostvars'][host]))
def get_args(args_list):
parser = argparse.ArgumentParser(
description='ansible inventory script reading from serf cluster')
mutex_group = parser.add_mutually_exclusive_group(required=True)
help_list = 'list all hosts from serf cluster'
mutex_group.add_argument('--list', action='store_true', help=help_list)
help_host = 'display variables for a host'
mutex_group.add_argument('--host', help=help_host)
return parser.parse_args(args_list)
def main(args_list):
args = get_args(args_list)
if args.list:
print_list()
if args.host:
print_host(args.host)
if __name__ == '__main__':
main(sys.argv[1:])
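# Illustrative invocation sketch (not part of the original script); the RPC address is a
# placeholder and assumes a locally reachable Serf agent:
#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --list
#   SERF_RPC_ADDR=127.0.0.1:7373 ./serf.py --host some-node-name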
|
gpl-3.0
|
mikalstill/nova
|
nova/tests/unit/scheduler/weights/test_weights_ioopsweight.py
|
9
|
2834
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler IoOpsWeigher weights
"""
from nova.scheduler import weights
from nova.scheduler.weights import io_ops
from nova import test
from nova.tests.unit.scheduler import fakes
class IoOpsWeigherTestCase(test.NoDBTestCase):
def setUp(self):
super(IoOpsWeigherTestCase, self).setUp()
self.weight_handler = weights.HostWeightHandler()
self.weighers = [io_ops.IoOpsWeigher()]
def _get_weighed_host(self, hosts, io_ops_weight_multiplier):
if io_ops_weight_multiplier is not None:
self.flags(io_ops_weight_multiplier=io_ops_weight_multiplier,
group='filter_scheduler')
return self.weight_handler.get_weighed_objects(self.weighers,
hosts, {})[0]
def _get_all_hosts(self):
host_values = [
('host1', 'node1', {'num_io_ops': 1}),
('host2', 'node2', {'num_io_ops': 2}),
('host3', 'node3', {'num_io_ops': 0}),
('host4', 'node4', {'num_io_ops': 4})
]
return [fakes.FakeHostState(host, node, values)
for host, node, values in host_values]
def _do_test(self, io_ops_weight_multiplier, expected_weight,
expected_host):
hostinfo_list = self._get_all_hosts()
weighed_host = self._get_weighed_host(hostinfo_list,
io_ops_weight_multiplier)
self.assertEqual(weighed_host.weight, expected_weight)
if expected_host:
self.assertEqual(weighed_host.obj.host, expected_host)
def test_io_ops_weight_multiplier_by_default(self):
self._do_test(io_ops_weight_multiplier=None,
expected_weight=0.0,
expected_host='host3')
def test_io_ops_weight_multiplier_zero_value(self):
# We do not know the host, all have same weight.
self._do_test(io_ops_weight_multiplier=0.0,
expected_weight=0.0,
expected_host=None)
def test_io_ops_weight_multiplier_positive_value(self):
self._do_test(io_ops_weight_multiplier=2.0,
expected_weight=2.0,
expected_host='host4')
|
apache-2.0
|
chiffa/Image_pipe
|
examples/xi_support.py
|
1
|
5228
|
import os
from _csv import writer as csv_writer
from collections import defaultdict
import numpy as np
import scipy
from chiffatools.dataviz import better2D_desisty_plot
from matplotlib import pyplot as plt
from scipy.stats import linregress
import imagepipe.raw_functions
import imagepipe.tools.helpers
import imagepipe.wrapped_functions
from imagepipe import core_functions as cf
from imagepipe.core_functions import generator_wrapper
def xi_traverse(main_root, matching_map=None):
"""
    Traverses the main_root directory looking for all the '.tif/.TIF' files, performs name matching,
    then iterates through the resulting matched dictionary.
    Matching assumption is that except for the matching keys, the names are identical
    :param main_root: folder that will be traversed in depth
:param matching_map: {'pattern in the file name': color channel number}
:return:
"""
matched_images = defaultdict(lambda: defaultdict(lambda: defaultdict(dict)))
tags_dict = defaultdict(lambda: [])
assert(matching_map is not None)
for current_location, sub_directories, files in os.walk(main_root):
for img in files:
if ('.TIF' in img or '.tif' in img) and '_thumb_' not in img:
prefix = imagepipe.raw_functions.split_and_trim(current_location, main_root)
pre_name = '-'.join(img.split('-')[1:])[:-4]
# print pre_name[-10:]
_time, _z = pre_name[-9:].split('_')
time_stamp = int(_time[1:])
z_position = int(_z[1:])
color = matching_map[img.split('-')[0]]
name_pattern = ' - '.join(prefix + [pre_name[:-10]])
matched_images[name_pattern][time_stamp][color][z_position] = os.path.join(current_location, img)
# print name_pattern
# print time_stamp, color, z_position
for name_pattern, time_dict in matched_images.iteritems():
for time_stamp, color_dict in time_dict.iteritems():
channels = ['', '']
for color, z_position_dict in color_dict.iteritems():
z_collector = []
for z_position, file_name in sorted(z_position_dict.items()):
z_collector.append(
imagepipe.tools.helpers.tiff_stack_2_np_arr(file_name)[0, :, :])
channels[color] = np.array(z_collector)
yield name_pattern, str(time_stamp), channels
@generator_wrapper(in_dims=(None, 2, 2, 2, 2, 2, 3, 3, None), out_dims=(None,))
def xi_pre_render(name_pattern, proj_gfp, qual_gfp, cell_labels, average_gfp_pad, proj_mch,
mch, gfp, timestamp,
save=False, directory_to_save_to='verification', mch_cutoff=0.2, slector_cutoff=0.1):
plt.figure(figsize=(20, 15))
plt.suptitle(name_pattern)
main_ax = plt.subplot(231)
plt.title('GFP')
plt.imshow(proj_gfp, interpolation='nearest')
plt.contour(cell_labels > 0, [0.5], colors='w')
plt.subplot(232, sharex=main_ax, sharey=main_ax)
plt.title('log-GFP')
plt.imshow(np.log(proj_gfp + np.min(proj_gfp[proj_gfp > 0])), cmap='hot', interpolation='nearest')
plt.contour(cell_labels > 0, [0.5], colors='w')
plt.subplot(233, sharex=main_ax, sharey=main_ax)
plt.title('raw segmentation')
plt.imshow(qual_gfp, cmap='gray', interpolation='nearest')
plt.contour(cell_labels > 0, [0.5], colors='w')
ax = plt.subplot(234, sharex=main_ax, sharey=main_ax)
plt.title('labeled segmentation')
plt.imshow(cell_labels, cmap=plt.cm.spectral, interpolation='nearest')
unique = np.unique(cell_labels)
for i in unique:
mask = cell_labels == i
x, y = scipy.ndimage.measurements.center_of_mass(mask)
ax.text(y-8, x+8, '%s' % i, fontsize=10)
plt.subplot(235)
selector = np.logical_and(mch > slector_cutoff, gfp > slector_cutoff)
plt.title('mCh-GFP correlation - %s, qual GFP intensity: %s' %
(np.corrcoef(mch[selector], gfp[selector])[0, 1], np.median(gfp[mch > mch_cutoff])))
slope, intercept, rvalue, pvalue, stderr = linregress(mch[selector], gfp[selector])
better2D_desisty_plot(mch[selector], gfp[selector])
linarray = np.arange(0.1, 0.5, 0.05)
plt.plot(linarray, intercept+slope*linarray, 'r')
plt.xlabel('mCherry')
plt.ylabel('GFP')
plt.subplot(236, sharex=main_ax, sharey=main_ax)
plt.title('mCherry')
plt.imshow(proj_mch, interpolation='nearest')
plt.contour(cell_labels > 0, [0.5], colors='w')
with open('xi_analys_results.csv', 'ab') as output_file:
writer = csv_writer(output_file)
puck = [name_pattern, timestamp,
np.corrcoef(mch[selector], gfp[selector])[0, 1],
np.median(gfp[mch > mch_cutoff]), np.average(gfp[mch > mch_cutoff]),
slope, rvalue, pvalue]
writer.writerow(puck)
if not save:
plt.show()
else:
name_puck = directory_to_save_to+'/'+'xi_pre_render-'+timestamp+'-'+name_pattern+'.png'
plt.savefig(name_puck)
plt.close()
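# Illustrative usage sketch (not part of the original module): iterating xi_traverse with a
# hypothetical colour map; the folder path and file-name prefixes are placeholders.
# for name_pattern, time_stamp, (channel_0, channel_1) in xi_traverse('/data/xi_run',
#                                                                     {'mCherry': 0, 'GFP': 1}):
#     print(name_pattern, time_stamp, channel_0.shape, channel_1.shape)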
|
bsd-3-clause
|
MrChoclate/tripping_sansa
|
api/views.py
|
1
|
3318
|
from rest_framework_bulk import BulkModelViewSet
from api.serializers import *
from api.models import *
class ProductViewSet(BulkModelViewSet):
queryset = Product.objects.all()
serializer_class = ProductSerializer
class Product_relationshipViewSet(BulkModelViewSet):
queryset = Product_relationship.objects.all()
serializer_class = Product_relationshipSerializer
class Product_versionViewSet(BulkModelViewSet):
queryset = Product_version.objects.all()
serializer_class = Product_versionSerializer
class Product_version_relationshipViewSet(BulkModelViewSet):
queryset = Product_version_relationship.objects.all()
serializer_class = Product_version_relationshipSerializer
class Product_categoryViewSet(BulkModelViewSet):
queryset = Product_category.objects.all()
serializer_class = Product_categorySerializer
class Product_category_assignmentViewSet(BulkModelViewSet):
queryset = Product_category_assignment.objects.all()
serializer_class = Product_category_assignmentSerializer
class Product_category_hierarchyViewSet(BulkModelViewSet):
queryset = Product_category_hierarchy.objects.all()
serializer_class = Product_category_hierarchySerializer
class Product_view_definitionViewSet(BulkModelViewSet):
queryset = Product_view_definition.objects.all()
serializer_class = Product_view_definitionSerializer
class View_definition_contextViewSet(BulkModelViewSet):
queryset = View_definition_context.objects.all()
serializer_class = View_definition_contextSerializer
class Product_property_assignmentViewSet(BulkModelViewSet):
queryset = Product_property_assignment.objects.all()
serializer_class = Product_property_assignmentSerializer
class View_definition_relationshipViewSet(BulkModelViewSet):
queryset = View_definition_relationship.objects.all()
serializer_class = View_definition_relationshipSerializer
class View_definition_usageViewSet(BulkModelViewSet):
queryset = View_definition_usage.objects.all()
serializer_class = View_definition_usageSerializer
class Assembly_component_relationshipViewSet(BulkModelViewSet):
queryset = Assembly_component_relationship.objects.all()
serializer_class = Assembly_component_relationshipSerializer
class Next_assembly_usageViewSet(BulkModelViewSet):
queryset = Next_assembly_usage.objects.all()
serializer_class = Next_assembly_usageSerializer
class Component_upper_level_identificationViewSet(BulkModelViewSet):
queryset = Component_upper_level_identification.objects.all()
serializer_class = Component_upper_level_identificationSerializer
class Promissory_usageViewSet(BulkModelViewSet):
queryset = Promissory_usage.objects.all()
serializer_class = Promissory_usageSerializer
class Value_with_unitViewSet(BulkModelViewSet):
queryset = Value_with_unit.objects.all()
serializer_class = Value_with_unitSerializer
class UnitViewSet(BulkModelViewSet):
queryset = Unit.objects.all()
serializer_class = UnitSerializer
class Measure_valueViewSet(BulkModelViewSet):
queryset = Measure_value.objects.all()
serializer_class = Measure_valueSerializer
class any_number_valueViewSet(BulkModelViewSet):
queryset = any_number_value.objects.all()
serializer_class = any_number_valueSerializer
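# Illustrative wiring sketch (not part of the original module): these viewsets would
# typically be registered with a router in urls.py; the router class and URL prefix
# below are assumptions, not taken from this project.
# from rest_framework_bulk.routes import BulkRouter
# router = BulkRouter()
# router.register(r'products', ProductViewSet)
# urlpatterns = router.urls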
|
agpl-3.0
|
neuroidss/cloudbrain
|
src/cloudbrain/connectors/muse.py
|
3
|
1199
|
import logging
from pythonosc import osc_server, dispatcher
from cloudbrain.connectors.museio import _start_muse_io
_LOGGER = logging.getLogger(__name__)
_LOGGER.level = logging.DEBUG
_LOGGER.addHandler(logging.StreamHandler())
class MuseConnector(object):
"""
Get OSC messages from the Muse
"""
def __init__(self, ip, port, start_muse_io, callback_functions):
self.port = port
self.ip = ip
self.callback_functions = callback_functions
if start_muse_io:
_start_muse_io(port)
def start(self):
muse_dispatcher = dispatcher.Dispatcher()
for routing_key, callback in self.callback_functions.items():
# routing_key is "user_key:metric_name" here
print(self.callback_functions)
metric_name = routing_key.split(":")[-1]
_LOGGER.info('Mapping %s' % metric_name)
muse_dispatcher.map(metric_name, callback)
_LOGGER.debug('Dispatcher: %s' % muse_dispatcher)
server = osc_server.ThreadingOSCUDPServer(
(self.ip, self.port), muse_dispatcher)
print("Serving on {}".format(server.server_address))
server.serve_forever()
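# Illustrative usage sketch (not part of the original module): wiring one callback into a
# MuseConnector. The address, port and routing key are placeholders; start() blocks while
# serving OSC messages.
if __name__ == '__main__':
    def print_metric(path, *values):
        print(path, values)
    connector = MuseConnector(ip='127.0.0.1', port=5000, start_muse_io=False,
                              callback_functions={'demo_user:/muse/eeg': print_metric})
    connector.start()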
|
agpl-3.0
|
kailIII/geraldo
|
site/newsite/site-geraldo/django/db/backends/postgresql/base.py
|
15
|
5921
|
"""
PostgreSQL database backend for Django.
Requires psycopg 1: http://initd.org/projects/psycopg1
"""
from django.db.backends import *
from django.db.backends.postgresql.client import DatabaseClient
from django.db.backends.postgresql.creation import DatabaseCreation
from django.db.backends.postgresql.introspection import DatabaseIntrospection
from django.db.backends.postgresql.operations import DatabaseOperations
from django.db.backends.postgresql.version import get_version
from django.utils.encoding import smart_str, smart_unicode
try:
import psycopg as Database
except ImportError, e:
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("Error loading psycopg module: %s" % e)
DatabaseError = Database.DatabaseError
IntegrityError = Database.IntegrityError
class UnicodeCursorWrapper(object):
"""
A thin wrapper around psycopg cursors that allows them to accept Unicode
strings as params.
This is necessary because psycopg doesn't apply any DB quoting to
parameters that are Unicode strings. If a param is Unicode, this will
convert it to a bytestring using database client's encoding before passing
it to psycopg.
All results retrieved from the database are converted into Unicode strings
before being returned to the caller.
"""
def __init__(self, cursor, charset):
self.cursor = cursor
self.charset = charset
def format_params(self, params):
if isinstance(params, dict):
result = {}
charset = self.charset
for key, value in params.items():
result[smart_str(key, charset)] = smart_str(value, charset)
return result
else:
return tuple([smart_str(p, self.charset, True) for p in params])
def execute(self, sql, params=()):
return self.cursor.execute(smart_str(sql, self.charset), self.format_params(params))
def executemany(self, sql, param_list):
new_param_list = [self.format_params(params) for params in param_list]
return self.cursor.executemany(sql, new_param_list)
def __getattr__(self, attr):
if attr in self.__dict__:
return self.__dict__[attr]
else:
return getattr(self.cursor, attr)
def __iter__(self):
return iter(self.cursor)
class DatabaseFeatures(BaseDatabaseFeatures):
uses_savepoints = True
class DatabaseWrapper(BaseDatabaseWrapper):
operators = {
'exact': '= %s',
'iexact': '= UPPER(%s)',
'contains': 'LIKE %s',
'icontains': 'LIKE UPPER(%s)',
'regex': '~ %s',
'iregex': '~* %s',
'gt': '> %s',
'gte': '>= %s',
'lt': '< %s',
'lte': '<= %s',
'startswith': 'LIKE %s',
'endswith': 'LIKE %s',
'istartswith': 'LIKE UPPER(%s)',
'iendswith': 'LIKE UPPER(%s)',
}
def __init__(self, *args, **kwargs):
super(DatabaseWrapper, self).__init__(*args, **kwargs)
self.features = DatabaseFeatures()
self.ops = DatabaseOperations()
self.client = DatabaseClient()
self.creation = DatabaseCreation(self)
self.introspection = DatabaseIntrospection(self)
self.validation = BaseDatabaseValidation()
def _cursor(self, settings):
set_tz = False
if self.connection is None:
set_tz = True
if settings.DATABASE_NAME == '':
from django.core.exceptions import ImproperlyConfigured
raise ImproperlyConfigured("You need to specify DATABASE_NAME in your Django settings file.")
conn_string = "dbname=%s" % settings.DATABASE_NAME
if settings.DATABASE_USER:
conn_string = "user=%s %s" % (settings.DATABASE_USER, conn_string)
if settings.DATABASE_PASSWORD:
conn_string += " password='%s'" % settings.DATABASE_PASSWORD
if settings.DATABASE_HOST:
conn_string += " host=%s" % settings.DATABASE_HOST
if settings.DATABASE_PORT:
conn_string += " port=%s" % settings.DATABASE_PORT
self.connection = Database.connect(conn_string, **self.options)
self.connection.set_isolation_level(1) # make transactions transparent to all cursors
cursor = self.connection.cursor()
if set_tz:
cursor.execute("SET TIME ZONE %s", [settings.TIME_ZONE])
if not hasattr(self, '_version'):
self.__class__._version = get_version(cursor)
if self._version < (8, 0):
# No savepoint support for earlier version of PostgreSQL.
self.features.uses_savepoints = False
cursor.execute("SET client_encoding to 'UNICODE'")
cursor = UnicodeCursorWrapper(cursor, 'utf-8')
return cursor
def typecast_string(s):
"""
Cast all returned strings to unicode strings.
"""
if not s and not isinstance(s, str):
return s
return smart_unicode(s)
# Register these custom typecasts, because Django expects dates/times to be
# in Python's native (standard-library) datetime/time format, whereas psycopg
# use mx.DateTime by default.
try:
Database.register_type(Database.new_type((1082,), "DATE", util.typecast_date))
except AttributeError:
raise Exception("You appear to be using psycopg version 2. Set your DATABASE_ENGINE to 'postgresql_psycopg2' instead of 'postgresql'.")
Database.register_type(Database.new_type((1083,1266), "TIME", util.typecast_time))
Database.register_type(Database.new_type((1114,1184), "TIMESTAMP", util.typecast_timestamp))
Database.register_type(Database.new_type((16,), "BOOLEAN", util.typecast_boolean))
Database.register_type(Database.new_type((1700,), "NUMERIC", util.typecast_decimal))
Database.register_type(Database.new_type(Database.types[1043].values, 'STRING', typecast_string))
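# Illustrative settings sketch (not part of the original module): the DATABASE_* keys
# read by DatabaseWrapper._cursor() above; all values are placeholders.
# DATABASE_ENGINE = 'postgresql'
# DATABASE_NAME = 'mydb'
# DATABASE_USER = 'myuser'
# DATABASE_PASSWORD = 'secret'
# DATABASE_HOST = ''    # empty means a local socket connection
# DATABASE_PORT = ''    # empty means the default port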
|
lgpl-3.0
|
danielja/mudpy
|
filter/filter.py
|
1
|
2938
|
import meta
import sage
import re
from sage import player, triggers, aliases
from sage.signals import pre_shutdown
from sage.signals.gmcp import skills
import time
import MySQLdb as mysql
import MySQLdb.cursors
class FilterMap(object):
def __init__(self):
with open('mapper/mysql.cfg') as f:
self.login = [x.strip().split(':') for x in f.readlines()][0]
self.sofar = ""
def check(self, line):
if line.strip() == '' or line == '\n' or line == ' ':
return (1, False)
db = mysql.connect(host=self.login[0], user=self.login[1],passwd=self.login[2],
db='achaea',cursorclass=MySQLdb.cursors.DictCursor)
cur=db.cursor()
cur.execute('SELECT `name` from achaea.rooms '
' WHERE %s = concat(`name`,".")', line)
roomres = cur.fetchall()
cur.execute('SELECT rawtxt, mytxt, regex, replace_str, hold, gag from achaea.filter '
' WHERE %s = rawtxt or %s rlike mytxt', (line, line))
allres = cur.fetchall()
cur.close()
db.commit()
db.close()
if len(roomres) > 0:
return(1,False)
if allres is None or len(allres) != 1:
sage.echo("No match found : %s"%len(allres))
if (len(allres) > 1):
for res in allres:
print line
sage.echo(line)
sage.echo(res)
return (len(allres), False)
for res in allres:
if res['replace_str'] == "":
return (1, res['gag']==1)
m = re.match(res['replace_str'], line)
if m is None:
sage.echo("LINE NOT MATCHED!")
return (1, False)
newline = res['replace_str']%m.groups()
self.sofar = self.sofar + newline + " "
if res['hold'] == 1:
self.sofar = self.sofar + " " + newline
else:
sage.echo(self.sofar)
        self.sofar = ""
if res['gag'] == 1:
return (1, True)
return (1, False)
def add(self, line):
db = mysql.connect(host=self.login[0], user=self.login[1],passwd=self.login[2],
db='achaea',cursorclass=MySQLdb.cursors.DictCursor)
cur=db.cursor()
cur.execute('INSERT into achaea.filter'
' (rawtxt,count) '
' VALUES '
' (%s, %s) '
' ON DUPLICATE KEY UPDATE rawtxt=rawtxt, count=count+1'
';',(line, 1))
cur.close()
db.commit()
db.close()
filter_triggers = triggers.create_group('filter', app='filter')
filt = FilterMap()
@filter_triggers.regex("^(.*)$",enabled=True)
def all_match(trigger):
(rescount, gag) = filt.check(trigger.groups[0])
if(rescount == 0):
filt.add(trigger.groups[0])
elif gag:
trigger.line.gag()
|
gpl-2.0
|
alessio/devedeng
|
src/devedeng/file_movie.py
|
2
|
31271
|
# Copyright 2014 (C) Raster Software Vigo (Sergio Costas)
#
# This file is part of DeVeDe-NG
#
# DeVeDe-NG is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# DeVeDe-NG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from gi.repository import Gtk,GObject
import os
import devedeng.configuration_data
import devedeng.interface_manager
import devedeng.converter
import devedeng.ask_subtitles
import devedeng.preview
import devedeng.file_copy
import devedeng.subtitles_mux
class file_movie(devedeng.interface_manager.interface_manager):
__gsignals__ = {'title_changed': (GObject.SIGNAL_RUN_FIRST, None,(str,))}
def __init__(self,file_name, list_files = None):
self.list_files = list_files
devedeng.interface_manager.interface_manager.__init__(self)
self.wfile_properties = None
self.builder = None
self.config = devedeng.configuration_data.configuration.get_config()
self.set_type(None, self.config.disc_type)
self.config.connect('disc_type',self.set_type)
if list_files == None:
self.add_text("file_name", file_name)
self.add_text("title_name", os.path.splitext(os.path.basename(file_name))[0])
self.add_label("original_size",None)
self.add_label("original_length",None)
self.add_label("original_videorate",None)
self.add_label("original_audiorate",None)
self.add_label("original_aspect_ratio",None)
self.add_label("original_fps",None)
self.add_toggle("show_in_menu", True)
else:
self.original_aspect_ratio = 1.777 # dummy value
self.add_text("chapter_list_entry", None);
self.add_dualtoggle("format_pal","format_ntsc",self.config.PAL)
self.add_toggle("video_rate_automatic", True)
self.add_toggle("audio_rate_automatic", True)
self.add_toggle("divide_in_chapters", True)
self.add_toggle("force_subtitles", False)
self.add_toggle("mirror_horizontal", False)
self.add_toggle("mirror_vertical", False)
self.add_toggle("two_pass_encoding", False)
self.add_toggle("sound5_1", False)
self.add_toggle("copy_sound", False)
self.add_toggle("is_mpeg_ps", False)
self.add_toggle("no_reencode_audio_video", False)
if (self.disc_type == "divx") or (self.disc_type == "mkv"):
self.add_toggle("gop12", False)
else:
self.add_toggle("gop12", True)
self.add_group("final_size_pal", ["size_auto", "size_1920x1080", "size_1280x720", "size_720x576", "size_704x576", "size_480x576","size_352x576", "size_352x288"], "size_auto")
self.add_group("final_size_ntsc", ["size_auto_ntsc", "size_1920x1080_ntsc", "size_1280x720_ntsc", "size_720x480_ntsc", "size_704x480_ntsc", "size_480x480_ntsc","size_352x480_ntsc", "size_352x240_ntsc"], "size_auto_ntsc")
self.add_group("aspect_ratio", ["aspect_auto", "aspect_classic", "aspect_wide"], "aspect_auto")
self.add_group("scaling", ["add_black_bars", "scale_picture" ,"cut_picture"], "add_black_bars")
self.add_group("rotation",["rotation_0","rotation_90","rotation_180","rotation_270"], "rotation_0")
self.add_group("deinterlace", ["deinterlace_none", "deinterlace_ffmpeg", "deinterlace_yadif"], "deinterlace_none")
self.add_group("actions", ["action_stop","action_play_first","action_play_previous","action_play_again","action_play_next","action_play_last"], "action_stop")
self.add_integer_adjustment("volume", 100)
if (self.disc_type == "dvd"):
self.add_integer_adjustment("video_rate", 5000)
elif (self.disc_type == "vcd"):
self.add_integer_adjustment("video_rate", 1152)
else:
self.add_integer_adjustment("video_rate", 2000)
self.add_integer_adjustment("audio_rate", 224)
self.add_integer_adjustment("subt_font_size", 28)
self.add_float_adjustment("audio_delay", 0.0)
self.add_integer_adjustment("chapter_size", 5)
self.add_colorbutton("subt_fill_color", self.config.subt_fill_color)
self.add_colorbutton("subt_outline_color", self.config.subt_outline_color)
self.add_float_adjustment("subt_thickness", self.config.subt_outline_thickness)
if list_files == None:
self.add_list("subtitles_list")
else:
self.add_list("files_to_set")
for e in list_files:
self.files_to_set.append([e.title_name, e])
self.add_show_hide("format_pal", ["size_pal"], ["size_ntsc"])
self.add_enable_disable("divide_in_chapters", ["chapter_size_spinbutton", "chapter_list_entry"], [])
self.add_enable_disable("video_rate_automatic", [], ["video_spinbutton"])
self.add_enable_disable("audio_rate_automatic", [], ["audio_spinbutton"])
self.add_enable_disable("sound5_1", ["copy_sound"], [])
if (self.disc_type == "dvd"):
self.add_enable_disable("aspect_wide", [], ["size_704x576", "size_480x576","size_352x576", "size_352x288","size_704x480_ntsc", "size_480x480_ntsc","size_352x480_ntsc", "size_352x240_ntsc"])
self.add_enable_disable("copy_sound", [], ["audio_delay_spinbutton","audio_rate_automatic","audio_spinbutton","spinbutton_volume","scale_volume","reset_volume"])
common_elements = ["gop12","video_rate_automatic","video_spinbutton","audio_rate_automatic","audio_spinbutton","format_pal","format_ntsc","spinbutton_volume","scale_volume","reset_volume",
"size_auto", "size_1920x1080", "size_1280x720", "size_720x576", "size_704x576", "size_480x576","size_352x576", "size_352x288",
"size_auto_ntsc", "size_1920x1080_ntsc", "size_1280x720_ntsc", "size_720x480_ntsc", "size_704x480_ntsc", "size_480x480_ntsc","size_352x480_ntsc", "size_352x240_ntsc",
"aspect_auto","aspect_classic","aspect_wide","mirror_horizontal","mirror_vertical","add_black_bars","scale_picture","cut_picture",
"rotation_0","rotation_90","rotation_180","rotation_270","two_pass_encoding","deinterlace_none","deinterlace_ffmpeg","deinterlace_yadif",
"audio_delay_spinbutton","sound5_1","copy_sound"]
is_mpeg_ps_list = common_elements[:]
is_mpeg_ps_list.append("no_reencode_audio_video")
is_mpeg_ps_list.append("font_size_spinbutton")
is_mpeg_ps_list.append("force_subtitles")
is_mpeg_ps_list.append("add_subtitles")
is_mpeg_ps_list.append("del_subtitles")
no_reencode_audio_video_list = common_elements[:]
no_reencode_audio_video_list.append("is_mpeg_ps")
self.add_enable_disable("is_mpeg_ps", [], is_mpeg_ps_list)
self.add_enable_disable("no_reencode_audio_video", [], no_reencode_audio_video_list)
if list_files == None:
cv = devedeng.converter.converter.get_converter()
film_analizer = (cv.get_film_analizer())()
if (film_analizer.get_film_data(self.file_name)):
self.error = True
else:
self.error = False
self.audio_list = film_analizer.audio_list[:]
self.video_list = film_analizer.video_list[:]
self.audio_streams = film_analizer.audio_streams
self.video_streams = film_analizer.video_streams
self.original_width = film_analizer.original_width
self.original_height = film_analizer.original_height
self.original_length = film_analizer.original_length
self.original_size = film_analizer.original_size
self.original_aspect_ratio = film_analizer.original_aspect_ratio
self.original_videorate = film_analizer.original_videorate
self.original_audiorate = film_analizer.original_audiorate
self.original_audiorate_uncompressed = film_analizer.original_audiorate_uncompressed
self.original_fps = film_analizer.original_fps
self.original_file_size = film_analizer.original_file_size
if self.original_audiorate <= 0:
# just a guess, but usually correct
self.original_audiorate = 224
if self.original_videorate <= 0:
# presume that there are only video and audio streams
self.original_videorate = ((8 * self.original_file_size) / self.original_length) - (self.original_audiorate * self.audio_streams)
self.width_midle = -1
self.height_midle = -1
self.width_final = -1
self.height_final = -1
self.video_rate_auto = self.video_rate
self.audio_rate_auto = self.audio_rate
self.video_rate_final = self.video_rate
self.audio_rate_final = self.audio_rate
self.aspect_ratio_final = None
self.converted_filename = None
def set_title(self,new_title):
self.title_name = new_title
self.emit('title_changed',self.title_name)
def get_duration(self):
return self.original_length
def get_estimated_size(self):
""" Returns the estimated final file size, in kBytes, based on the final audio and video rate, and the subtitles """
self.set_final_rates()
self.set_final_size_aspect()
if self.is_mpeg_ps:
estimated_size = self.original_file_size / 1000
else:
      # let's assume 8kbps for each subtitle
sub_rate = 8 * len(self.subtitles_list)
estimated_size = ((self.video_rate_final + (self.audio_rate_final * self.audio_streams) + sub_rate) * self.original_length) / 8
return estimated_size
def get_size_data(self):
estimated_size = self.get_estimated_size()
if self.is_mpeg_ps or self.no_reencode_audio_video:
videorate_fixed_size = True
else:
videorate_fixed_size = False
    # let's assume 8kbps for each subtitle
sub_rate = 8 * len(self.subtitles_list)
return estimated_size, videorate_fixed_size, self.audio_rate_final * self.audio_streams, sub_rate, self.width_final, self.height_final, self.original_length
def set_auto_video_audio_rate(self, new_video_rate, new_audio_rate):
self.video_rate_auto = int(new_video_rate)
self.audio_rate_auto = int(new_audio_rate)
def get_max_resolution(self,rx,ry,aspect):
tmpx = ry*aspect
tmpy = rx/aspect
if (tmpx > rx):
return tmpx,ry
else:
return rx,tmpy
def set_final_rates(self):
if (self.disc_type == "divx") or (self.disc_type == "mkv"):
self.audio_rate_auto = 192
elif (self.disc_type == "vcd") or (self.disc_type == "svcd") or (self.disc_type == "cvd"):
self.audio_rate_auto = 224
else: # dvd
if self.sound5_1:
self.audio_rate_auto = 384
else:
self.audio_rate_auto = 224
if self.is_mpeg_ps or self.no_reencode_audio_video:
self.video_rate_final = self.original_videorate
self.audio_rate_final = self.original_audiorate
else:
if self.video_rate_automatic:
self.video_rate_final = self.video_rate_auto
else:
self.video_rate_final = self.video_rate
if self.copy_sound:
self.audio_rate_final = self.original_audiorate
else:
if self.audio_rate_automatic:
self.audio_rate_final = self.audio_rate_auto
else:
self.audio_rate_final = self.audio_rate
def set_final_size_aspect(self):
if self.is_mpeg_ps:
self.width_midle = self.original_width
self.width_final = self.original_width
self.height_midle = self.original_height
self.height_final = self.original_height
self.aspect_ratio_final = self.original_aspect_ratio
return
if self.format_pal:
final_size = self.final_size_pal
else:
final_size = self.final_size_ntsc[:-5] # remove the "_ntsc" from the string
# for divx or matroska, if the size and the aspect ratio are automatic, just don't change them
if ((self.disc_type == "divx") or (self.disc_type == "mkv")) and (final_size == "size_auto") and (self.aspect_ratio == "aspect_auto"):
self.width_midle = self.original_width
self.width_final = self.original_width
self.height_midle = self.original_height
self.height_final = self.original_height
self.aspect_ratio_final = self.original_aspect_ratio
return
# The steps are:
# - Decide the final aspect ratio
# - Calculate the middle size: the original video will be cut to this size, or black bars will be added
# - Calculate the final size: the middle video will be scaled to this size
aspect_wide = False
# first, decide the final aspect ratio
if (self.disc_type == "vcd") or (self.disc_type == "svcd") or (self.disc_type == "cvd"):
self.aspect_ratio_final = 4.0/3.0
else:
if (self.aspect_ratio == "aspect_auto"):
if (self.disc_type != "dvd"):
self.aspect_ratio_final = self.original_aspect_ratio
else:
if self.original_aspect_ratio >= 1.7:
self.aspect_ratio_final = 16.0/9.0
aspect_wide = True
else:
self.aspect_ratio_final = 4.0/3.0
elif (self.aspect_ratio == "aspect_classic"):
self.aspect_ratio_final = 4.0/3.0
else:
self.aspect_ratio_final = 16.0/9.0
aspect_wide = True
# now, the final resolution
if self.disc_type == "vcd":
self.width_final = 352
if (self.format_pal):
self.height_final = 288
else:
self.height_final = 240
else:
if final_size == "size_auto":
if self.disc_type == "svcd":
self.width_final = 480
if (self.format_pal):
self.height_final = 576
else:
self.height_final = 480
elif self.disc_type == "cvd":
self.width_final = 352
if (self.format_pal):
self.height_final = 576
else:
self.height_final = 480
elif self.disc_type == "dvd":
if aspect_wide:
self.width_final = 720
if (self.format_pal):
self.height_final = 576
else:
self.height_final = 480
else:
tx, ty = self.get_max_resolution(self.original_width,self.original_height,self.original_aspect_ratio)
if (self.format_pal):
th = 576
th2 = 288
else:
th = 480
th2 = 240
if ( tx <= 352 ) and (ty <= th2):
self.width_final = 352
self.height_final = th2
elif (tx <= 352) and (ty <= th):
self.width_final = 352
self.height_final = th
elif (tx <= 704) and (ty <= th):
self.width_final = 704
self.height_final = th
else:
self.width_final = 720
self.height_final = th
else:
self.width_final , self.height_final = self.get_max_resolution(self.original_width,self.original_height,self.original_aspect_ratio)
else:
values = final_size[5:].split("x")
self.width_final = int(values[0])
self.height_final = int(values[1])
self.width_final = int(self.width_final)
self.height_final = int(self.height_final)
# finally, calculate the middle size
if (self.rotation == "rotation_90") or (self.rotation == "rotation_270"):
midle_aspect_ratio = 1.0 / self.original_aspect_ratio
else:
midle_aspect_ratio = self.original_aspect_ratio
if self.scaling == "scale_picture":
self.width_midle = int(self.original_width)
self.height_midle = int(self.original_height)
elif self.scaling == "add_black_bars":
if midle_aspect_ratio > self.aspect_ratio_final: # add horizontal black bars, at top and bottom
self.width_midle = int(self.original_width)
self.height_midle = int(self.original_height * midle_aspect_ratio / self.aspect_ratio_final)
else: # add vertical black bars, at left and right
self.width_midle = int(self.original_width * self.aspect_ratio_final / midle_aspect_ratio)
self.height_midle = int(self.original_height)
else: # cut picture
if midle_aspect_ratio > self.aspect_ratio_final:
self.width_midle = int(self.original_width * self.aspect_ratio_final / midle_aspect_ratio)
self.height_midle = int(self.original_height)
else:
self.width_midle = int(self.original_width)
self.height_midle = int(self.original_height * midle_aspect_ratio / self.aspect_ratio_final)
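# Rough example of the middle-size step above (illustrative numbers only):
# converting a 1280x720 (16:9) source to a 4:3 target,
#   add_black_bars: width_midle = 1280, height_midle = int(720 * (16/9) / (4/3)) = 960
#   cut picture:    width_midle = int(1280 * (4/3) / (16/9)) = 960, height_midle = 720
# The middle frame is then scaled to width_final x height_final.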
def set_type(self,obj = None,disc_type = None):
if (disc_type != None):
self.disc_type = disc_type
def delete_file(self):
return
def on_help_clicked(self,b):
help_file = devedeng.help.help("file.html")
def properties(self):
if (self.wfile_properties != None):
self.wfile_properties.present()
return
self.builder = Gtk.Builder()
self.builder.set_translation_domain(self.config.gettext_domain)
self.builder.add_from_file(os.path.join(self.config.glade,"wfile_properties.ui"))
self.builder.connect_signals(self)
self.wfile_properties = self.builder.get_object("file_properties")
self.wfile_properties.show_all()
self.wframe_title = self.builder.get_object("frame_title")
self.wframe_fileinfo = self.builder.get_object("frame_fileinfo")
self.wframe_multiproperties = self.builder.get_object("frame_multiproperties")
self.wtreeview_multiproperties = self.builder.get_object("treeview_multiproperties")
self.wbutton_preview = self.builder.get_object("button_preview")
self.wshow_in_menu = self.builder.get_object("show_in_menu")
self.wnotebook = self.builder.get_object("notebook")
# elements in page GENERAL
self.wformat_pal = self.builder.get_object("format_pal")
self.wformat_ntsc = self.builder.get_object("format_ntsc")
self.wframe_video_rate = self.builder.get_object("frame_video_rate")
self.wframe_audio_rate = self.builder.get_object("frame_audio_rate")
self.waudio_rate = self.builder.get_object("audio_rate")
self.wframe_division_chapters = self.builder.get_object("frame_division_chapters")
if (self.disc_type == "dvd") or (self.disc_type == "divx") or (self.disc_type == "mkv"):
self.waudio_rate.set_upper(448.0)
else:
self.waudio_rate.set_upper(384.0)
# elements in page SUBTITLES
self.wsubtitles_list = self.builder.get_object("subtitles_list")
self.wtreview_subtitles = self.builder.get_object("treeview_subtitles")
self.wscrolledwindow_subtitles = self.builder.get_object("scrolledwindow_subtitles")
self.wadd_subtitles = self.builder.get_object("add_subtitles")
self.wdel_subtitles = self.builder.get_object("del_subtitles")
selection = self.wtreview_subtitles.get_selection()
selection.set_mode(Gtk.SelectionMode.BROWSE)
# elements in page VIDEO OPTIONS
self.wsize_1920x1080 = self.builder.get_object("size_1920x1080")
self.wsize_1280x720 = self.builder.get_object("size_1280x720")
self.wsize_1920x1080_ntsc = self.builder.get_object("size_1920x1080_ntsc")
self.wsize_1280x720_ntsc = self.builder.get_object("size_1280x720_ntsc")
self.wframe_final_size = self.builder.get_object("frame_final_size")
self.wframe_aspect_ratio = self.builder.get_object("frame_aspect_ratio")
self.waspect_classic = self.builder.get_object("aspect_classic")
self.waspect_wide = self.builder.get_object("aspect_wide")
self.wadd_black_bars_pic = self.builder.get_object("add_black_bars_pic")
self.wscale_picture_pic = self.builder.get_object("scale_picture_pic")
self.wcut_picture_pic = self.builder.get_object("cut_picture_pic")
# elements in page AUDIO
self.wsound5_1 = self.builder.get_object("sound5_1")
self.wcopy_sound = self.builder.get_object("copy_sound")
# Adjust the interface UI to the kind of disc
if (self.disc_type == 'dvd'):
self.wsize_1920x1080.hide()
self.wsize_1280x720.hide()
self.wsize_1920x1080_ntsc.hide()
self.wsize_1280x720_ntsc.hide()
elif (self.disc_type == 'vcd'):
self.wshow_in_menu.hide()
self.wframe_video_rate.hide()
self.wframe_audio_rate.hide()
self.wframe_division_chapters.hide()
self.wframe_final_size.hide()
self.wframe_aspect_ratio.hide()
self.wsound5_1.hide()
self.wcopy_sound.hide()
self.wnotebook.remove_page(5)
elif (self.disc_type == 'svcd'):
self.wsize_1920x1080.hide()
self.wsize_1280x720.hide()
self.wsize_1920x1080_ntsc.hide()
self.wsize_1280x720_ntsc.hide()
self.wshow_in_menu.hide()
self.wframe_division_chapters.hide()
self.wframe_aspect_ratio.hide()
self.wsound5_1.hide()
self.wcopy_sound.hide()
self.wnotebook.remove_page(5)
elif (self.disc_type == 'cvd'):
self.wsize_1920x1080.hide()
self.wsize_1280x720.hide()
self.wsize_1920x1080_ntsc.hide()
self.wsize_1280x720_ntsc.hide()
self.wshow_in_menu.hide()
self.wframe_division_chapters.hide()
self.wframe_aspect_ratio.hide()
self.wsound5_1.hide()
self.wcopy_sound.hide()
self.wnotebook.remove_page(5)
elif (self.disc_type == 'divx'):
self.wshow_in_menu.hide()
self.wframe_division_chapters.hide()
self.wsound5_1.hide()
self.wcopy_sound.hide()
self.wnotebook.remove_page(5)
self.wnotebook.remove_page(1)
elif (self.disc_type == 'mkv'):
self.wshow_in_menu.hide()
self.wframe_division_chapters.hide()
self.wnotebook.remove_page(5)
if self.list_files == None:
self.wframe_title.show()
self.wframe_fileinfo.show()
self.wframe_multiproperties.hide()
else:
self.wframe_title.hide()
self.wframe_fileinfo.hide()
self.wframe_multiproperties.show()
self.wscrolledwindow_subtitles.hide()
self.wbutton_preview.hide()
self.wadd_subtitles.hide()
self.wdel_subtitles.hide()
sel = self.wtreeview_multiproperties.get_selection()
sel.set_mode(Gtk.SelectionMode.MULTIPLE)
self.wtreeview_multiproperties.set_rubber_banding(True)
self.save_ui()
self.update_ui(self.builder)
self.on_aspect_classic_toggled(None)
self.on_treeview_subtitles_cursor_changed(None)
def on_aspect_classic_toggled(self,b):
status1 = self.waspect_classic.get_active()
status2 = self.waspect_wide.get_active()
if (status1):
final_aspect = 4.0/3.0
elif (status2):
final_aspect = 16.0/9.0
else:
if self.original_aspect_ratio >= 1.7:
final_aspect = 16.0/9.0
else:
final_aspect = 4.0/3.0
if (final_aspect < self.original_aspect_ratio):
self.wadd_black_bars_pic.set_from_file(os.path.join(self.config.pic_path,"to_classic_blackbars.png"))
self.wcut_picture_pic.set_from_file(os.path.join(self.config.pic_path,"to_classic_cut.png"))
self.wscale_picture_pic.set_from_file(os.path.join(self.config.pic_path,"to_classic_scale.png"))
else:
self.wadd_black_bars_pic.set_from_file(os.path.join(self.config.pic_path,"to_wide_blackbars.png"))
self.wcut_picture_pic.set_from_file(os.path.join(self.config.pic_path,"to_wide_cut.png"))
self.wscale_picture_pic.set_from_file(os.path.join(self.config.pic_path,"to_wide_scale.png"))
def on_button_accept_clicked(self,b):
self.store_ui(self.builder)
self.config.subt_fill_color = self.subt_fill_color
self.config.subt_outline_color = self.subt_outline_color
self.config.subt_outline_thickness = self.subt_thickness
if self.list_files == None:
# editing file properties
self.set_final_rates()
self.set_final_size_aspect()
self.emit('title_changed',self.title_name)
else:
# editing properties for a group of files
data = self.store_file()
sel = self.wtreeview_multiproperties.get_selection()
model, pathlist = sel.get_selected_rows()
for file_path in pathlist:
obj = model[file_path][1]
obj.restore_file(data)
self.wfile_properties.destroy()
self.wfile_properties = None
self.builder = None
def on_button_cancel_clicked(self,b):
if self.list_files == None:
self.restore_ui()
self.wfile_properties.destroy()
self.wfile_properties = None
self.builder = None
def on_add_subtitles_clicked(self,b):
subt = devedeng.ask_subtitles.ask_subtitles()
if (subt.run()):
self.wsubtitles_list.append([subt.filename, subt.encoding, subt.language, subt.put_upper])
def get_selected_subtitle(self):
selection = self.wtreview_subtitles.get_selection()
model, treeiter = selection.get_selected()
if treeiter != None:
return ( (model, treeiter) )
else:
return ( (None, None) )
def on_del_subtitles_clicked(self,b):
model, treeiter = self.get_selected_subtitle()
if (model != None):
model.remove(treeiter)
def on_treeview_subtitles_cursor_changed(self,b):
model, treeiter = self.get_selected_subtitle()
if (model == None):
self.wdel_subtitles.set_sensitive(False)
else:
self.wdel_subtitles.set_sensitive(True)
def do_conversion(self, output_path, duration = 0):
self.converted_filename = output_path
if self.is_mpeg_ps:
converter = devedeng.file_copy.file_copy(self.file_name,output_path)
else:
self.set_final_size_aspect()
self.set_final_rates()
cv = devedeng.converter.converter.get_converter()
disc_converter = cv.get_disc_converter()
converter = disc_converter()
converter.convert_file(self,output_path,duration)
if len(self.subtitles_list) != 0:
last_process = converter
#if duration == 0:
# it seems that SPUMUX always fills the entire subtitles
duration2 = self.original_length
#else:
# duration2 = duration
stream_id = 0
for subt in self.subtitles_list:
subt_file = subt[0]
subt_codepage = subt[1]
subt_lang = subt[2]
subt_upper = subt[3]
if self.aspect_ratio_final >= 1.7:
final_aspect = "16:9"
else:
final_aspect = "4:3"
subt_mux = devedeng.subtitles_mux.subtitles_mux()
subt_mux.multiplex_subtitles( output_path, subt_file, subt_codepage, subt_lang, subt_upper,
self.subt_font_size,self.format_pal,self.force_subtitles,
final_aspect, duration2, stream_id,
self.subt_fill_color, self.subt_outline_color, self.subt_thickness)
subt_mux.add_dependency(last_process)
converter.add_child_process(subt_mux)
last_process = subt_mux
stream_id += 1
return converter
def on_button_preview_clicked(self,b):
self.store_ui(self.builder)
self.do_preview()
def do_preview(self):
wpreview = devedeng.preview.preview_window()
if (wpreview.run() == False):
return
run_window = devedeng.runner.runner()
p = self.do_conversion(os.path.join(self.config.tmp_folder,"movie_preview.mpg"),wpreview.lvalue)
run_window.add_process(p)
run_window.connect("done",self.preview_done)
run_window.run()
def preview_done(self,o,retval):
if (retval == 0):
cv = devedeng.converter.converter.get_converter()
disc_player = (cv.get_film_player())()
disc_player.play_film(os.path.join(self.config.tmp_folder,"movie_preview.mpg"))
def store_file(self):
data = self.serialize()
if "files_to_set" in data:
del data["files_to_set"]
return data
def restore_file(self,data):
self.unserialize(data)
def on_select_all_clicked(self,b):
sel = self.wtreeview_multiproperties.get_selection()
sel.select_all()
def on_unselect_all_clicked(self,b):
sel = self.wtreeview_multiproperties.get_selection()
sel.unselect_all()
|
gpl-3.0
|
yanheven/console
|
horizon/tables/formset.py
|
9
|
6931
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import logging
import sys
from django import template
from django.template import loader
from django.utils import datastructures
from horizon.tables import base as horizon_tables
LOG = logging.getLogger(__name__)
class FormsetCell(horizon_tables.Cell):
"""A DataTable cell that knows about its field from the formset."""
def __init__(self, *args, **kwargs):
super(FormsetCell, self).__init__(*args, **kwargs)
try:
self.field = (self.row.form or {})[self.column.name]
except KeyError:
self.field = None
else:
if self.field.errors:
self.attrs['class'] = (self.attrs.get('class', '') +
' error control-group')
self.attrs['title'] = ' '.join(
unicode(error) for error in self.field.errors)
class FormsetRow(horizon_tables.Row):
"""A DataTable row that knows about its form from the formset."""
template_path = 'horizon/common/_formset_table_row.html'
def __init__(self, column, datum, form):
self.form = form
super(FormsetRow, self).__init__(column, datum)
if self.cells == []:
# We need to be able to handle empty rows, because there may
# be extra empty forms in a formset. The original DataTable breaks
# on this, because it sets self.cells to [], but later expects a
# SortedDict. We just fill self.cells with empty Cells.
cells = []
for column in self.table.columns.values():
cell = self.table._meta.cell_class(None, column, self)
cells.append((column.name or column.auto, cell))
self.cells = datastructures.SortedDict(cells)
def render(self):
return loader.render_to_string(self.template_path,
{"row": self, "form": self.form})
class FormsetDataTableMixin(object):
"""A mixin for DataTable to support Django Formsets.
This works the same as the ``FormsetDataTable`` below, but can be used
to add to existing DataTable subclasses.
"""
formset_class = None
def __init__(self, *args, **kwargs):
super(FormsetDataTableMixin, self).__init__(*args, **kwargs)
self._formset = None
# Override Meta settings, because we need custom Row and Cell classes,
# and also our own template.
self._meta.row_class = FormsetRow
self._meta.cell_class = FormsetCell
self._meta.template = 'horizon/common/_formset_table.html'
def get_required_columns(self):
"""Lists names of columns that have required fields."""
required_columns = []
if self.formset_class:
empty_form = self.get_formset().empty_form
for column in self.columns.values():
field = empty_form.fields.get(column.name)
if field and field.required:
required_columns.append(column.name)
return required_columns
def _get_formset_data(self):
"""Formats the self.filtered_data in a way suitable for a formset."""
data = []
for datum in self.filtered_data:
form_data = {}
for column in self.columns.values():
value = column.get_data(datum)
form_data[column.name] = value
form_data['id'] = self.get_object_id(datum)
data.append(form_data)
return data
def get_formset(self):
"""Provide the formset corresponding to this DataTable.
Use this to validate the formset and to get the submitted data back.
"""
if self._formset is None:
self._formset = self.formset_class(
self.request.POST or None,
initial=self._get_formset_data(),
prefix=self._meta.name)
return self._formset
def get_empty_row(self):
"""Return a row with no data, for adding at the end of the table."""
return self._meta.row_class(self, None, self.get_formset().empty_form)
def get_rows(self):
"""Return the row data for this table broken out by columns.
The row objects get an additional ``form`` parameter, with the
formset form corresponding to that row.
"""
try:
rows = []
if self.formset_class is None:
formset = []
else:
formset = self.get_formset()
formset.is_valid()
for datum, form in itertools.izip_longest(self.filtered_data,
formset):
row = self._meta.row_class(self, datum, form)
if self.get_object_id(datum) == self.current_item_id:
self.selected = True
row.classes.append('current_selected')
rows.append(row)
except Exception:
# Exceptions can be swallowed at the template level here,
# re-raising as a TemplateSyntaxError makes them visible.
LOG.exception("Error while rendering table rows.")
exc_info = sys.exc_info()
raise template.TemplateSyntaxError, exc_info[1], exc_info[2]
return rows
def get_object_id(self, datum):
# We need to support ``None`` when there are more forms than data.
if datum is None:
return None
return super(FormsetDataTableMixin, self).get_object_id(datum)
class FormsetDataTable(FormsetDataTableMixin, horizon_tables.DataTable):
"""A DataTable with support for Django Formsets.
Note that :attr:`horizon.tables.DataTableOptions.row_class` and
:attr:`horizon.tables.DataTableOptions.cell_class` are overwritten in this
class, so setting them in ``Meta`` has no effect.
.. attribute:: formset_class
A class made with ``django.forms.formsets.formset_factory``
containing the definition of the formset to use with this data table.
The columns that are named the same as the formset fields will be
replaced with form widgets in the table. Any hidden fields from the
formset will also be included. The fields that are not hidden and
don't correspond to any column will not be included in the form.
"""
|
apache-2.0
|
sarvex/tensorflow
|
tensorflow/lite/python/op_hint.py
|
5
|
53155
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Define tflite op hints (intrinsic operations).
This essentially allows defining a TensorFlow API for tflite operations in
Python with hints on how they are represented in TensorFlow Lite. This basically
is a form of tflite intrinsic. It wraps a subpart of a TensorFlow execution
graph and is useful for LSTMs and other complicated TensorFlow constructions
that are difficult to pattern match in TOCO, but are represented by a single
accelerated tflite op.
Example:
def tflite_cool_activation(input):
# A cool activation function.
custom = tf.lite.OpHint("cool_activation")
input, = custom.add_inputs(input)
output = tf.sigmoid(input) * input
output, = custom.add_outputs(output)
return output
image = tf.compat.v1.placeholder(tf.float32, (1, 16, 16, 1))
output = tf.identity(tflite_cool_activation(image))
session = tf.compat.v1.Session()
graphdef_to_convert = tf.lite.experimental.convert_op_hints_to_stubs(session)
tflite_graph = tf.compat.v1.lite.toco_convert(
graphdef_to_convert, [image], [output], allow_custom_ops=True)
with open("/tmp/graph.fb", "wb") as fp:
fp.write(tflite_graph)
How does it work?:
OpHint is a helper that you use when defining a vanilla python function.
It allows you to wrap arguments with tf.identities with some custom attributes.
These attributes allow you to find the original block of ops that was created.
For example, if you use cool_activation above you essentially get:
a_input = tf.identity()
result = tf.multiply(tf.sigmoid(a_input), a_input)
output = tf.identity()
a_input and output are identities that carry parameters describing which
argument they are, the name of the function they should turn into in TF Lite,
and a guid that uniquely identifies a particular invocation.
Once you have built your whole TensorFlow graph, you can run it and train it
as usual, but after you have done that, you need to convert the graph into
a form that replaces these subgraphs wrapped in identities with stub ops. These
ops don't actually exist in the normal TensorFlow runtime, but will be
understood by toco later. The generated TensorFlow Lite flatbuffer file will
contain a custom operator called "cool_activation". Developers need to implement
and register this operator in TensorFlow Lite in order to run inference.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections as _collections
import copy as _copy
import json as _json
import uuid as _uuid
import six as _six
from tensorflow.core.framework import attr_value_pb2 as _attr_value_pb2
from tensorflow.core.framework import graph_pb2 as _graph_pb2
from tensorflow.core.framework import node_def_pb2 as _node_def_pb2
from tensorflow.python.framework import dtypes as _dtypes
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import tensor_util as _tensor_util
from tensorflow.python.framework.graph_util_impl import _bfs_for_reachable_nodes
from tensorflow.python.framework.graph_util_impl import _extract_graph_summary
from tensorflow.python.ops import array_ops as _array_ops
from tensorflow.python.util import compat as _compat
from tensorflow.python.util import deprecation as _deprecation
from tensorflow.python.util.all_util import remove_undocumented
from tensorflow.python.util.tf_export import tf_export as _tf_export
@_tf_export(v1=["lite.OpHint"])
@_deprecation.deprecated(
None,
"Please follow instructions under "
"https://www.tensorflow.org/lite/convert/operation_fusion for operation"
"fusion in tflite."
)
class OpHint(object):
"""A class that helps build tflite function invocations.
It allows you to take a bunch of TensorFlow ops and annotate the construction
such that toco knows how to convert it to tflite. This embeds a pseudo
function in a TensorFlow graph. This allows embedding high-level API usage
information in a lower level TensorFlow implementation so that an alternative
implementation can be substituted later.
Essentially, any "input" into this pseudo op is fed into an identity, and
attributes are added to that input before being used by the constituent ops
that make up the pseudo op. A similar process is done to any output that
is to be exported from the current op.
"""
# Attr constants that are used for representation in the GraphDef. These
# will be used on every Identity op that is involved in a total OpHint.
# Name of the OpHint function (cosmetic).
FUNCTION_NAME_ATTR = "_tflite_function_name"
# UUID of the function (each OpHint gets a new uuid).
FUNCTION_UUID_ATTR = "_tflite_function_uuid"
# The input index of the input (or nothing if it is an output).
FUNCTION_INPUT_INDEX_ATTR = "_tflite_function_input_index"
# The output index of the output (or nothing if it is an input).
FUNCTION_OUTPUT_INDEX_ATTR = "_tflite_function_output_index"
# An index that orders aggregate arguments. Aggregate arguments are ones
# that are separate but will be fused horizontally. For example a static LSTM
# has a lstm cell for each time step. Each one has a separate opHint, but a
# fused SequentialLSTM will treat this as a single tensor.
FUNCTION_SORT_INDEX_ATTR = "_tflite_function_sort_index"
# The way in which multiple parts of the aggregate argument will be joined
# into a fused operand. Valid options are OpHint.AGGREGATE_FIRST,
# OpHint.AGGREGATE_LAST, OpHint.AGGREGATE_STACK.
FUNCTION_AGGREGATE_ATTR = "_tflite_function_aggregate"
# On fused OpHint stub, the order of inputs that the final LSTM call will
# have. What this means is that the TensorFlow order might be
# "foo", "bar", "stuff" and you might want the TF lite op order to be
# "stuff", "foo", "bar", -1 (where -1 is unused). So you would set this
# attribute to [2, 0, 1, -1].
TFLITE_INPUT_INDICES = "_tflite_input_indices"
# OpHint level.
FUNCTION_LEVEL_ATTR = "_tflite_ophint_level"
# Ophint internal mapping, this is for high level Ophint only.
# This basically contains three kinds of mapping:
# 1) How parental ophinted inputs map to the first child ophinted inputs;
# 2) How internal children nodes are connected;
# 3) How parental ophinted outputs map to the last child ophinted outputs.
CHILDREN_INPUTS_MAPPINGS = "_tflite_children_ophint_inputs_mapping"
# Types of aggregations
# stack: stacks all ophints with matching tags. i.e. for a static rnn.
# specifically, this is good for an input or output to a static rnn cell.
AGGREGATE_STACK = "stack"
# first: only takes the first output (one with lowest sort index)
# of matching tags. This is good for the input state to an RNN.
AGGREGATE_FIRST = "first"
# aggregation last takes only the last tag (one with highest sort index).
# This is good for an output value on the last stack item of a
# static rnn.
AGGREGATE_LAST = "last"
class OpHintArgumentTracker(object):
"""Conceptually tracks indices of arguments of "OpHint functions".
The inputs and arguments of these functions both use an instance
of the class so they can have independent numbering.
"""
def __init__(self,
function_name,
unique_function_id,
node_name_prefix,
attr_name,
level=1,
children_inputs_mappings=None):
"""Initialize ophint argument.
Args:
function_name: Name of the function that this tracks arguments for.
unique_function_id: UUID of function that this tracks arguments for.
node_name_prefix: How identities that are created are named.
attr_name: Name of attribute to use to store the index for this hint.
i.e. FUNCTION_INPUT_INDEX or FUNCTION_OUTPUT_INDEX
level: Hierarchical level of the Ophint node, a number.
children_inputs_mappings: Inputs/Outputs mapping for children hints.
"""
# The global index is the argument index of the op. This is in contrast
# to the sort index which is the sequence number of a particular instance
# of a given global index. For example, you may have called add hint
# twice with the tag "foo". Then the global index will be 0 for both
# and the sort index will be 0 for the first added and 1 for the second.
self._function_name = function_name
self._unique_function_id = unique_function_id
self._next_global_index = 0 # The absolute global index
self._used_global_indices = set()
self._tag_to_global_index = {} # The argument index a given tag maps to
self._tag_to_next_sort_index = {} # The current index for each tag
self._node_name_prefix = node_name_prefix
self._attr_name = attr_name
self._level = level
self._children_inputs_mappings = children_inputs_mappings
def _get_new_global_index(self, index_override):
"""Return the next unused argument index in order or use an override.
Args:
index_override: An index to use instead of the next available or None
to use the next available.
Returns:
A valid global_index to use for the next hint argument.
Raises:
ValueError: If the index_override is already used by another hint.
"""
if index_override is None:
global_index = self._next_global_index
else:
if index_override in self._used_global_indices:
raise ValueError("Index %d was already used by another call to add")
global_index = index_override
# Make next_global_index valid
self._used_global_indices.add(global_index)
while self._next_global_index in self._used_global_indices:
self._next_global_index += 1
return global_index
def add(self, arg, tag=None, name=None, aggregate=None,
index_override=None):
"""Return a wrapped tensor of an input tensor as an argument.
Args:
arg: A TensorFlow tensor that should be considered an argument.
tag: String tag to identify arguments that should be packed.
name: Name of argument. This is included in the Identity hint op names.
aggregate: Strategy to aggregate.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
Note, aggregate is only valid if tag is specified.
index_override: Specify what input/output index should this be in the
final stub. i.e. add(arg0, index=1); add(arg1, index=0) will make the
final stub be as stub_func(inputs[arg1, arg0], outputs=[]) rather than
the default call order based ordering.
Returns:
A tensor representing the wrapped argument.
Raises:
ValueError: When indices are not consistent.
"""
# Find the appropriate index
if tag is None:
if aggregate is not None:
raise ValueError("You must specify `tag` if using aggregate.")
global_index = self._get_new_global_index(index_override)
sort_index = None
else:
if aggregate is None:
raise ValueError("You must specify `aggregate` if using tag.")
if tag not in self._tag_to_global_index:
self._tag_to_global_index[tag] = (
self._get_new_global_index(index_override))
self._tag_to_next_sort_index[tag] = 0
elif (index_override and
index_override != self._tag_to_global_index[tag]):
raise ValueError(
"Tag %r was called with two indices %r and %r" %
(tag, index_override, self._tag_to_global_index[tag]))
global_index = self._tag_to_global_index[tag]
sort_index = self._tag_to_next_sort_index[tag]
self._tag_to_next_sort_index[tag] += 1
uuid = self._unique_function_id
name = "%s-%s-%s-%r-%r-%s" % (self._node_name_prefix, self._function_name,
uuid, global_index, sort_index, name)
identity_op = _array_ops.identity(arg, name=name)
# pylint: disable=protected-access
identity_op.op._set_attr(
OpHint.FUNCTION_NAME_ATTR,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(self._function_name)))
identity_op.op._set_attr(
OpHint.FUNCTION_UUID_ATTR,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(self._unique_function_id)))
identity_op.op._set_attr(
self._attr_name, _attr_value_pb2.AttrValue(i=global_index))
identity_op.op._set_attr(OpHint.FUNCTION_LEVEL_ATTR,
_attr_value_pb2.AttrValue(i=self._level))
if self._children_inputs_mappings:
identity_op.op._set_attr(
OpHint.CHILDREN_INPUTS_MAPPINGS,
_attr_value_pb2.AttrValue(
s=_compat.as_bytes(_json.dumps(
self._children_inputs_mappings))))
if sort_index is not None:
identity_op.op._set_attr(
OpHint.FUNCTION_SORT_INDEX_ATTR,
_attr_value_pb2.AttrValue(i=sort_index))
if aggregate is not None:
identity_op.op._set_attr(
OpHint.FUNCTION_AGGREGATE_ATTR,
_attr_value_pb2.AttrValue(s=_compat.as_bytes((aggregate))))
# pylint: enable=protected-access
return identity_op
def __init__(self,
function_name,
level=1,
children_inputs_mappings=None,
**kwargs):
"""Create a OpHint.
Args:
function_name: Name of the function (the custom op name in tflite)
level: OpHint level.
children_inputs_mappings: Children OpHint inputs/outputs mapping.
children_inputs_mappings should look like below:
"parent_first_child_input":
[{"parent_input_index": num, "child_input_index": num}, ...]
"parent_last_child_output":
[{"parent_output_index": num, "child_output_index": num}, ...]
"internal_children_input_output":
[{"child_input_index": num, "child_output_index": num}, ...]
**kwargs: Keyword arguments of any constant attributes for the function.
"""
self._function_name = function_name
self._level = level
if self._level == 1:
assert children_inputs_mappings is None
else:
assert isinstance(children_inputs_mappings, dict)
self._children_inputs_mappings = children_inputs_mappings
if self._children_inputs_mappings is not None:
self._validate_children_inputs_mappings(self._children_inputs_mappings)
self._unique_function_id = _uuid.uuid1().hex
self._attrs_to_store_later = kwargs
self._stored_attrs = False
self._inputs = OpHint.OpHintArgumentTracker(
self._function_name, self._unique_function_id, "InputHint",
OpHint.FUNCTION_INPUT_INDEX_ATTR, level, self._children_inputs_mappings)
self._outputs = OpHint.OpHintArgumentTracker(
self._function_name, self._unique_function_id, "OutputHint",
OpHint.FUNCTION_OUTPUT_INDEX_ATTR, level,
self._children_inputs_mappings)
def _validate_children_inputs_mappings(self, children_inputs_mappings):
"""Validate children inputs mappings is in the right format.
Args:
children_inputs_mappings: the Children ophint inputs/outputs mapping.
"""
assert isinstance(children_inputs_mappings, dict)
assert "parent_first_child_input" in children_inputs_mappings
assert "parent_last_child_output" in children_inputs_mappings
assert "internal_children_input_output" in children_inputs_mappings
# validate parent_first_child_input.
def assert_dictlist_has_keys(dictlist, keys):
for dikt in dictlist:
assert isinstance(dikt, dict)
for key in keys:
assert key in dikt
assert_dictlist_has_keys(
children_inputs_mappings["parent_first_child_input"],
["parent_ophint_input_index", "first_child_ophint_input_index"])
assert_dictlist_has_keys(
children_inputs_mappings["parent_last_child_output"],
["parent_output_index", "child_output_index"])
assert_dictlist_has_keys(
children_inputs_mappings["internal_children_input_output"],
["child_input_index", "child_output_index"])
def _setattr(self, dest_op, name, value):
tensor_value = _ops.convert_to_tensor(value)
# pylint: disable=protected-access
dest_op.op._set_attr(name, _attr_value_pb2.AttrValue(
tensor=tensor_value.op.node_def.attr["value"].tensor))
# pylint: enable=protected-access
def add_input(self, *args, **kwargs):
"""Add a wrapped input argument to the hint.
Args:
*args: The input tensor.
**kwargs:
"name" label
"tag" a tag to group multiple arguments that will be aggregated. I.e.
a string like 'cool_input'. Basically multiple inputs can be added
to the same hint for parallel operations that will eventually be
combined. An example would be static_rnn which creates multiple copies
of state or inputs.
"aggregate" aggregation strategy that is valid only for tag non None.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
"index_override" The global index to use. This corresponds to the
argument order in the final stub that will be generated.
Returns:
The wrapped input tensor.
"""
return self._inputs.add(*args, **kwargs)
def add_output(self, *args, **kwargs):
"""Add a wrapped output argument to the hint.
Args:
*args: The output tensor.
**kwargs:
"name" label
"tag" a tag to group multiple arguments that will be aggregated. I.e.
a string like 'cool_input'. Basically multiple inputs can be added
to the same hint for parallel operations that will eventually be
combined. An example would be static_rnn which creates multiple copies
of state or inputs.
"aggregate" aggregation strategy that is valid only for tag non None.
Acceptable values are OpHint.AGGREGATE_FIRST, OpHint.AGGREGATE_LAST,
and OpHint.AGGREGATE_STACK.
"index_override" The global index to use. This corresponds to the
argument order in the final stub that will be generated.
Returns:
The wrapped output tensor.
"""
return self._outputs.add(*args, **kwargs)
def add_inputs(self, *args, **kwargs):
"""Add a sequence of inputs to the function invocation.
Args:
*args: List of inputs to be converted (should be Tf.Tensor).
**kwargs: This allows 'names' which should be a list of names.
Returns:
Wrapped inputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
if "names" in kwargs:
return [
self._inputs.add(arg, name=name)
for arg, name in zip(args, kwargs["names"])
]
else:
return [self._inputs.add(arg) for arg in args]
def add_outputs(self, *args, **kwargs):
"""Add a sequence of outputs to the function invocation.
Args:
*args: List of outputs to be converted (should be tf.Tensor).
**kwargs: This allows 'names' which should be a list of names.
Returns:
Wrapped outputs (identity standins that have additional metadata). These
are also tf.Tensor's.
"""
if "names" in kwargs:
return [
self._outputs.add(arg, name=name)
for arg, name in zip(args, kwargs["names"])
]
else:
return [self._outputs.add(arg) for arg in args]
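# Rough sketch of tagged, aggregated hint arguments (assumed names and shapes,
# mirroring the static-rnn case described in add_input above):
#
#   hint = OpHint("simple_rnn")
#   for t in range(num_steps):   # num_steps, inputs, cell and state are assumed
#       x_t = hint.add_input(inputs[t], tag="x", aggregate=OpHint.AGGREGATE_STACK)
#       state = cell(x_t, state)
#   out = hint.add_output(state, tag="out", aggregate=OpHint.AGGREGATE_LAST)
#
# After conversion, the stacked "x" inputs become a single packed tensor feeding
# one "simple_rnn" stub op, and only the last "out" value is exposed.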
class _LiteOperand(object):
"""Abstract operand for a tflite hint function._dynamic_rnn_loop.
This is a base class that handles representing arguments to an OpHint.
It also is able to serialize operands to the stubbed graph_def.
Child classes are responsible for being able to
store information about the hint identity operators. They are also responsible
for knowing how to serialize to output graphdefs.
Typically this will be implemented by holding one or more identity nodes
that were previously discovered as hints.
"""
def aggregate_and_return_name_for_input(self, out_graphdef):
"""This adds the node(s) to out_graphdef and returns the input node name.
Args:
out_graphdef: A graphdef that is ready to have this input added.
Returns:
The output that the stub should use as an input for this operand.
Raises:
RuntimeError: if the method is not implemented.
"""
del out_graphdef
raise RuntimeError("Unimplemented abstract method.")
def aggregate_and_return_name_for_output(self, fused_op_name, output_index,
out_graphdef):
"""Add node(s) to graph representing output operands and returns type.
Args:
fused_op_name: name of the fused op stub name.
output_index: Output index that we are currently processing from stub.
out_graphdef: The destination graphdef we are currently building up.
Returns:
The datatype of this identity.
Raises:
RuntimeError: if the method is not implemented.
"""
del fused_op_name, output_index, out_graphdef
raise RuntimeError("Unimplemented abstract method.")
class _LiteSingleOperand(_LiteOperand):
"""A simple operand that is non-aggregated (i.e. most hints)."""
def __init__(self, node):
_LiteOperand.__init__(self)
self.node = node
self.name = _tensor_name_base(node.name)
def flatten(self):
return [self.name]
def aggregate_and_return_name_for_input(self, out_graphdef):
return self.name
def aggregate_and_return_name_for_output(self, fused_op_name, index,
out_graphdef):
output_node = _copy.deepcopy(self.node)
del output_node.input[:]
output_node.input.append(_tensorflow_output_name(fused_op_name, index))
out_graphdef.node.extend([output_node])
return self.node.attr["type"].i
def __str__(self):
return str(self.name)
class _LiteAggregateOperand(_LiteOperand):
"""An operand for a tflite hint function that is aggregated from many.
For example, an LSTM is a grid of operators that are all related. Inputs
going into them may need to be fused, so they should all be tracked as
related arguments.
"""
def __init__(self, aggregation):
_LiteOperand.__init__(self)
self.aggregation = aggregation
self.names = {}
self.nodes = {}
self.flattened = None
def add(self, sort, node):
self.names[sort] = _tensor_name_base(node.name)
self.nodes[sort] = node
def flatten_nodes(self):
"""Return a list of all the node protos in aggregation sorted order."""
if not self.flattened:
self.flattened = [None] * len(self.nodes)
for idx, node in _six.iteritems(self.nodes):
self.flattened[idx] = node
for n in self.flattened:
if n is None:
raise RuntimeError("Aggregate was missing argument.")
if self.aggregation == OpHint.AGGREGATE_FIRST:
self.flattened = self.flattened[:1]
elif self.aggregation == OpHint.AGGREGATE_LAST:
self.flattened = self.flattened[-1:]
elif self.aggregation == OpHint.AGGREGATE_STACK:
pass
else:
raise ValueError("Invalid aggregation type %r specified" %
self.aggregation)
return self.flattened
def flatten(self):
"""Return a list of all node names in aggregation sorted sorter."""
return [_tensor_name_base(x.name) for x in self.flatten_nodes()]
def aggregate_and_return_name_for_input(self, out_graphdef):
"""This adds the nodes to out_graphdef and returns an aggregated output.
In particular, if you have 4 inputs to a hint stub, this will be the
node that you can use as an output. I.e. you have 4 timesteps from a
static rnn, then a fused UnidirectionalLSTM will expect 1 input with
all 4 time steps. So here we make a pack and return the output name of
that pack.
Args:
out_graphdef: A graphdef that is ready to have this input added.
Returns:
The name of a pack that aggregates this node.
"""
flattened = self.flatten_nodes()
if (self.aggregation == OpHint.AGGREGATE_FIRST) or (
self.aggregation == OpHint.AGGREGATE_LAST):
assert len(flattened) == 1
if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:
return _tensor_name_base(flattened[0].name)
else:
new_node = _node_def_pb2.NodeDef()
new_node.op = "Pack"
new_node.name = "OpHintStack-%s" % flattened[0].name
new_node.attr["N"].i = len(flattened)
new_node.attr["T"].type = flattened[0].attr["T"].type
for discrete in flattened:
new_node.input.append(_tensor_name_base(discrete.name))
out_graphdef.node.extend([new_node])
return new_node.name
def aggregate_and_return_name_for_output(self, fused_op_name, output_index,
out_graphdef):
"""This adds to `out_graphdef` all the unaggregated outputs.
I.e. we are outputting from a fused stub, but we need to make it compatible
with the unfused original graph so we insert an unpack. Ideally in a later
stage the unpack -> pack sequences will be removed.
Args:
fused_op_name: The name of the stub we are in the process of fusing.
output_index: The output index this object represents.
out_graphdef: The graphdef we are in the process of building.
Returns:
The type of the aggregated output (so we can finish building the stub
op).
"""
flattened = self.flatten_nodes()
if (self.aggregation == OpHint.AGGREGATE_FIRST) or (
self.aggregation == OpHint.AGGREGATE_LAST):
assert len(flattened) == 1
if len(flattened) == 1 and self.aggregation != OpHint.AGGREGATE_STACK:
temp_op = _LiteSingleOperand(flattened[0])
return temp_op.aggregate_and_return_name_for_output(
fused_op_name, output_index, out_graphdef)
else:
stack_node = _node_def_pb2.NodeDef()
stack_node.op = "Unpack"
stack_node.name = "OpHintUnstack-%s" % flattened[0].name
stack_node.attr["num"].i = len(flattened)
output_type = flattened[0].attr["T"].type
stack_node.attr["T"].type = output_type
stack_node.input.append(
_tensorflow_output_name(fused_op_name, output_index))
out_graphdef.node.extend([stack_node])
for idx, discrete in enumerate(flattened):
output_node = _copy.deepcopy(discrete)
del output_node.input[:]
output_node.input.append(_tensorflow_output_name(stack_node.name, idx))
out_graphdef.node.extend([output_node])
return output_type
def __str__(self):
s = "\t\t\tAGGREGATE %s\n" % self.aggregation
for sort, val in self.names.iteritems():
s += "\t\t\t%d: %s\n" % (sort, val)
return s
class _LiteFuncCall(object):
"""Represent a TensorFlow Lite custom function.
This is used to accumulate found hints in the graphdef into a single
conceptual unit.
Attributes:
inputs: inputs to the op (hash from index # to argument)
outputs: outputs to the op (hash from index # to argument)
function_name: the tflite custom op name to use
uuid: a unique call id for this particular call (i.e. multiple function
calls would have the same function_name but different uuids).
params: A param name to key value for op constant data. I.e. for axis on a
reduction, strides on a convolution, etc.
level: Level of the OpHint.
children_inputs_mappings: If the Ophint has children, children inputs
mappings indicate how their inputs & outputs are mapped.
"""
def __init__(self):
self.inputs = {}
self.outputs = {}
self.function_name = None
self.uuid = None
self.params = {}
self.level = -1
self.children_inputs_mappings = {}
def flattened_inputs_and_outputs(self):
"""Return a list of inputs and outputs in a flattened format.
Returns:
Tuple of (inputs, outputs), where inputs and outputs are each a list of names.
"""
def _flatten(input_or_output_dict):
flattened_items = []
for item in input_or_output_dict.values():
flattened_items.extend(item.flatten())
return flattened_items
return _flatten(self.inputs), _flatten(self.outputs)
def __str__(self):
def format_args(items):
s = ""
for idx, item in items.iteritems():
s += ("\t\t%d:\n" % idx) + str(item)
return s
inputs_str = "\tInputs\n" + format_args(self.inputs)
outputs_str = "\tOutputs\n" + format_args(self.outputs)
return (
"tflite function %s call %s level %d "
"\n\tinputs:\n\t\t%s\n\toutputs:\n\t\t%s" %
(self.function_name, self.uuid, self.level, inputs_str, outputs_str))
def _find_all_hints_in_nodes(nodes):
"""Look at the all the input nodes and return a list of LiteFuncCall objs.
Args:
nodes: A TensorFlow graph_def to look for LiteFuncCalls.
Returns:
A dict mapping each function uuid to its corresponding `_LiteFuncCall` object.
"""
func_calls = _collections.defaultdict(_LiteFuncCall)
for node in nodes:
attr = node.attr
# This is an op hint if it has a FUNCTION_UUID_ATTR, otherwise skip
if (OpHint.FUNCTION_UUID_ATTR not in attr or
not attr[OpHint.FUNCTION_UUID_ATTR].s):
continue
uuid = attr[OpHint.FUNCTION_UUID_ATTR].s
# Start building function
call_def = func_calls[uuid]
call_def.uuid = uuid
call_def.function_name = attr[OpHint.FUNCTION_NAME_ATTR].s
call_def.level = attr[OpHint.FUNCTION_LEVEL_ATTR].i
# Get sorting and aggregation information
sort = (
attr[OpHint.FUNCTION_SORT_INDEX_ATTR].i
if OpHint.FUNCTION_SORT_INDEX_ATTR in attr else None)
if sort == -1:
sort = None
aggregation = None
if OpHint.FUNCTION_AGGREGATE_ATTR in attr:
aggregation = _compat.as_text(attr[OpHint.FUNCTION_AGGREGATE_ATTR].s)
if OpHint.CHILDREN_INPUTS_MAPPINGS in attr:
call_def.children_inputs_mappings = _json.loads(
_compat.as_text(attr[OpHint.CHILDREN_INPUTS_MAPPINGS].s))
# Add the input or output
def put_operand(stuff, index, sort, operand, aggregation):
"""Add a given index into the function structure."""
if sort is None:
stuff[index] = _LiteSingleOperand(operand)
else:
if index not in stuff:
stuff[index] = _LiteAggregateOperand(aggregation)
stuff[index].add(sort, operand)
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
put_operand(call_def.inputs, attr[OpHint.FUNCTION_INPUT_INDEX_ATTR].i,
sort, node, aggregation)
if OpHint.FUNCTION_OUTPUT_INDEX_ATTR in attr:
put_operand(call_def.outputs, attr[OpHint.FUNCTION_OUTPUT_INDEX_ATTR].i,
sort, node, aggregation)
# Remember attributes
for a in attr:
if a.startswith("_tflite_attr_"):
call_def.params[a.replace("_tflite_attr_", "")] = attr[a].tensor
return func_calls
def _extract_topology_sequence_mapping(nodes):
return dict(
(_tensor_name_base(node.name), idx) for idx, node in enumerate(nodes))
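# e.g. nodes named ["a", "b", "c"] map to {"a": 0, "b": 1, "c": 2}.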
def _find_children_hints_in_while_loop(function_def, nodes_mapping):
"""Find children hints and all nodes inside the while loop.
Args:
function_def: Function def of the while loop.
nodes_mapping: While loop input_arg : real node name.
Returns:
Ordered children hints and all re-mapped nodes inside the while loop.
"""
new_nodes = []
# Make nodes inside function def inputs point to the real nodes.
for node in function_def.node_def:
for i, _ in enumerate(node.input):
if node.input[i] in nodes_mapping:
node.input[i] = nodes_mapping[node.input[i]]
new_nodes.append(_copy.deepcopy(node))
name_to_seq_num = _extract_topology_sequence_mapping(function_def.node_def)
children_hints = _find_all_hints_in_nodes(new_nodes)
children_hints_q = []
# Ordered by the outputs.
for hint in _six.itervalues(children_hints):
_, output_names = hint.flattened_inputs_and_outputs()
seq = name_to_seq_num[output_names[0]]
for output_name in output_names:
seq = min(seq, name_to_seq_num[output_name])
children_hints_q.append((seq, hint))
children_hints_q.sort(key=lambda tup: tup[0])
ordered_children_hints = [x[1] for x in children_hints_q]
return ordered_children_hints, new_nodes
def _find_children_hints(call, graph_def):
"""Find all children hints.
For a given OpHint, we find all children hints inside it. We also copy all the
nodes inside function defs (if applicable) to the original graph_def; they are
returned in a list as well.
Args:
call: Parent OpHint that contains children ophints.
graph_def: Original graph def.
Returns:
Ordered children hints inside the parent ophint; new graph def that contains
nodes inside function defs (if applicable); nodes inside function defs.
"""
name_to_input_name, _, _ = _extract_graph_summary(graph_def)
input_names, output_names = call.flattened_inputs_and_outputs()
reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)
reachable_by_output = _bfs_for_reachable_nodes(output_names,
name_to_input_name)
output_nodes_set = set(output_names)
children_hints = []
out = _graph_pb2.GraphDef()
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
function_def_nodes = set()
for node in graph_def.node:
out.node.extend([_copy.deepcopy(node)])
n = _tensor_name_base(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
# special handle for while loop function def.
if node.op == "While" or node.op == "StatelessWhile":
body_name = node.attr["body"].func.name
inputs_outside_loop = node.input
for function_def in graph_def.library.function:
if function_def.signature.name == body_name:
function_inputs = function_def.signature.input_arg
assert len(inputs_outside_loop) == len(function_inputs)
nodes_mapping = {}
for i, function_input in enumerate(function_inputs):
nodes_mapping[function_input.name] = inputs_outside_loop[i]
(children_hints_in_loop,
new_nodes) = _find_children_hints_in_while_loop(
function_def, nodes_mapping)
function_def_nodes.update([x.name for x in new_nodes])
children_hints.extend(children_hints_in_loop)
out.node.extend(new_nodes)
return children_hints, out, function_def_nodes
def _tensor_name_base(full_tensor_name):
"""Removes the device assignment code from a tensor.
e.g. _tensor_name_base("foo:3") => "foo"
Args:
full_tensor_name: A tensor name that is annotated with a device placement
(this is what tensor flow introspection gives).
Returns:
A name without any output-slot suffix or control-input marker.
"""
if full_tensor_name.startswith("^"):
return full_tensor_name[1:]
return full_tensor_name.split(":")[0]
def _tensorflow_output_name(tensor_name, output_index):
return tensor_name if output_index == 0 else "%s:%d" % (tensor_name,
output_index)
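# e.g. _tensorflow_output_name("foo", 0) -> "foo" and
#      _tensorflow_output_name("foo", 2) -> "foo:2".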
def _check_subgraph_closed(n, reachable_by_input, input_nodes_set,
name_to_input_name):
"""Checks to make sure node only connects to predecessor graph through inputs.
Args:
n: Node to check
reachable_by_input: Nodes that are reachable by all inputs of subgraph
input_nodes_set: The set of nodes that are "inputs".
name_to_input_name: Maps from name to the list of inputs.
Raises:
TypeError: If the given node uses items past inputs directly.
"""
next_to_visit = [n]
visited = set()
while next_to_visit:
current_node = next_to_visit.pop()
visited.add(current_node)
if (current_node in reachable_by_input and
current_node not in input_nodes_set):
raise TypeError("Node %s uses input %s not in input_nodes." %
(n, current_node))
if current_node not in input_nodes_set:
next_to_visit += [
input_node for input_node in name_to_input_name[current_node]
if input_node not in visited
]
def _convert_single_op_hint_to_stub(call,
graph_def,
function_def_nodes=None,
is_last_run=True):
"""Given a graph_def, converts `call` into a stub and returns a new graph_def.
Args:
call: A single function call to be converted.
graph_def: A graph_def to use as input (that has call obviously).
function_def_nodes: Nodes inside the function def those are not connected to
the graph.
is_last_run: Whether it is the last run for a given pass (for OpHint has
children).
Returns:
A new transformed graph-def that has call as a stub (single op).
Note: after this process, the graph_def can no longer be loaded into
the tensorflow runtime, so all future manipulations are done in graph_def
level.
"""
if function_def_nodes is None:
function_def_nodes = set()
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
graph_def)
input_names, output_names = call.flattened_inputs_and_outputs()
reachable_by_input = _bfs_for_reachable_nodes(input_names, name_to_input_name)
reachable_by_output = _bfs_for_reachable_nodes(output_names,
name_to_input_name)
output_nodes_set = set(output_names)
nodes_after_fuse = []
nodes_deleted_by_fuse = set()
# Classify each node. We want to keep everything reachable by input; nodes
# reachable only by output are internal to the hint and get deleted, while
# nodes reachable by neither are nodes that come after the fused op.
for node in graph_def.node:
n = _tensor_name_base(node.name)
if n in reachable_by_output:
if n not in reachable_by_input and n not in output_nodes_set:
nodes_deleted_by_fuse.add(n)
elif n not in reachable_by_input and n not in function_def_nodes:
# n is a node that comes after all the fusings, so keep it.
nodes_after_fuse.append(n)
else:
# n is in the graph but not connected to the chain of dependencies. In the
# last run we delete such nodes; otherwise we keep them.
if not is_last_run:
nodes_after_fuse.append(n)
# Make a new graphdef with all the pre-input and input nodes
out = _graph_pb2.GraphDef()
reachable_by_input_sorted = sorted(
list(reachable_by_input), key=lambda n: name_to_seq_num[n])
for node in reachable_by_input_sorted:
out.node.extend([_copy.deepcopy(name_to_node[node])])
# Create any stacks needed to aggregate arguments into a single input,
# e.g. for static_rnn's.
sorted_input_indices = list(call.inputs.keys())
sorted_input_indices.sort()
sorted_output_indices = list(call.outputs.keys())
sorted_output_indices.sort()
new_node = _node_def_pb2.NodeDef()
# Delegate to each operand to produce the proper new input for this stub node.
# In particular, an aggregate input will now be a Pack of some previously
# non-fused things.
optional_input_node = _node_def_pb2.NodeDef()
optional_input_node.name = "Const" + str(_uuid.uuid1().hex)
optional_input_node.op = "Const"
optional_input_node.attr["dtype"].CopyFrom(
_attr_value_pb2.AttrValue(type=_dtypes.float32.as_datatype_enum))
optional_input_node.attr["value"].CopyFrom(
_attr_value_pb2.AttrValue(
tensor=_tensor_util.make_tensor_proto([-1], _dtypes.float32, [1])))
out.node.extend([optional_input_node])
max_index = max(sorted_input_indices) + 1
for cur_index in range(max_index):
if cur_index in sorted_input_indices:
inputs = call.inputs[cur_index]
input_name = inputs.aggregate_and_return_name_for_input(out)
new_node.input.append(input_name)
else:
new_node.input.append(optional_input_node.name)
new_node.attr[OpHint.TFLITE_INPUT_INDICES].list.i.extend(sorted_input_indices)
# Create the function
new_node.op = call.function_name
new_node.name = call.uuid
out.node.extend([new_node])
# Now call each output argument to give them a chance to make the proper
# output type and add it to our new_node.
output_dtypes = []
max_output_index = max(sorted_output_indices) + 1
for cur_index in range(max_output_index):
if cur_index in sorted_output_indices:
output = call.outputs[cur_index]
output_dtype = (
output.aggregate_and_return_name_for_output(new_node.name, cur_index,
out))
else:
output_dtype = optional_input_node.attr["type"].i
output_dtypes.append(output_dtype)
new_node.attr["_output_types"].list.type[:] = output_dtypes
new_node.attr["_output_quantized"].b = False
# Add post output nodes that do not depend on the outputs
for n in nodes_after_fuse:
should_keep = True
for input_name in name_to_input_name[n]:
if input_name in nodes_deleted_by_fuse:
should_keep = False
if should_keep:
out.node.extend([_copy.deepcopy(name_to_node[n])])
# Misc. graph_def data that needs copying.
out.library.CopyFrom(graph_def.library)
out.versions.CopyFrom(graph_def.versions)
return out
def _remove_one_redundant_stack_unstack(in_graph_def):
"""Removes a stack->unstack pattern from in_graph_def in a returned graph.
Args:
in_graph_def: Graph def to use as input.
Returns:
Simplified tuple (graph_def, changed_something) where changed_something
is true if anything was done.
"""
name_to_input_name, name_to_node, name_to_seq_num = _extract_graph_summary(
in_graph_def)
del name_to_seq_num
do_generic_pack_unpack = True
out = _graph_pb2.GraphDef()
out.library.CopyFrom(in_graph_def.library)
out.versions.CopyFrom(in_graph_def.versions)
for n in in_graph_def.node:
node_name = _tensor_name_base(n.name)
if not node_name.startswith("OpHintStack") and not n.op.startswith("Pack"):
continue
next_to_visit = [node_name]
visited = set()
unpack_nodes = set()
pack_node = node_name
    # Find a pattern of an unstack connected to a stack (with identities
    # in between).
matches_pattern = True
is_hint_created_stack = False
while next_to_visit:
current_node_name = next_to_visit[0]
visited.add(current_node_name)
del next_to_visit[0]
node = name_to_node[current_node_name]
is_op_hint_stack = node.name.startswith("OpHintStack")
is_op_hint_unstack = node.name.startswith("OpHintUnstack")
if (node.op == "Identity" or is_op_hint_stack or
(do_generic_pack_unpack and node.op == "Pack")):
is_hint_created_stack |= is_op_hint_stack
next_to_visit += [
input_node for input_node in name_to_input_name[current_node_name]
if input_node not in visited
]
elif (is_op_hint_unstack or
(do_generic_pack_unpack and node.op == "Unpack")):
unpack_nodes.add(node.name)
is_hint_created_stack &= is_op_hint_unstack
else:
matches_pattern = False
break
visited.add(node.name)
if matches_pattern and len(unpack_nodes) == 1:
pack_node = node_name
# Check to see if anyone depends on the intermediate identity or the
# Unstacked form
no_external_dependency = True
for other_n in in_graph_def.node:
if other_n.name in visited:
continue
for input_tensor in name_to_input_name[other_n.name]:
input_op = _tensor_name_base(input_tensor)
if input_op in visited and input_op != pack_node:
no_external_dependency = False
      # Proceed with the substitution if the stack/unstack pair was created
      # through hints, or if it was not but nobody is consuming anything
      # between the stack and unstack.
if is_hint_created_stack or no_external_dependency:
end = unpack_nodes.pop()
end_input = name_to_node[end].input[0]
        # All nodes that depend on the final stack need to be rewritten to use
        # the original input of the unstack instead.
for other_n in in_graph_def.node:
node_name = _tensor_name_base(other_n.name)
if node_name not in visited:
new_node = _copy.deepcopy(other_n)
new_node.input[:] = [
(end_input if stripped == pack_node else non_stripped)
for stripped, non_stripped in zip(name_to_input_name[node_name],
new_node.input[:])
]
out.node.extend([new_node])
return out, True
return in_graph_def, False
def _remove_redundant_stack_unstack(graph_def):
curr = graph_def
del graph_def
changed_stuff = True
while changed_stuff:
curr, changed_stuff = _remove_one_redundant_stack_unstack(curr)
return curr
def _get_correct_mapping(original_index, nodes):
  # Special handling for the case where the index is -1:
  # return the last (largest) index.
if original_index == -1:
node_indices = nodes.keys()
node_indices = sorted(node_indices)
return node_indices[-1]
return original_index
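# Illustrative sketch (not part of the original module): _get_correct_mapping
# resolves the special index -1 to the largest key of an inputs/outputs dict
# and passes every other index through unchanged. The dict below is made up
# purely for demonstration.
def _example_get_correct_mapping():
  fake_outputs = {0: "a", 1: "b", 4: "c"}
  assert _get_correct_mapping(-1, fake_outputs) == 4  # -1 -> last index
  assert _get_correct_mapping(1, fake_outputs) == 1  # other indices unchanged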
def _convert_op_hints_to_stubs_helper(
graph_def, write_callback=lambda sess, graph_def: None):
"""Converts a graph_def to a new graph_def where all op hints are stubbed.
Args:
graph_def: A graph def that we should convert.
write_callback: A function pointer that can be used to write intermediate
steps of graph transformation (optional).
Returns:
A new stubbed graph_def.
"""
hints = _find_all_hints_in_nodes(graph_def.node)
hints_q = []
for hint in _six.itervalues(hints):
hints_q.append((hint.level, hint.uuid))
hints_q.sort(key=lambda tup: tup[0])
for i in range(len(hints_q) - 1, -1, -1):
level, hint_uuid = hints_q[i]
curr_graph_def = graph_def
del graph_def # prevent using graph_def again (common source of error)
for i in range(len(hints_q) - 1, -1, -1):
level, hint_uuid = hints_q[i]
if level >= 2:
children_hints, curr_graph_def, function_def_nodes = _find_children_hints(
hints[hint_uuid], curr_graph_def)
# pylint: disable=superfluous-parens
assert (len(children_hints) > 0) # pylint: disable=g-explicit-length-test
# pylint: enable=superfluous-parens
      # Re-wire the children hints' inputs/outputs so that a later child's
      # inputs connect to the previous child's outputs.
children_inputs_mappings = hints[hint_uuid].children_inputs_mappings
for j, child_hint in enumerate(children_hints):
if j == 0:
for mapping in children_inputs_mappings["parent_first_child_input"]:
parent_input_index = _get_correct_mapping(
mapping["parent_ophint_input_index"], hints[hint_uuid].inputs)
child_input_index = _get_correct_mapping(
mapping["first_child_ophint_input_index"], child_hint.inputs)
child_hint.inputs[child_input_index] = hints[hint_uuid].inputs[
parent_input_index]
else:
for mapping in children_inputs_mappings[
"internal_children_input_output"]:
input_index = _get_correct_mapping(mapping["child_input_index"],
child_hint.inputs)
output_index = _get_correct_mapping(mapping["child_output_index"],
children_hints[j - 1].outputs)
child_hint.inputs[input_index] = children_hints[
j - 1].outputs[output_index]
if j == len(children_hints) - 1:
for mapping in children_inputs_mappings["parent_last_child_output"]:
parent_output_index = _get_correct_mapping(
mapping["parent_output_index"], hints[hint_uuid].outputs)
child_output_index = _get_correct_mapping(
mapping["child_output_index"], child_hint.outputs)
child_hint.outputs[child_output_index] = hints[hint_uuid].outputs[
parent_output_index]
for j, child_hint in enumerate(children_hints):
curr_graph_def = _convert_single_op_hint_to_stub(
child_hint, curr_graph_def, function_def_nodes,
j == len(children_hints) - 1)
else:
curr_graph_def = _convert_single_op_hint_to_stub(hints[hint_uuid],
curr_graph_def)
write_callback(curr_graph_def, "initial")
# The stubbing process can create stacks/unstacks in the case of LSTMs
# remove them.
curr_graph_def = _remove_redundant_stack_unstack(curr_graph_def)
return curr_graph_def
def find_all_hinted_output_nodes(session=None, graph_def=None):
"""Find all Ophints output nodes in the graph.
This is used to get all the output nodes those are ophinted, it is important
for operation like convert_variables_to_constants keep all ophints structure.
Note: only one of session or graph_def should be used, not both.
Why this can be useful? Some TensorFlow ops (e.g. bidirectional rnn), can
generate multiple outputs for unfused subgraph. If not all output nodes are
consumed, graph optimization can potentially drop the unused nodes and cause
ophints in an invalid states (due to missing ophinted output nodes). So it's
important for us to find all those hinted output nodes and make sure they're
not discarded away.
Args:
session: A TensorFlow session that contains the graph to convert.
graph_def: A graph def that we should convert.
Returns:
A list of OpHints output nodes.
Raises:
ValueError: If both session and graph_def are provided.
"""
if session is not None and graph_def is not None:
raise ValueError("Provide only one of session and graph_def.")
hinted_outputs_nodes = []
if session is not None:
hints = _find_all_hints_in_nodes(session.graph_def.node)
elif graph_def is not None:
hints = _find_all_hints_in_nodes(graph_def.node)
for hint in _six.itervalues(hints):
_, output_nodes = hint.flattened_inputs_and_outputs()
hinted_outputs_nodes.extend(output_nodes)
return hinted_outputs_nodes
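# Illustrative sketch (not part of the original module): a hypothetical helper
# showing how the hinted output nodes could be appended to an output-node list
# before freezing a graph, so graph pruning does not drop them. The helper name
# and the ":0"-style suffix stripping are assumptions for demonstration.
def _example_outputs_to_keep(session, output_node_names):
  hinted = find_all_hinted_output_nodes(session=session)
  # Strip any tensor suffixes (e.g. ":0") so the names match node names.
  hinted_node_names = [name.split(":")[0] for name in hinted]
  return list(output_node_names) + hinted_node_names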
def is_ophint_converted(graph_def):
if graph_def is None:
raise ValueError("Must provide the graph_def.")
ophint_converted = False
for node in graph_def.node:
attr = node.attr
if OpHint.FUNCTION_INPUT_INDEX_ATTR in attr:
ophint_converted = True
break
return ophint_converted
@_tf_export(v1=["lite.experimental.convert_op_hints_to_stubs"])
@_deprecation.deprecated(
None,
"Please follow instructions under "
"https://www.tensorflow.org/lite/convert/operation_fusion for operation"
"fusion in tflite."
)
def convert_op_hints_to_stubs(session=None,
graph_def=None,
write_callback=lambda graph_def, comments: None):
"""Converts a graphdef with LiteOp hints into stub operations.
This is used to prepare for toco conversion of complex intrinsic usages.
Note: only one of session or graph_def should be used, not both.
Args:
session: A TensorFlow session that contains the graph to convert.
graph_def: A graph def that we should convert.
write_callback: A function pointer that can be used to write intermediate
steps of graph transformation (optional).
Returns:
A new graphdef with all ops contained in OpHints being replaced by
a single op call with the right parameters.
Raises:
ValueError: If both session and graph_def are provided.
"""
if session is not None and graph_def is not None:
raise ValueError("Provide only one of session and graph_def.")
if session is not None:
return _convert_op_hints_to_stubs_helper(session.graph_def, write_callback)
elif graph_def is not None:
return _convert_op_hints_to_stubs_helper(graph_def, write_callback)
else:
raise ValueError("Must specify session or graph_def as input.")
_allowed_symbols = [
"OpHint",
"convert_op_hints_to_stubs",
"convert_op_hints_to_stubs_new",
"find_all_hinted_output_nodes",
"is_ophint_converted",
]
remove_undocumented(__name__, _allowed_symbols)
|
apache-2.0
|
mortada/scipy
|
scipy/sparse/coo.py
|
17
|
18025
|
""" A sparse matrix in COOrdinate or 'triplet' format"""
from __future__ import division, print_function, absolute_import
__docformat__ = "restructuredtext en"
__all__ = ['coo_matrix', 'isspmatrix_coo']
from warnings import warn
import numpy as np
from scipy._lib.six import xrange, zip as izip
from ._sparsetools import coo_tocsr, coo_todense, coo_matvec
from .base import isspmatrix
from .data import _data_matrix, _minmax_mixin
from .sputils import (upcast, upcast_char, to_native, isshape, getdtype,
isintlike, get_index_dtype, downcast_intp_index)
class coo_matrix(_data_matrix, _minmax_mixin):
"""
A sparse matrix in COOrdinate format.
Also known as the 'ijv' or 'triplet' format.
This can be instantiated in several ways:
coo_matrix(D)
with a dense matrix D
coo_matrix(S)
with another sparse matrix S (equivalent to S.tocoo())
coo_matrix((M, N), [dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
coo_matrix((data, (i, j)), [shape=(M, N)])
to construct from three arrays:
1. data[:] the entries of the matrix, in any order
2. i[:] the row indices of the matrix entries
3. j[:] the column indices of the matrix entries
Where ``A[i[k], j[k]] = data[k]``. When shape is not
specified, it is inferred from the index arrays
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
data
COO format data array of the matrix
row
COO format row index array of the matrix
col
COO format column index array of the matrix
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
Advantages of the COO format
- facilitates fast conversion among sparse formats
- permits duplicate entries (see example)
- very fast conversion to and from CSR/CSC formats
Disadvantages of the COO format
- does not directly support:
+ arithmetic operations
+ slicing
Intended Usage
- COO is a fast format for constructing sparse matrices
- Once a matrix has been constructed, convert to CSR or
CSC format for fast arithmetic and matrix vector operations
- By default when converting to CSR or CSC format, duplicate (i,j)
entries will be summed together. This facilitates efficient
construction of finite element matrices and the like. (see example)
Examples
--------
>>> from scipy.sparse import coo_matrix
>>> coo_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 3, 1, 0])
>>> col = np.array([0, 3, 1, 2])
>>> data = np.array([4, 5, 7, 9])
>>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
array([[4, 0, 9, 0],
[0, 7, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 5]])
>>> # example with duplicates
>>> row = np.array([0, 0, 1, 3, 1, 0, 0])
>>> col = np.array([0, 2, 1, 3, 1, 0, 0])
>>> data = np.array([1, 1, 1, 1, 1, 1, 1])
>>> coo_matrix((data, (row, col)), shape=(4, 4)).toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
def __init__(self, arg1, shape=None, dtype=None, copy=False):
_data_matrix.__init__(self)
if isinstance(arg1, tuple):
if isshape(arg1):
M, N = arg1
self.shape = (M,N)
idx_dtype = get_index_dtype(maxval=max(M, N))
self.row = np.array([], dtype=idx_dtype)
self.col = np.array([], dtype=idx_dtype)
self.data = np.array([], getdtype(dtype, default=float))
self.has_canonical_format = True
else:
try:
obj, ij = arg1
except:
raise TypeError('invalid input format')
try:
if len(ij) != 2:
raise TypeError
except TypeError:
raise TypeError('invalid input format')
self.row = np.array(ij[0], copy=copy)
self.col = np.array(ij[1], copy=copy)
self.data = np.array(obj, copy=copy)
if shape is None:
if len(self.row) == 0 or len(self.col) == 0:
raise ValueError('cannot infer dimensions from zero '
'sized index arrays')
M = self.row.max() + 1
N = self.col.max() + 1
self.shape = (M, N)
else:
# Use 2 steps to ensure shape has length 2.
M, N = shape
self.shape = (M, N)
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.row = self.row.astype(idx_dtype)
self.col = self.col.astype(idx_dtype)
self.has_canonical_format = False
elif arg1 is None:
# Initialize an empty matrix.
if not isinstance(shape, tuple) or not isintlike(shape[0]):
raise TypeError('dimensions not understood')
warn('coo_matrix(None, shape=(M,N)) is deprecated, '
'use coo_matrix( (M,N) ) instead', DeprecationWarning)
idx_dtype = get_index_dtype(maxval=max(M, N))
self.shape = shape
self.data = np.array([], getdtype(dtype, default=float))
self.row = np.array([], dtype=idx_dtype)
self.col = np.array([], dtype=idx_dtype)
self.has_canonical_format = True
else:
if isspmatrix(arg1):
if isspmatrix_coo(arg1) and copy:
self.row = arg1.row.copy()
self.col = arg1.col.copy()
self.data = arg1.data.copy()
self.shape = arg1.shape
else:
coo = arg1.tocoo()
self.row = coo.row
self.col = coo.col
self.data = coo.data
self.shape = coo.shape
self.has_canonical_format = False
else:
#dense argument
try:
M = np.atleast_2d(np.asarray(arg1))
except:
raise TypeError('invalid input format')
if M.ndim != 2:
raise TypeError('expected dimension <= 2 array or matrix')
else:
self.shape = M.shape
self.row, self.col = M.nonzero()
self.data = M[self.row, self.col]
self.has_canonical_format = True
if dtype is not None:
self.data = self.data.astype(dtype)
self._check()
def getnnz(self, axis=None):
"""Get the count of explicitly-stored values (nonzeros)
Parameters
----------
axis : None, 0, or 1
Select between the number of values across the whole matrix, in
each column, or in each row.
"""
if axis is None:
nnz = len(self.data)
if nnz != len(self.row) or nnz != len(self.col):
raise ValueError('row, column, and data array must all be the '
'same length')
if self.data.ndim != 1 or self.row.ndim != 1 or \
self.col.ndim != 1:
raise ValueError('row, column, and data arrays must be 1-D')
return int(nnz)
if axis < 0:
axis += 2
if axis == 0:
return np.bincount(downcast_intp_index(self.col),
minlength=self.shape[1])
elif axis == 1:
return np.bincount(downcast_intp_index(self.row),
minlength=self.shape[0])
else:
raise ValueError('axis out of bounds')
nnz = property(fget=getnnz)
def _check(self):
""" Checks data structure for consistency """
nnz = self.nnz
# index arrays should have integer data types
if self.row.dtype.kind != 'i':
warn("row index array has non-integer dtype (%s) "
% self.row.dtype.name)
if self.col.dtype.kind != 'i':
warn("col index array has non-integer dtype (%s) "
% self.col.dtype.name)
idx_dtype = get_index_dtype(maxval=max(self.shape))
self.row = np.asarray(self.row, dtype=idx_dtype)
self.col = np.asarray(self.col, dtype=idx_dtype)
self.data = to_native(self.data)
if nnz > 0:
if self.row.max() >= self.shape[0]:
raise ValueError('row index exceeds matrix dimensions')
if self.col.max() >= self.shape[1]:
raise ValueError('column index exceeds matrix dimensions')
if self.row.min() < 0:
raise ValueError('negative row index found')
if self.col.min() < 0:
raise ValueError('negative column index found')
def transpose(self, copy=False):
M,N = self.shape
return coo_matrix((self.data, (self.col, self.row)), shape=(N,M), copy=copy)
def toarray(self, order=None, out=None):
"""See the docstring for `spmatrix.toarray`."""
B = self._process_toarray_args(order, out)
fortran = int(B.flags.f_contiguous)
if not fortran and not B.flags.c_contiguous:
raise ValueError("Output array must be C or F contiguous")
M,N = self.shape
coo_todense(M, N, self.nnz, self.row, self.col, self.data,
B.ravel('A'), fortran)
return B
def tocsc(self):
"""Return a copy of this matrix in Compressed Sparse Column format
Duplicate entries will be summed together.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import coo_matrix
>>> row = array([0, 0, 1, 3, 1, 0, 0])
>>> col = array([0, 2, 1, 3, 1, 0, 0])
>>> data = array([1, 1, 1, 1, 1, 1, 1])
>>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsc()
>>> A.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
from .csc import csc_matrix
if self.nnz == 0:
return csc_matrix(self.shape, dtype=self.dtype)
else:
M,N = self.shape
idx_dtype = get_index_dtype((self.col, self.row),
maxval=max(self.nnz, M))
indptr = np.empty(N + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
coo_tocsr(N, M, self.nnz,
self.col.astype(idx_dtype),
self.row.astype(idx_dtype),
self.data,
indptr, indices, data)
A = csc_matrix((data, indices, indptr), shape=self.shape)
A.sum_duplicates()
return A
def tocsr(self):
"""Return a copy of this matrix in Compressed Sparse Row format
Duplicate entries will be summed together.
Examples
--------
>>> from numpy import array
>>> from scipy.sparse import coo_matrix
>>> row = array([0, 0, 1, 3, 1, 0, 0])
>>> col = array([0, 2, 1, 3, 1, 0, 0])
>>> data = array([1, 1, 1, 1, 1, 1, 1])
>>> A = coo_matrix((data, (row, col)), shape=(4, 4)).tocsr()
>>> A.toarray()
array([[3, 0, 1, 0],
[0, 2, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 1]])
"""
from .csr import csr_matrix
if self.nnz == 0:
return csr_matrix(self.shape, dtype=self.dtype)
else:
M,N = self.shape
idx_dtype = get_index_dtype((self.row, self.col),
maxval=max(self.nnz, N))
indptr = np.empty(M + 1, dtype=idx_dtype)
indices = np.empty(self.nnz, dtype=idx_dtype)
data = np.empty(self.nnz, dtype=upcast(self.dtype))
coo_tocsr(M, N, self.nnz,
self.row.astype(idx_dtype),
self.col.astype(idx_dtype),
self.data,
indptr,
indices,
data)
A = csr_matrix((data, indices, indptr), shape=self.shape)
A.sum_duplicates()
return A
def tocoo(self, copy=False):
if copy:
return self.copy()
else:
return self
def todia(self):
from .dia import dia_matrix
ks = self.col - self.row # the diagonal for each nonzero
diags = np.unique(ks)
if len(diags) > 100:
#probably undesired, should we do something?
#should todia() have a maxdiags parameter?
pass
#initialize and fill in data array
if self.data.size == 0:
data = np.zeros((0, 0), dtype=self.dtype)
else:
data = np.zeros((len(diags), self.col.max()+1), dtype=self.dtype)
data[np.searchsorted(diags,ks), self.col] = self.data
return dia_matrix((data,diags), shape=self.shape)
def todok(self):
from .dok import dok_matrix
self.sum_duplicates()
dok = dok_matrix((self.shape), dtype=self.dtype)
dok.update(izip(izip(self.row,self.col),self.data))
return dok
def diagonal(self):
# Could be rewritten without the python loop.
# Data entries at the same (row, col) are summed.
n = min(self.shape)
ndata = self.data.shape[0]
d = np.zeros(n, dtype=self.dtype)
for i in xrange(ndata):
r = self.row[i]
if r == self.col[i]:
d[r] += self.data[i]
return d
diagonal.__doc__ = _data_matrix.diagonal.__doc__
def _setdiag(self, values, k):
M, N = self.shape
if values.ndim and not len(values):
return
idx_dtype = self.row.dtype
# Determine which triples to keep and where to put the new ones.
full_keep = self.col - self.row != k
if k < 0:
max_index = min(M+k, N)
if values.ndim:
max_index = min(max_index, len(values))
keep = np.logical_or(full_keep, self.col >= max_index)
new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
new_col = np.arange(max_index, dtype=idx_dtype)
else:
max_index = min(M, N-k)
if values.ndim:
max_index = min(max_index, len(values))
keep = np.logical_or(full_keep, self.row >= max_index)
new_row = np.arange(max_index, dtype=idx_dtype)
new_col = np.arange(k, k + max_index, dtype=idx_dtype)
# Define the array of data consisting of the entries to be added.
if values.ndim:
new_data = values[:max_index]
else:
new_data = np.empty(max_index, dtype=self.dtype)
new_data[:] = values
# Update the internal structure.
self.row = np.concatenate((self.row[keep], new_row))
self.col = np.concatenate((self.col[keep], new_col))
self.data = np.concatenate((self.data[keep], new_data))
self.has_canonical_format = False
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the index arrays
(i.e. .row and .col) are copied.
"""
if copy:
return coo_matrix((data, (self.row.copy(), self.col.copy())),
shape=self.shape, dtype=data.dtype)
else:
return coo_matrix((data, (self.row, self.col)),
shape=self.shape, dtype=data.dtype)
def sum_duplicates(self):
"""Eliminate duplicate matrix entries by adding them together
This is an *in place* operation
"""
if self.has_canonical_format or len(self.data) == 0:
return
order = np.lexsort((self.row,self.col))
self.row = self.row[order]
self.col = self.col[order]
self.data = self.data[order]
unique_mask = ((self.row[1:] != self.row[:-1]) |
(self.col[1:] != self.col[:-1]))
unique_mask = np.append(True, unique_mask)
self.row = self.row[unique_mask]
self.col = self.col[unique_mask]
unique_inds, = np.nonzero(unique_mask)
self.data = np.add.reduceat(self.data, unique_inds, dtype=self.dtype)
self.has_canonical_format = True
###########################
# Multiplication handlers #
###########################
def _mul_vector(self, other):
#output array
result = np.zeros(self.shape[0], dtype=upcast_char(self.dtype.char,
other.dtype.char))
coo_matvec(self.nnz, self.row, self.col, self.data, other, result)
return result
def _mul_multivector(self, other):
return np.hstack([self._mul_vector(col).reshape(-1,1) for col in other.T])
def isspmatrix_coo(x):
return isinstance(x, coo_matrix)
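# Illustrative sketch (not part of the original module): assembling a matrix
# from (data, (row, col)) triplets and converting to CSR, which sums duplicate
# (i, j) entries. The values below are made up for demonstration.
def _example_coo_assembly():
    rows = np.array([0, 0, 1, 2, 0])
    cols = np.array([0, 1, 1, 2, 0])  # (0, 0) appears twice
    vals = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    A = coo_matrix((vals, (rows, cols)), shape=(3, 3))
    return A.tocsr()  # the duplicate entries at (0, 0) are summed to 6.0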
|
bsd-3-clause
|
aviarypl/mozilla-l10n-addons-server
|
src/olympia/lib/git.py
|
2
|
12250
|
# -*- coding: utf-8 -*-
from collections import namedtuple
import uuid
import os
import shutil
import tempfile
import pygit2
from django.conf import settings
from django.utils import translation
from django.utils.functional import cached_property
import olympia.core.logger
from olympia import amo
from olympia.files.utils import id_to_path, extract_extension_to_dest
log = olympia.core.logger.getLogger('z.git_storage')
# A mixture of Blob and TreeEntry
TreeEntryWrapper = namedtuple('Entry', 'tree_entry, path, blob')
BRANCHES = {
amo.RELEASE_CHANNEL_LISTED: 'listed',
amo.RELEASE_CHANNEL_UNLISTED: 'unlisted'
}
class TemporaryWorktree(object):
def __init__(self, repository):
self.git_repository = repository
self.name = uuid.uuid4().hex
self.temp_directory = tempfile.mkdtemp(dir=settings.TMP_PATH)
self.path = os.path.join(self.temp_directory, self.name)
self.obj = None
self.repo = None
def __enter__(self):
self.obj = self.git_repository.add_worktree(self.name, self.path)
self.repo = pygit2.Repository(self.obj.path)
# Clean the workdir (of the newly created worktree)
for entry in self.repo[self.repo.head.target].tree:
path = os.path.join(self.path, entry.name)
if os.path.isfile(path):
os.unlink(path)
else:
shutil.rmtree(path)
return self
def __exit__(self, type, value, traceback):
# Remove temp directory
shutil.rmtree(self.temp_directory)
# Prune temp worktree
if self.obj is not None:
self.obj.prune(True)
# Remove worktree ref in upstream repository
self.git_repository.lookup_branch(self.name).delete()
class AddonGitRepository(object):
def __init__(self, addon_or_id, package_type='package'):
from olympia.addons.models import Addon
assert package_type in ('package', 'source')
        # Always enforce the search path being set to our ROOT setting.
        # This is unfortunate: libgit2 tries to fetch the global git
        # config file (~/.gitconfig) and falls over with permission errors
        # while doing so in our web environment.
        # We are setting this here to avoid creating an unnecessary global
        # state, but since this overwrites a global value in pygit2 it
        # affects all pygit2 calls.
# https://github.com/libgit2/pygit2/issues/339
# https://github.com/libgit2/libgit2/issues/2122
git_home = settings.ROOT
pygit2.settings.search_path[pygit2.GIT_CONFIG_LEVEL_GLOBAL] = git_home
addon_id = (
addon_or_id.pk
if isinstance(addon_or_id, Addon)
else addon_or_id)
self.git_repository_path = os.path.join(
settings.GIT_FILE_STORAGE_PATH,
id_to_path(addon_id),
package_type)
@property
def is_extracted(self):
return os.path.exists(self.git_repository_path)
@cached_property
def git_repository(self):
if not self.is_extracted:
os.makedirs(self.git_repository_path)
git_repository = pygit2.init_repository(
path=self.git_repository_path,
bare=False)
# Write first commit to 'master' to act as HEAD
tree = self.git_repository.TreeBuilder().write()
git_repository.create_commit(
'HEAD', # ref
self.get_author(), # author, using addons-robot
                self.get_author(), # committer, using addons-robot
'Initializing repository', # message
tree, # tree
[]) # parents
log.debug('Initialized git repository {path}'.format(
path=self.git_repository_path))
else:
git_repository = pygit2.Repository(self.git_repository_path)
return git_repository
@classmethod
def extract_and_commit_from_version(cls, version, author=None):
"""Extract the XPI from `version` and comit it.
This is doing the following:
* Create a temporary `git worktree`_
* Remove all files in that worktree
* Extract the xpi behind `version` into the worktree
* Commit all files
Kinda like doing::
$ workdir_name=$(uuid)
$ mkdir /tmp/$workdir_name
$ git worktree add /tmp/$workdir_name
Preparing worktree (new branch 'af4172e4-d8c7…')
HEAD is now at 8c5223e Initial commit
$ git worktree list
/tmp/addon-repository 8c5223e [master]
/tmp/af4172e4-d8c7-4486-a5f2-316458da91ff 8c5223e [af4172e4-d8c7…]
$ unzip dingrafowl-falcockalo-lockapionk.zip -d /tmp/$workdir_name
Archive: dingrafowl-falcockalo-lockapionk.zip
extracting: /tmp/af4172e4-d8c7…/manifest.json
$ pushd /tmp/$workdir_name
/tmp/af4172e4-d8c7-4486-a5f2-316458da91ff /tmp/addon-repository
$ git status
On branch af4172e4-d8c7-4486-a5f2-316458da91ff
Untracked files:
(use "git add <file>..." to include in what will be committed)
manifest.json
$ git add *
$ git commit -a -m "Creating new version"
[af4172e4-d8c7-4486-a5f2-316458da91ff c4285f8] Creating new version
…
$ cd addon-repository
$ git checkout -b listed
Switched to a new branch 'listed'
            # We don't technically do a full cherry-pick but it's close enough
            # and does almost what we do. We are technically committing
            # directly on top of the branch as if we checked out the branch
            # in the worktree (via -b) but pygit2 doesn't properly support
            # that, so we "simply" set the parents correctly.
$ git cherry-pick c4285f8
[listed a4d0f63] Creating new version…
This ignores the fact that there may be a race-condition of two
versions being created at the same time. Since all relevant file based
work is done in a temporary worktree there won't be any conflicts and
usually the last upload simply wins the race and we're setting the
HEAD of the branch (listed/unlisted) to that specific commit.
.. _`git worktree`: https://git-scm.com/docs/git-worktree
"""
current_language = translation.get_language()
try:
# Make sure we're always using the en-US locale by default
# to have unified commit messages and avoid any __str__
# to give us wrong results
translation.activate('en-US')
repo = cls(version.addon.id)
file_obj = version.all_files[0]
branch = repo.find_or_create_branch(BRANCHES[version.channel])
commit = repo._commit_through_worktree(
path=file_obj.current_file_path,
message=(
'Create new version {version} ({version_id}) for '
'{addon} from {file_obj}'.format(
version=repr(version),
version_id=version.id,
addon=repr(version.addon),
file_obj=repr(file_obj))),
author=author,
branch=branch)
# Set the latest git hash on the related version.
version.update(git_hash=commit.hex)
finally:
translation.activate(current_language)
return repo
@classmethod
def extract_and_commit_source_from_version(cls, version, author=None):
"""Extract the source file from `version` and comit it.
This is doing the following:
* Create a temporary `git worktree`_
* Remove all files in that worktree
        * Extract the source file behind `version` into the worktree
* Commit all files
See `extract_and_commit_from_version` for more details.
"""
repo = cls(version.addon.id, package_type='source')
branch = repo.find_or_create_branch(BRANCHES[version.channel])
commit = repo._commit_through_worktree(
path=version.source.path,
message=(
'Create new version {version} ({version_id}) for '
'{addon} from source file'.format(
version=repr(version),
version_id=version.id,
addon=repr(version.addon))),
author=author,
branch=branch)
# Set the latest git hash on the related version.
version.update(source_git_hash=commit.hex)
return repo
def get_author(self, user=None):
if user is not None:
author_name = user.name
author_email = user.email
else:
author_name = 'Mozilla Add-ons Robot'
author_email = '[email protected]'
return pygit2.Signature(name=author_name, email=author_email)
def find_or_create_branch(self, name):
"""Lookup or create the branch named `name`"""
branch = self.git_repository.branches.get(name)
if branch is None:
branch = self.git_repository.create_branch(
name, self.git_repository.head.peel())
return branch
def _commit_through_worktree(self, path, message, author, branch):
"""
        Create a temporary worktree that we can use to unpack the extension
        without disturbing the current git workdir, since the extraction
        happens in a separate temporary directory.
"""
with TemporaryWorktree(self.git_repository) as worktree:
# Now extract the extension to the workdir
extract_extension_to_dest(
source=path,
dest=worktree.path,
force_fsync=True)
# Stage changes, `TemporaryWorktree` always cleans the whole
# directory so we can simply add all changes and have the correct
# state.
# Add all changes to the index (git add --all ...)
worktree.repo.index.add_all()
worktree.repo.index.write()
tree = worktree.repo.index.write_tree()
            # Now create a commit directly on top of the respective branch
oid = worktree.repo.create_commit(
None,
# author, using the actual uploading user
self.get_author(author),
# committer, using addons-robot because that's the user
# actually doing the commit.
                self.get_author(),
message,
tree,
# Set the current branch HEAD as the parent of this commit
# so that it'll go straight into the branches commit log
[branch.target]
)
# Fetch the commit object
commit = worktree.repo.get(oid)
# And set the commit we just created as HEAD of the relevant
            # branch, and update the reflog. This does not require any
# merges.
branch.set_target(commit.hex)
return commit
def iter_tree(self, tree):
"""Recursively iterate through a tree.
This includes the directories.
"""
for tree_entry in tree:
tree_or_blob = self.git_repository[tree_entry.oid]
if isinstance(tree_or_blob, pygit2.Tree):
yield TreeEntryWrapper(
blob=None,
tree_entry=tree_entry,
path=tree_entry.name)
for child in self.iter_tree(tree_or_blob):
yield TreeEntryWrapper(
blob=child.blob,
tree_entry=child.tree_entry,
path=os.path.join(tree_entry.name, child.path))
else:
yield TreeEntryWrapper(
blob=tree_or_blob,
tree_entry=tree_entry,
path=tree_entry.name)
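# Illustrative usage sketch (not part of the original module); `version` and
# `user` are assumed to be existing model instances:
#
#   repo = AddonGitRepository.extract_and_commit_from_version(version, author=user)
#   commit = repo.git_repository.revparse_single(version.git_hash)
#   for entry in repo.iter_tree(commit.tree):
#       print(entry.path, entry.blob is not None)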
|
bsd-3-clause
|
zrax/moul-scripts
|
Python/system/site.py
|
10
|
19784
|
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
In earlier versions of Python (up to 1.5a3), scripts or modules that
needed to use site-specific modules would place ``import site''
somewhere near the top of their code. Because of the automatic
import, this is no longer necessary (but code that does it still
works).
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
After these path manipulations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import __builtin__
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
dir = os.path.join(*paths)
try:
dir = os.path.abspath(dir)
except OSError:
pass
return dir, os.path.normcase(dir)
def abs__file__():
"""Set all module' __file__ attribute to an absolute path"""
for m in sys.modules.values():
if hasattr(m, '__loader__'):
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except (AttributeError, OSError):
pass
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
# XXX This should not be part of site.py, since it is needed even when
# using the -S option for Python. See http://www.python.org/sf/586680
def addbuilddir():
"""Append ./build/lib.<platform> in case we're running in the build dir
(especially for Guido :-)"""
from sysconfig import get_platform
s = "build/lib.%s-%.3s" % (get_platform(), sys.version)
if hasattr(sys, 'gettotalrefcount'):
s += '-pydebug'
s = os.path.join(os.path.dirname(sys.path.pop()), s)
sys.path.append(s)
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
_init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "rU")
except IOError:
return
with f:
for line in f:
if line.startswith("#"):
continue
if line.startswith(("import ", "import\t")):
exec line
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
if reset:
known_paths = None
return known_paths
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
try:
names = os.listdir(sitedir)
except os.error:
return
dotpth = os.extsep + "pth"
names = [name for name in names if name.endswith(dotpth)]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
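# Illustrative usage sketch (not part of the original module): adding a vendor
# directory and processing any .pth files it contains; the path is made up.
#
#   import site
#   site.addsitedir("/opt/myapp/vendor")  # appends the dir and reads *.pth files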
def check_enableusersite():
"""Check if user site directory is safe for inclusion
The function tests for the command line flag (including environment var),
process uid/gid equal to effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def getuserbase():
"""Returns the `user base` directory path.
The `user base` directory can be used to store data. If the global
variable ``USER_BASE`` is not initialized yet, this function will also set
it.
"""
global USER_BASE
if USER_BASE is not None:
return USER_BASE
from sysconfig import get_config_var
USER_BASE = get_config_var('userbase')
return USER_BASE
def getusersitepackages():
"""Returns the user-specific site-packages directory path.
If the global variable ``USER_SITE`` is not initialized yet, this
function will also set it.
"""
global USER_SITE
user_base = getuserbase() # this will also set USER_BASE
if USER_SITE is not None:
return USER_SITE
from sysconfig import get_path
import os
if sys.platform == 'darwin':
from sysconfig import get_config_var
if get_config_var('PYTHONFRAMEWORK'):
USER_SITE = get_path('purelib', 'osx_framework_user')
return USER_SITE
USER_SITE = get_path('purelib', '%s_user' % os.name)
return USER_SITE
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
"""
# get the per user site-package path
# this call will also make sure USER_BASE and USER_SITE are set
user_site = getusersitepackages()
if ENABLE_USER_SITE and os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def getsitepackages():
"""Returns a list containing all global site-packages directories
(and possibly site-python).
For each directory present in the global ``PREFIXES``, this function
will find its `site-packages` subdirectory depending on the system
environment, and will return a list of full paths.
"""
sitepackages = []
seen = set()
for prefix in PREFIXES:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if sys.platform in ('os2emx', 'riscos'):
sitepackages.append(os.path.join(prefix, "Lib", "site-packages"))
elif os.sep == '/':
sitepackages.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"site-packages"))
sitepackages.append(os.path.join(prefix, "lib", "site-python"))
else:
sitepackages.append(prefix)
sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
if sys.platform == "darwin":
# for framework builds *only* we add the standard Apple
# locations.
from sysconfig import get_config_var
framework = get_config_var("PYTHONFRAMEWORK")
if framework and "/%s.framework/"%(framework,) in prefix:
sitepackages.append(
os.path.join("/Library", framework,
sys.version[:3], "site-packages"))
return sitepackages
def addsitepackages(known_paths):
"""Add site-packages (and possibly site-python) to sys.path"""
for sitedir in getsitepackages():
if os.path.isdir(sitedir):
addsitedir(sitedir, known_paths)
return known_paths
def setBEGINLIBPATH():
"""The OS/2 EMX port has optional extension modules that do double duty
as DLLs (and must use the .DLL file extension) for other extensions.
The library search path needs to be amended so these will be found
during module import. Use BEGINLIBPATH so that these are at the start
of the library search path.
"""
dllpath = os.path.join(sys.prefix, "Lib", "lib-dynload")
libpath = os.environ['BEGINLIBPATH'].split(';')
if libpath[-1]:
libpath.append(dllpath)
else:
libpath[-1] = dllpath
os.environ['BEGINLIBPATH'] = ';'.join(libpath)
def setquit():
"""Define new builtins 'quit' and 'exit'.
These are objects which make the interpreter exit when called.
The repr of each object contains a hint at how it works.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
class Quitter(object):
def __init__(self, name):
self.name = name
def __repr__(self):
return 'Use %s() or %s to exit' % (self.name, eof)
def __call__(self, code=None):
# Shells like IDLE catch the SystemExit, but listen when their
# stdin wrapper is closed.
try:
sys.stdin.close()
except:
pass
raise SystemExit(code)
__builtin__.quit = Quitter('quit')
__builtin__.exit = Quitter('exit')
class _Printer(object):
"""interactive prompt objects for printing the license text, a list of
contributors and the copyright notice."""
MAXLINES = 23
def __init__(self, name, data, files=(), dirs=()):
self.__name = name
self.__data = data
self.__files = files
self.__dirs = dirs
self.__lines = None
def __setup(self):
if self.__lines:
return
data = None
for dir in self.__dirs:
for filename in self.__files:
filename = os.path.join(dir, filename)
try:
fp = file(filename, "rU")
data = fp.read()
fp.close()
break
except IOError:
pass
if data:
break
if not data:
data = self.__data
self.__lines = data.split('\n')
self.__linecnt = len(self.__lines)
def __repr__(self):
self.__setup()
if len(self.__lines) <= self.MAXLINES:
return "\n".join(self.__lines)
else:
return "Type %s() to see the full %s text" % ((self.__name,)*2)
def __call__(self):
self.__setup()
prompt = 'Hit Return for more, or q (and Return) to quit: '
lineno = 0
while 1:
try:
for i in range(lineno, lineno + self.MAXLINES):
print self.__lines[i]
except IndexError:
break
else:
lineno += self.MAXLINES
key = None
while key is None:
key = raw_input(prompt)
if key not in ('', 'q'):
key = None
if key == 'q':
break
def setcopyright():
"""Set 'copyright' and 'credits' in __builtin__"""
__builtin__.copyright = _Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
__builtin__.credits = _Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
else:
__builtin__.credits = _Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
__builtin__.license = _Printer(
"license", "See http://www.python.org/%.3s/license.html" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
class _Helper(object):
"""Define the builtin 'help'.
This is a wrapper around pydoc.help (with a twist).
"""
def __repr__(self):
return "Type help() for interactive help, " \
"or help(object) for help about object."
def __call__(self, *args, **kwds):
import pydoc
return pydoc.help(*args, **kwds)
def sethelper():
__builtin__.help = _Helper()
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import locale, codecs
enc = locale.getdefaultlocale()[1]
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
def setencoding():
"""Set the string encoding used by the Unicode implementation. The
default is 'ascii', but if you're willing to experiment, you can
change this."""
encoding = "ascii" # Default value set by _PyUnicode_Init()
if 0:
# Enable to support locale aware default string encodings.
import locale
loc = locale.getdefaultlocale()
if loc[1]:
encoding = loc[1]
if 0:
# Enable to switch off string to Unicode coercion and implicit
# Unicode to string conversion.
encoding = "undefined"
if encoding != "ascii":
# On Non-Unicode builds this will raise an AttributeError...
sys.setdefaultencoding(encoding) # Needs Python Unicode build !
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print >>sys.stderr, \
"'import sitecustomize' failed; use -v for traceback"
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception:
if sys.flags.verbose:
sys.excepthook(*sys.exc_info())
else:
print>>sys.stderr, \
"'import usercustomize' failed; use -v for traceback"
def main():
global ENABLE_USER_SITE
abs__file__()
known_paths = removeduppaths()
if (os.name == "posix" and sys.path and
os.path.basename(sys.path[-1]) == "Modules"):
addbuilddir()
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
known_paths = addusersitepackages(known_paths)
known_paths = addsitepackages(known_paths)
if sys.platform == 'os2emx':
setBEGINLIBPATH()
setquit()
setcopyright()
sethelper()
aliasmbcs()
setencoding()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Remove sys.setdefaultencoding() so that users cannot change the
# encoding after initialization. The test for presence is needed when
# this module is run as a script, because this code is executed twice.
if hasattr(sys, "setdefaultencoding"):
del sys.setdefaultencoding
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
      2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
print "sys.path = ["
for dir in sys.path:
print " %r," % (dir,)
print "]"
print "USER_BASE: %r (%s)" % (USER_BASE,
"exists" if os.path.isdir(USER_BASE) else "doesn't exist")
print "USER_SITE: %r (%s)" % (USER_SITE,
"exists" if os.path.isdir(USER_SITE) else "doesn't exist")
print "ENABLE_USER_SITE: %r" % ENABLE_USER_SITE
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print os.pathsep.join(buffer)
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print textwrap.dedent(help % (sys.argv[0], os.pathsep))
sys.exit(10)
if __name__ == '__main__':
_script()
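# Illustrative usage sketch (not part of the original module): running the
# module as a script prints path information; with flags it prints only the
# user directories and encodes their status in the exit code.
#
#   $ python site.py              # prints sys.path, USER_BASE, USER_SITE
#   $ python site.py --user-site  # prints the user site dir; exit code
#                                 # reflects whether it is enabled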
|
gpl-3.0
|
CatsAndDogsbvba/odoo
|
addons/account_asset/__openerp__.py
|
314
|
2182
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Assets Management',
'version': '1.0',
'depends': ['account'],
'author': 'OpenERP S.A.',
'description': """
Financial and accounting asset management.
==========================================
This module manages the assets owned by a company or an individual. It keeps
track of the depreciation incurred on those assets, and it allows creating
account moves for the depreciation lines.
""",
'website': 'https://www.odoo.com/page/accounting',
'category': 'Accounting & Finance',
'sequence': 32,
'demo': [ 'account_asset_demo.xml'],
'test': [
'test/account_asset_demo.yml',
'test/account_asset.yml',
'test/account_asset_wizard.yml',
],
'data': [
'security/account_asset_security.xml',
'security/ir.model.access.csv',
'wizard/account_asset_change_duration_view.xml',
'wizard/wizard_asset_compute_view.xml',
'account_asset_view.xml',
'account_asset_invoice_view.xml',
'report/account_asset_report_view.xml',
],
'auto_install': False,
'installable': True,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
spherulitic/xerafin
|
getLeaderboardStats.py
|
1
|
38402
|
#!/usr/bin/python
import json, sys
import MySQLdb as mysql
import xerafinLib as xl
import xerafinSetup as xs
import datetime
import time
def getUsersByPeriod(period):
try:
if period in ["today", "yesterday"]:
dateMask = {"today": "curdate()", "yesterday": "curdate() - interval 1 day"}
command = "select count(*) from leaderboard where dateStamp = {0}".format(dateMask[period])
con.execute(command)
elif period == "eternity":
command = "select count(distinct userid) from leaderboard"
con.execute(command)
elif period == "sevenDays":
command = "select count(distinct userid) from leaderboard where dateStamp >= curdate() - interval 7 day"
con.execute(command)
else:
dateMask = {"thisWeek": "%Y%u", "lastWeek": "%Y%u", "thisMonth": "%Y%m", "thisYear": "%Y"}
if period == "lastWeek":
datePeriod = "curdate() - interval 7 day"
else:
datePeriod = "curdate()"
command = "select count(distinct userid) from leaderboard where DATE_FORMAT(dateStamp, %s) = DATE_FORMAT({0}, %s)".format(datePeriod)
con.execute(command, (dateMask[period], dateMask[period]))
return con.fetchone()[0]
except:
return 0
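# Illustrative usage sketch (not part of the original script): getUsersByPeriod
# relies on the cursor-like object `con` opened below via xs.getMysqlCon() and
# returns the number of distinct active users for the given window, e.g.:
#
#   active_today = getUsersByPeriod("today")
#   active_this_week = getUsersByPeriod("thisWeek")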
try:
params = json.load(sys.stdin)
except:
params = { }
try:
me = params["userid"]
except:
me = "0"
try:
getLeaderboard = params["leaderboard"]
except:
getLeaderboard = False
try:
timeframe = params["timeframe"]
except:
timeframe = "all"
try:
getGlobal = params["globe"]
except:
getGlobal = False
try:
getSiteRecords = params["siterecords"]
except:
getSiteRecords = False
try:
getUserRecords = (params["userrecords"] and me != "0")
except:
getUserRecords = False
try:
getUserRank = (params["userrank"] and me != "0")
except:
getUserRank = False
try:
getUserTotals = (params["usertotals"] and me != "0")
except:
getUserTotals = False
try:
getIndivRecords = params["indivrecords"]
except:
getIndivRecords = False
result = { }
error = { }
if getLeaderboard:
leaderboard = { }
leaderboard["myRank"] = { }
leaderboard["users"] = { }
try:
with xs.getMysqlCon() as con:
####
#### TODAY
####
if timeframe in ["today", "all"]:
leaderboard["users"]["today"] = getUsersByPeriod("today")
leaderboard["today"] = [ ]
command = "select name, photo, questionsAnswered, userid from leaderboard join login using (userid) where dateStamp = curdate() order by questionsAnswered desc limit 10"
con.execute(command)
meTopTen = False
i = 1
for row in con.fetchall():
if row[3] == me:
meTopTen = True
leaderboard["myRank"]["today"] = i
leaderboard["today"].append({"name": row[0], "photo": row[1], "answered": row[2]})
i = i + 1
if not meTopTen:
command = "select name, photo, questionsAnswered from leaderboard join login using (userid) where dateStamp = curdate() and userid = %s"
con.execute(command, me)
row = con.fetchone()
if row is not None:
myAnswered = row[2]
leaderboard["today"].append({"name": row[0], "photo": row[1], "answered": myAnswered })
command = "select count(*) from leaderboard join login using (userid) where dateStamp = curdate() and questionsAnswered > %s"
con.execute(command, myAnswered)
row = con.fetchone()
leaderboard["myRank"]["today"] = row[0]+1
command = "select count(*) from leaderboard where dateStamp = curdate()"
####
#### YESTERDAY
####
if timeframe in ["yesterday", "all"]:
leaderboard["users"]["yesterday"] = getUsersByPeriod("yesterday")
leaderboard["yesterday"] = [ ]
command = "select name, photo, questionsAnswered, userid from leaderboard join login using (userid) where dateStamp = curdate() - interval 1 day order by questionsAnswered desc limit 10"
con.execute(command)
meTopTen = False
i = 1
for row in con.fetchall():
if row[3] == me:
leaderboard["myRank"]["yesterday"] = i
meTopTen = True
leaderboard["yesterday"].append({"name": row[0], "photo": row[1], "answered": row[2]})
i = i+1
if not meTopTen:
command = "select name, photo, questionsAnswered from leaderboard join login using (userid) where dateStamp = curdate() - interval 1 day and userid = %s"
con.execute(command, me)
row = con.fetchone()
if row is not None:
myAnswered = row[2]
leaderboard["yesterday"].append({"name": row[0], "photo": row[1], "answered": myAnswered })
command = "select count(*) from leaderboard join login using (userid) where dateStamp = curdate() - interval 1 day and questionsAnswered > %s"
con.execute(command, myAnswered)
row = con.fetchone()
leaderboard["myRank"]["yesterday"] = row[0]+1
####
#### LAST SEVEN DAYS
####
if timeframe in ["sevenDays", "all"]:
leaderboard["users"]["sevenDays"] = getUsersByPeriod("sevenDays")
leaderboard["sevenDays"] = [ ]
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where dateStamp >= curdate() - interval 7 day group by name, photo, userid order by sum(questionsAnswered) desc limit 10"
con.execute(command)
meTopTen = False
i = 1
for row in con.fetchall():
if row[3] == me:
leaderboard["myRank"]["sevenDays"] = i
meTopTen = True
leaderboard["sevenDays"].append({"name": row[0], "photo": row[1], "answered": int(row[2])})
i = i + 1
if not meTopTen:
command = "select name, photo, sum(questionsAnswered) from leaderboard join login using (userid) where dateStamp >= curdate() - interval 7 day and userid = %s group by name, photo"
con.execute(command, me)
row = con.fetchone()
if row is not None:
myAnswered = int(row[2])
leaderboard["sevenDays"].append({"name": row[0], "photo": row[1], "answered": myAnswered })
command = "select userid from leaderboard where dateStamp >= curdate() - interval 7 day group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
leaderboard["myRank"]["sevenDays"] = con.rowcount+1
### This (current) week
if timeframe in ["thisWeek", "all"]:
leaderboard["users"]["thisWeek"] = getUsersByPeriod("thisWeek")
leaderboard["thisWeek"] = [ ]
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%Y%u') = DATE_FORMAT(curdate(), '%Y%u') group by name, photo, userid order by sum(questionsAnswered) desc limit 10"
con.execute(command)
meTopTen = False
i = 1
for row in con.fetchall():
if row[3] == me:
leaderboard["myRank"]["thisWeek"] = i
meTopTen = True
leaderboard["thisWeek"].append({"name": row[0], "photo": row[1], "answered": int(row[2])})
i = i + 1
if not meTopTen:
command = "select name, photo, sum(questionsAnswered) from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate(), '%%Y%%u') and userid = %s group by name, photo"
con.execute(command, me)
row = con.fetchone()
if row is not None:
myAnswered = int(row[2])
leaderboard["thisWeek"].append({"name": row[0], "photo": row[1], "answered": myAnswered })
command = "select userid from leaderboard where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate(), '%%Y%%u') group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
leaderboard["myRank"]["thisWeek"] = con.rowcount+1
### Last week
if timeframe in ["lastWeek", "all"]:
leaderboard["users"]["lastWeek"] = getUsersByPeriod("lastWeek")
leaderboard["lastWeek"] = [ ]
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%Y%u') = DATE_FORMAT(curdate() - interval 7 day, '%Y%u') group by name, photo, userid order by sum(questionsAnswered) desc limit 10"
con.execute(command)
meTopTen = False
i = 1
for row in con.fetchall():
if row[3] == me:
leaderboard["myRank"]["lastWeek"] = i
meTopTen = True
leaderboard["lastWeek"].append({"name": row[0], "photo": row[1], "answered": int(row[2])})
i = i + 1
if not meTopTen:
command = "select name, photo, sum(questionsAnswered) from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate() - interval 7 day, '%%Y%%u') and userid = %s group by name, photo"
con.execute(command, me)
row = con.fetchone()
if row is not None:
myAnswered = int(row[2])
leaderboard["lastWeek"].append({"name": row[0], "photo": row[1], "answered": myAnswered })
command = "select userid from leaderboard where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate() - interval 7 day, '%%Y%%u') group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
leaderboard["myRank"]["lastWeek"] = con.rowcount+1
# This Month
if timeframe in ["thisMonth", "all"]:
leaderboard["users"]["thisMonth"] = getUsersByPeriod("thisMonth")
leaderboard["thisMonth"] = [ ]
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%Y%m') = DATE_FORMAT(curdate(), '%Y%m') group by name, photo, userid order by sum(questionsAnswered) desc limit 10"
con.execute(command)
meTopTen = False
i = 1
for row in con.fetchall():
if row[3] == me:
leaderboard["myRank"]["thisMonth"] = i
meTopTen = True
leaderboard["thisMonth"].append({"name": row[0], "photo": row[1], "answered": int(row[2])})
i = i + 1
if not meTopTen:
command = "select name, photo, sum(questionsAnswered) from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%%Y%%m') = DATE_FORMAT(curdate(), '%%Y%%m') and userid = %s group by name, photo"
con.execute(command, me)
row = con.fetchone()
if row is not None:
myAnswered = int(row[2])
leaderboard["thisMonth"].append({"name": row[0], "photo": row[1], "answered": myAnswered })
command = "select userid from leaderboard where DATE_FORMAT(dateStamp, '%%Y%%m') = DATE_FORMAT(curdate(), '%%Y%%m') group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
leaderboard["myRank"]["thisMonth"] = con.rowcount+1
# This Year
if timeframe in ["thisYear", "all"]:
leaderboard["users"]["thisYear"] = getUsersByPeriod("thisYear")
leaderboard["thisYear"] = [ ]
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%Y') = DATE_FORMAT(curdate(), '%Y') group by name, photo, userid order by sum(questionsAnswered) desc limit 10"
con.execute(command)
meTopTen = False
i = 1
for row in con.fetchall():
if row[3] == me:
leaderboard["myRank"]["thisYear"] = i
meTopTen = True
leaderboard["thisYear"].append({"name": row[0], "photo": row[1], "answered": int(row[2])})
i = i + 1
if not meTopTen:
command = "select name, photo, sum(questionsAnswered) from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%%Y') = DATE_FORMAT(curdate(), '%%Y') and userid = %s group by name, photo"
con.execute(command, me)
row = con.fetchone()
if row is not None:
myAnswered = int(row[2])
leaderboard["thisYear"].append({"name": row[0], "photo": row[1], "answered": myAnswered })
command = "select userid from leaderboard where DATE_FORMAT(dateStamp, '%%Y') = DATE_FORMAT(curdate(), '%%Y') group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
leaderboard["myRank"]["thisYear"] = con.rowcount+1
except Exception as ex:
template = "Leaderboard: An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
error["status"] = message
result["leaderboard"] = leaderboard
if getGlobal:
globe = {"questions": { }, "users": { } }
try:
with xs.getMysqlCon() as con:
# Today's sitewide totals
command = "select sum(questionsAnswered), count(distinct userid) from leaderboard where dateStamp = curdate()"
con.execute(command)
row = con.fetchone()
if row is not None:
todayQuestions = int(row[0])
todayUsers = row[1]
else:
todayQuestions = 0
todayUsers = 0
globe["questions"]["today"] = todayQuestions
globe["users"]["today"] = todayUsers
# At EOD, the summary isn't populated with the previous day's total immediately
# yesterday's sitewide totals
command = "select sum(questionsAnswered), count(distinct userid) from leaderboard where dateStamp = curdate() - interval 1 day"
con.execute(command)
row = con.fetchone()
if row is not None:
yesterdayQuestions = int(row[0])
yesterdayUsers = row[1]
else:
yesterdayQuestions = 0
yesterdayUsers = 0
globe["questions"]["yesterday"] = yesterdayQuestions
globe["users"]["yesterday"] = yesterdayUsers
# Weekly totals
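# The weekly figure is assembled incrementally: on Monday only today's live count
# exists, on Tuesday today + yesterday, and from Wednesday onwards the daily
# lb_summary rows for earlier days of the current week are added to the live
# today/yesterday counts (the summary lags behind the leaderboard table by up to a day).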
if time.strftime("%A") == "Monday":
thisWeekQuestions = todayQuestions
elif time.strftime("%A") == "Tuesday":
thisWeekQuestions = todayQuestions + yesterdayQuestions
else:
command = "select sum(questionsAnswered) from lb_summary where period = 'DAY' and dateStamp < curdate() - interval 1 day and DATE_FORMAT(dateStamp, '%Y%u') = DATE_FORMAT(curdate(), '%Y%u')"
con.execute(command)
row = con.fetchone()
if row is not None:
thisWeekQuestions = int(row[0]) + yesterdayQuestions + todayQuestions
else:
thisWeekQuestions = yesterdayQuestions + todayQuestions
globe["questions"]["thisWeek"] = thisWeekQuestions
globe["users"]["thisWeek"] = getUsersByPeriod("thisWeek")
# Monthly totals
if time.strftime("%d") == "01":
thisMonthQuestions = todayQuestions
elif time.strftime("%d") == "02":
thisMonthQuestions = todayQuestions + yesterdayQuestions
else:
command = "select sum(questionsAnswered) from lb_summary where period = 'DAY' and dateStamp < curdate() - interval 1 day and DATE_FORMAT(dateStamp, '%Y%m') = DATE_FORMAT(curdate(), '%Y%m')"
con.execute(command)
row = con.fetchone()
if row is not None:
thisMonthQuestions = int(row[0]) + yesterdayQuestions + todayQuestions
else:
thisMonthQuestions = yesterdayQuestions + todayQuestions
globe["questions"]["thisMonth"] = thisMonthQuestions
globe["users"]["thisMonth"] = getUsersByPeriod("thisMonth")
# Annual totals
if time.strftime("%m") == "01":
thisYearQuestions = thisMonthQuestions
else:
command = "select sum(questionsAnswered) from lb_summary where period = 'MONTH' and DATE_FORMAT(dateStamp, '%Y') = DATE_FORMAT(curdate(), '%Y')"
con.execute(command)
row = con.fetchone()
if row is not None:
thisYearQuestions = int(row[0]) + thisMonthQuestions
else:
thisYearQuestions = thisMonthQuestions
globe["questions"]["thisYear"] = thisYearQuestions
globe["users"]["thisYear"] = getUsersByPeriod("thisYear")
# Eternity totals
command = "select sum(questionsAnswered) from lb_summary where period = 'YEAR'"
con.execute(command)
row = con.fetchone()
if row is not None:
eternityQuestions = int(row[0]) + thisYearQuestions
else:
eternityQuestions = thisYearQuestions
globe["questions"]["eternity"] = eternityQuestions
globe["users"]["eternity"] = getUsersByPeriod("eternity")
except Exception as ex:
template = "Globe: An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
error["status"] = message
result["globe"] = globe
if getSiteRecords:
siterecords = {"maxUsers": { }, "maxQuestions": { } }
try:
with xs.getMysqlCon() as con:
for period in ['DAY', 'WEEK', 'MONTH', 'YEAR']:
command = "select dateStamp, questionsAnswered from lb_summary where period = %s order by questionsAnswered desc, dateStamp desc limit 1"
con.execute(command, period)
row = con.fetchone()
siterecords["maxQuestions"][period.lower()] = {"date": str(row[0]), "questions": row[1]}
command = "select dateStamp, numUsers from lb_summary where period = %s order by numUsers desc, dateStamp desc limit 1"
con.execute(command, period)
row = con.fetchone()
siterecords["maxUsers"][period.lower()] = {"date": str(row[0]), "users": row[1]}
command = "select dateStamp, questionsAnswered from lb_summary where DATE_FORMAT(dateStamp, '%a') = DATE_FORMAT(curdate(), '%a') and period = 'DAY' order by questionsAnswered desc, dateStamp desc limit 1"
con.execute(command)
row = con.fetchone()
if row is not None:
siterecords["maxQuestions"]["weekday"] = {"date": str(row[0]), "questions": row[1]}
command = "select dateStamp, numUsers from lb_summary where DATE_FORMAT(dateStamp, '%a') = DATE_FORMAT(curdate(), '%a') and period = 'DAY' order by numUsers desc, dateStamp desc limit 1"
con.execute(command)
row = con.fetchone()
if row is not None:
siterecords["maxUsers"]["weekday"] = {"date": str(row[0]), "users": row[1]}
# format dates
for d in ["maxQuestions", "maxUsers"]:
for e in [("year", "%Y"), ("month", "%b %Y"), ("week", "%d %b %Y"), ("day", "%d %b %Y"), ("weekday", "%d %b %Y")]:
dt = datetime.datetime.strptime(siterecords[d][e[0]]["date"], "%Y-%m-%d").date()
siterecords[d][e[0]]["date"] = dt.strftime(e[1])
if e[0] == "week":
week = datetime.timedelta(days=6)
dt = dt + week
siterecords[d][e[0]]["dateEnd"] = dt.strftime(e[1])
if e[0] == "weekday":
siterecords[d][e[0]]["weekday"] = dt.strftime("%A")
except Exception as ex:
template = "Siterecords: An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
error["status"] = message
result["siterecords"] = siterecords
if getUserRecords:
userrecords = { }
try:
with xs.getMysqlCon() as con:
command = "select dateStamp, questionsAnswered from leaderboard where DATE_FORMAT(dateStamp, '%%a') = DATE_FORMAT(curdate(), '%%a') and userid = %s order by questionsAnswered desc, dateStamp desc limit 1"
con.execute(command, me)
row = con.fetchone()
if row is not None:
userrecords["weekday"] = {"date": str(row[0]), "questions": row[1]}
command = "select dateStamp, questionsAnswered from leaderboard where userid = %s order by questionsAnswered desc, dateStamp desc limit 1"
con.execute(command, me)
row = con.fetchone()
if row is not None:
userrecords["day"] = {"date": str(row[0]), "questions": row[1]}
command = "select min(dateStamp), sum(questionsAnswered) from leaderboard where userid = %s group by DATE_FORMAT(dateStamp, '%%u%%Y') order by sum(questionsAnswered) desc, min(dateStamp) desc limit 1"
con.execute(command, me)
row = con.fetchone()
if row is not None:
userrecords["week"] = {"date": str(row[0]), "questions": int(row[1])}
command = "select min(dateStamp), sum(questionsAnswered) from leaderboard where userid = %s group by DATE_FORMAT(dateStamp, '%%m%%Y') order by sum(questionsAnswered) desc, min(dateStamp) desc limit 1"
con.execute(command, me)
row = con.fetchone()
if row is not None:
userrecords["month"] = {"date": str(row[0]), "questions": int(row[1])}
command = "select min(dateStamp), sum(questionsAnswered) from leaderboard where userid = %s group by DATE_FORMAT(dateStamp, '%%Y') order by sum(questionsAnswered) desc, min(dateStamp) desc limit 1"
con.execute(command, me)
row = con.fetchone()
if row is not None:
userrecords["year"] = {"date": str(row[0]), "questions": int(row[1])}
# format dates
for e in [("year", "%Y"), ("month", "%b %Y"), ("week", "%d %b %Y"), ("day", "%d %b %Y"), ("weekday", "%d %b %Y")]:
dt = datetime.datetime.strptime(userrecords[e[0]]["date"], "%Y-%m-%d").date()
userrecords[e[0]]["date"] = dt.strftime(e[1])
if e[0] == "week":
week = datetime.timedelta(days=6)
dt = dt + week
userrecords[e[0]]["dateEnd"] = dt.strftime(e[1])
if e[0] == "weekday":
userrecords[e[0]]["weekday"] = dt.strftime("%A")
except Exception as ex:
template = "Userrecords: An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
error["status"] = message
result["userrecords"] = userrecords
if getUserRank:
userrank = {"myRank": { }, "users": { } }
PLUSMINUS = 5
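# For each timeframe below, the response carries the leading entries plus a window
# of up to PLUSMINUS places either side of the requesting user's own rank;
# rangeStart/rangeEnd clamp that window to the rows actually returned by the query.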
try:
with xs.getMysqlCon() as con:
# TODAY
if timeframe in ["today", "all"]:
userrank["users"]["today"] = getUsersByPeriod("today")
userrank["today"] = [ ]
command = "select questionsAnswered from leaderboard where userid = %s and dateStamp = curdate()"
con.execute(command, me)
try:
myAnswered = con.fetchone()[0]
except:
myAnswered = 0
command = "select count(*) from leaderboard where dateStamp = curdate() and questionsAnswered > %s"
con.execute(command, myAnswered)
row = con.fetchone()
userrank["myRank"]["today"] = row[0]+1
myrank = userrank["myRank"]["today"]
command = "select name, photo, questionsAnswered, userid from leaderboard join login using (userid) where dateStamp = curdate() order by questionsAnswered desc limit %s"
con.execute(command, max(myrank+PLUSMINUS, 11))
allResults = con.fetchall()
if myrank+PLUSMINUS > len(allResults):
rangeStart = max(0, len(allResults)-(2*PLUSMINUS)-1)
else:
rangeStart = max(0, myrank-PLUSMINUS-1)
rangeEnd = min(len(allResults),max(myrank+PLUSMINUS+1,11))
r = rangeStart+1
for row in allResults[rangeStart:rangeEnd]:
userrank["today"].append({"name": row[0], "photo": row[1], "answered": row[2], "rank": r})
r = r+1
# YESTERDAY
if timeframe in ["yesterday", "all"]:
userrank["users"]["yesterday"] = getUsersByPeriod("yesterday")
userrank["yesterday"] = [ ]
command = "select questionsAnswered from leaderboard where userid = %s and dateStamp = curdate() - interval 1 day"
con.execute(command, me)
try:
myAnswered = con.fetchone()[0]
except:
myAnswered = 0
command = "select count(*) from leaderboard where dateStamp = curdate() - interval 1 day and questionsAnswered > %s"
con.execute(command, myAnswered)
row = con.fetchone()
userrank["myRank"]["yesterday"] = row[0]+1
myrank = userrank["myRank"]["yesterday"]
command = "select name, photo, questionsAnswered, userid from leaderboard join login using (userid) where dateStamp = curdate() - interval 1 day order by questionsAnswered desc limit %s"
con.execute(command, max(myrank+PLUSMINUS, 11))
allResults = con.fetchall()
if myrank+PLUSMINUS > len(allResults):
rangeStart = max(0, len(allResults)-(2*PLUSMINUS)-1)
else:
rangeStart = max(0, myrank-PLUSMINUS-1)
rangeEnd = min(len(allResults),max(myrank+PLUSMINUS+1,11))
r = rangeStart+1
for row in allResults[rangeStart:rangeEnd]:
userrank["yesterday"].append({"name": row[0], "photo": row[1], "answered": row[2], "rank": r})
r = r+1
# LAST WEEK
if timeframe in ["sevenDays", "all"]:
userrank["users"]["sevenDays"] = getUsersByPeriod("sevenDays")
userrank["sevenDays"] = [ ]
command = "select sum(questionsAnswered) from leaderboard where userid = %s and dateStamp >= curdate() - interval 7 day"
con.execute(command, me)
try:
myAnswered = int(con.fetchone()[0])
except:
myAnswered = 0
command = "select userid from leaderboard where dateStamp >= curdate() - interval 7 day group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
myrank = con.rowcount+1
userrank["myRank"]["sevenDays"] = myrank
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where dateStamp >= curdate() - interval 7 day group by name, photo, userid order by sum(questionsAnswered) desc limit %s"
con.execute(command, max(myrank+PLUSMINUS, 11))
allResults = con.fetchall()
if myrank+PLUSMINUS > len(allResults):
rangeStart = max(0, len(allResults)-(2*PLUSMINUS)-1)
else:
rangeStart = max(0, myrank-PLUSMINUS-1)
rangeEnd = min(len(allResults),max(myrank+PLUSMINUS+1,11))
r = rangeStart+1
for row in allResults[rangeStart:rangeEnd]:
userrank["sevenDays"].append({"name": row[0], "photo": row[1], "answered": int(row[2]), "rank": r})
r = r+1
# THIS (CURRENT) WEEK
if timeframe in ["thisWeek", "all"]:
userrank["users"]["thisWeek"] = getUsersByPeriod("thisWeek")
userrank["thisWeek"] = [ ]
command = "select sum(questionsAnswered) from leaderboard where userid = %s and DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate(), '%%Y%%u')"
con.execute(command, me)
try:
myAnswered = int(con.fetchone()[0])
except:
myAnswered = 0
command = "select userid from leaderboard where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate(), '%%Y%%u') group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
myrank = con.rowcount+1
userrank["myRank"]["thisWeek"] = myrank
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate(), '%%Y%%u') group by name, photo, userid order by sum(questionsAnswered) desc limit %s"
con.execute(command, max(myrank+PLUSMINUS, 11))
allResults = con.fetchall()
if myrank+PLUSMINUS > len(allResults):
rangeStart = max(0, len(allResults)-(2*PLUSMINUS)-1)
else:
rangeStart = max(0, myrank-PLUSMINUS-1)
rangeEnd = min(len(allResults),max(myrank+PLUSMINUS+1,11))
r = rangeStart+1
for row in allResults[rangeStart:rangeEnd]:
userrank["thisWeek"].append({"name": row[0], "photo": row[1], "answered": int(row[2]), "rank": r})
r = r+1
# LAST WEEK
if timeframe in ["lastWeek", "all"]:
userrank["users"]["lastWeek"] = getUsersByPeriod("lastWeek")
userrank["lastWeek"] = [ ]
command = "select sum(questionsAnswered) from leaderboard where userid = %s and DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate() - interval 7 day, '%%Y%%u')"
con.execute(command, me)
try:
myAnswered = int(con.fetchone()[0])
except:
myAnswered = 0
command = "select userid from leaderboard where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate() - interval 7 day, '%%Y%%u') group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
myrank = con.rowcount+1
userrank["myRank"]["lastWeek"] = myrank
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate() - interval 7 day, '%%Y%%u') group by name, photo, userid order by sum(questionsAnswered) desc limit %s"
con.execute(command, max(myrank+PLUSMINUS, 11))
allResults = con.fetchall()
if myrank+PLUSMINUS > len(allResults):
rangeStart = max(0, len(allResults)-(2*PLUSMINUS)-1)
else:
rangeStart = max(0, myrank-PLUSMINUS-1)
rangeEnd = min(len(allResults),max(myrank+PLUSMINUS+1,11))
r = rangeStart+1
for row in allResults[rangeStart:rangeEnd]:
userrank["lastWeek"].append({"name": row[0], "photo": row[1], "answered": int(row[2]), "rank": r})
r = r+1
# THIS MONTH
if timeframe in ["thisMonth", "all"]:
userrank["users"]["thisMonth"] = getUsersByPeriod("thisMonth")
userrank["thisMonth"] = [ ]
command = "select sum(questionsAnswered) from leaderboard where userid = %s and DATE_FORMAT(dateStamp, '%%Y%%m') = DATE_FORMAT(curdate(), '%%Y%%m')"
con.execute(command, me)
try:
myAnswered = int(con.fetchone()[0])
except:
myAnswered = 0
command = "select userid from leaderboard where DATE_FORMAT(dateStamp, '%%Y%%m') = DATE_FORMAT(curdate(), '%%Y%%m') group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
myrank = con.rowcount+1
userrank["myRank"]["thisMonth"] = myrank
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%%Y%%m') = DATE_FORMAT(curdate(), '%%Y%%m') group by name, photo, userid order by sum(questionsAnswered) desc limit %s"
con.execute(command, max(myrank+PLUSMINUS, 11))
allResults = con.fetchall()
if myrank+PLUSMINUS > len(allResults):
rangeStart = max(0, len(allResults)-(2*PLUSMINUS)-1)
else:
rangeStart = max(0, myrank-PLUSMINUS-1)
rangeEnd = min(len(allResults),max(myrank+PLUSMINUS+1,11))
r = rangeStart+1
for row in allResults[rangeStart:rangeEnd]:
userrank["thisMonth"].append({"name": row[0], "photo": row[1], "answered": int(row[2]), "rank": r})
r = r+1
# THIS YEAR
if timeframe in ["thisYear", "all"]:
userrank["users"]["thisYear"] = getUsersByPeriod("thisYear")
userrank["thisYear"] = [ ]
command = "select sum(questionsAnswered) from leaderboard where userid = %s and DATE_FORMAT(dateStamp, '%%Y') = DATE_FORMAT(curdate(), '%%Y')"
con.execute(command, me)
try:
myAnswered = int(con.fetchone()[0])
except:
myAnswered = 0
command = "select userid from leaderboard where DATE_FORMAT(dateStamp, '%%Y') = DATE_FORMAT(curdate(), '%%Y') group by userid having sum(questionsAnswered) > %s"
con.execute(command, myAnswered)
myrank = con.rowcount+1
userrank["myRank"]["thisYear"] = myrank
command = "select name, photo, sum(questionsAnswered), userid from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%%Y') = DATE_FORMAT(curdate(), '%%Y') group by name, photo, userid order by sum(questionsAnswered) desc limit %s"
con.execute(command, max(myrank+PLUSMINUS, 11))
allResults = con.fetchall()
if myrank+PLUSMINUS > len(allResults):
rangeStart = max(0, len(allResults)-(2*PLUSMINUS)-1)
else:
rangeStart = max(0, myrank-PLUSMINUS-1)
rangeEnd = min(len(allResults),max(myrank+PLUSMINUS+1,11))
r = rangeStart+1
for row in allResults[rangeStart:rangeEnd]:
userrank["thisYear"].append({"name": row[0], "photo": row[1], "answered": int(row[2]), "rank": r})
r = r+1
except Exception as ex:
template = "Userrank: An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
error["status"] = message
result["userrank"] = userrank
if getUserTotals:
usertotals = { }
try:
with xs.getMysqlCon() as con:
# Today
command = "select questionsAnswered from leaderboard where dateStamp = curdate() and userid = %s"
con.execute(command, me)
row = con.fetchone()
if row is not None:
todayQuestions = row[0]
else:
todayQuestions = 0
# Yesterday
command = "select questionsAnswered from leaderboard where dateStamp = curdate() - interval 1 day and userid = %s"
con.execute(command, me)
row = con.fetchone()
if row is not None:
yesterdayQuestions = row[0]
else:
yesterdayQuestions = 0
# This week
if time.strftime("%A") == "Monday":
thisWeekQuestions = todayQuestions
elif time.strftime("%A") == "Tuesday":
thisWeekQuestions = todayQuestions + yesterdayQuestions
else:
command = "select sum(questionsAnswered) from leaderboard where DATE_FORMAT(dateStamp, '%%Y%%u') = DATE_FORMAT(curdate(), '%%Y%%u') and userid = %s"
con.execute(command, me)
row = con.fetchone()
if row is not None:
thisWeekQuestions = int(row[0])
else:
thisWeekQuestions = 0
# This month
if time.strftime("%d") == "01":
thisMonthQuestions = todayQuestions
elif time.strftime("%d") == "02":
thisMonthQuestions = todayQuestions + yesterdayQuestions
else:
command = "select sum(questionsAnswered) from leaderboard where DATE_FORMAT(dateStamp, '%%Y%%m') = DATE_FORMAT(curdate(), '%%Y%%m') and userid = %s"
con.execute(command, me)
row = con.fetchone()
if row is not None:
thisMonthQuestions = int(row[0])
else:
thisMonthQuestions = 0
if time.strftime("%m") == "01":
thisYearQuestions = thisMonthQuestions
else:
command = "select sum(questionsAnswered) from leaderboard where DATE_FORMAT(dateStamp, '%%Y') = DATE_FORMAT(curdate(), '%%Y') and userid = %s"
con.execute(command, me)
row = con.fetchone()
if row is not None:
thisYearQuestions = int(row[0])
else:
thisYearQuestions = 0
command = "select sum(questionsAnswered) from leaderboard where userid = %s"
con.execute(command, me)
row = con.fetchone()
if row is not None:
eternityQuestions = int(row[0])
else:
eternityQuestions = 0
usertotals["questions"] = {"today": todayQuestions, "yesterday": yesterdayQuestions, "thisWeek": thisWeekQuestions, "thisMonth": thisMonthQuestions, "thisYear": thisYearQuestions, "eternity": eternityQuestions}
except Exception as ex:
template = "Usertotals: An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
error["status"] = message
result["usertotals"] = usertotals
if getIndivRecords:
indivrecords = { }
try:
with xs.getMysqlCon() as con:
for x in ["day", "weekday"]:
if x == "day":
command = "select name, photo, questionsAnswered, dateStamp from leaderboard join login using (userid) order by questionsAnswered desc limit 1"
elif x == "weekday":
command = "select name, photo, questionsAnswered, dateStamp from leaderboard join login using (userid) where DATE_FORMAT(dateStamp, '%a') = DATE_FORMAT(curdate(), '%a') order by questionsAnswered desc limit 1"
else:
pass #shouldn't happen
con.execute(command)
row = con.fetchone()
indivrecords[x]= {"name": row[0], "photo": row[1], "answered": row[2], "date": str(row[3])}
for x in ["week", "month", "year", "eternity"]:
if x == "eternity":
command = "select name, photo, sum(questionsAnswered), null from leaderboard join login using (userid) group by name, photo, userid order by sum(questionsAnswered) desc limit 1"
con.execute(command)
else:
if x == "week":
datemask = "%Y%u"
elif x == "month":
datemask = "%Y%m"
elif x == "year":
datemask = "%Y"
else:
pass # shouldn't happen
command = "select name, photo, sum(questionsAnswered), min(dateStamp) from leaderboard join login using (userid) group by name, photo, userid, DATE_FORMAT(dateStamp, %s) order by sum(questionsAnswered) desc limit 1"
con.execute(command, datemask)
row = con.fetchone()
indivrecords[x]= {"name": row[0], "photo": row[1], "answered": int(row[2]), "date": str(row[3])}
for e in [("year", "%Y"), ("month", "%b %Y"), ("week", "%d %b %Y"), ("day", "%d %b %Y"), ("weekday", "%d %b %Y")]:
dt = datetime.datetime.strptime(indivrecords[e[0]]["date"], "%Y-%m-%d").date()
indivrecords[e[0]]["date"] = dt.strftime(e[1])
if e[0] == "week":
week = datetime.timedelta(days=6)
dt = dt + week
indivrecords[e[0]]["dateEnd"] = dt.strftime(e[1])
if e[0] == "weekday":
indivrecords[e[0]]["weekday"] = dt.strftime("%A")
except Exception as ex:
template = "Indiv Records: An exception of type {0} occured. Arguments:\n{1!r}"
message = template.format(type(ex).__name__, ex.args)
error["status"] = message
result["indivrecords"] = indivrecords
print "Content-type: application/json\n\n"
print json.dumps([result, error])
|
gpl-3.0
|
Adnn/django
|
tests/servers/test_basehttp.py
|
213
|
3129
|
from io import BytesIO
from django.core.handlers.wsgi import WSGIRequest
from django.core.servers.basehttp import WSGIRequestHandler
from django.test import SimpleTestCase
from django.test.client import RequestFactory
from django.test.utils import captured_stderr
class Stub(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
class WSGIRequestHandlerTestCase(SimpleTestCase):
def test_log_message(self):
request = WSGIRequest(RequestFactory().get('/').environ)
request.makefile = lambda *args, **kwargs: BytesIO()
handler = WSGIRequestHandler(request, '192.168.0.2', None)
with captured_stderr() as stderr:
handler.log_message('GET %s %s', 'A', 'B')
self.assertIn('] GET A B', stderr.getvalue())
def test_https(self):
request = WSGIRequest(RequestFactory().get('/').environ)
request.makefile = lambda *args, **kwargs: BytesIO()
handler = WSGIRequestHandler(request, '192.168.0.2', None)
with captured_stderr() as stderr:
handler.log_message("GET %s %s", str('\x16\x03'), "4")
self.assertIn(
"You're accessing the development server over HTTPS, "
"but it only supports HTTP.",
stderr.getvalue()
)
def test_strips_underscore_headers(self):
"""WSGIRequestHandler ignores headers containing underscores.
This follows the lead of nginx and Apache 2.4, and is to avoid
ambiguity between dashes and underscores in mapping to WSGI environ,
which can have security implications.
"""
def test_app(environ, start_response):
"""A WSGI app that just reflects its HTTP environ."""
start_response('200 OK', [])
http_environ_items = sorted(
'%s:%s' % (k, v) for k, v in environ.items()
if k.startswith('HTTP_')
)
yield (','.join(http_environ_items)).encode('utf-8')
rfile = BytesIO()
rfile.write(b"GET / HTTP/1.0\r\n")
rfile.write(b"Some-Header: good\r\n")
rfile.write(b"Some_Header: bad\r\n")
rfile.write(b"Other_Header: bad\r\n")
rfile.seek(0)
# WSGIRequestHandler closes the output file; we need to make this a
# no-op so we can still read its contents.
class UnclosableBytesIO(BytesIO):
def close(self):
pass
wfile = UnclosableBytesIO()
def makefile(mode, *a, **kw):
if mode == 'rb':
return rfile
elif mode == 'wb':
return wfile
request = Stub(makefile=makefile)
server = Stub(base_environ={}, get_app=lambda: test_app)
# We don't need to check stderr, but we don't want it in test output
with captured_stderr():
# Instantiating a handler runs the request as a side effect
WSGIRequestHandler(request, '192.168.0.2', server)
wfile.seek(0)
body = list(wfile.readlines())[-1]
self.assertEqual(body, b'HTTP_SOME_HEADER:good')
|
bsd-3-clause
|
servo/servo
|
tests/wpt/web-platform-tests/tools/third_party/pluggy/scripts/release.py
|
13
|
1959
|
"""
Release script.
"""
import argparse
import sys
from subprocess import check_call
from colorama import init, Fore
from git import Repo, Remote
def create_branch(version):
"""Create a fresh branch from upstream/master"""
repo = Repo.init(".")
if repo.is_dirty(untracked_files=True):
raise RuntimeError(f"Repository is dirty, please commit/stash your changes.")
branch_name = f"release-{version}"
print(f"{Fore.CYAN}Create {branch_name} branch from upstream master")
upstream = get_upstream(repo)
upstream.fetch()
release_branch = repo.create_head(branch_name, upstream.refs.master, force=True)
release_branch.checkout()
return repo
def get_upstream(repo: Repo) -> Remote:
"""Find upstream repository for pluggy on the remotes"""
for remote in repo.remotes:
for url in remote.urls:
if url.endswith(("pytest-dev/pluggy.git", "pytest-dev/pluggy")):
return remote
raise RuntimeError("could not find pytest-dev/pluggy remote")
def pre_release(version):
"""Generates new docs, release announcements and creates a local tag."""
create_branch(version)
changelog(version, write_out=True)
check_call(["git", "commit", "-a", "-m", f"Preparing release {version}"])
print()
print(f"{Fore.GREEN}Please push your branch to your fork and open a PR.")
def changelog(version, write_out=False):
if write_out:
addopts = []
else:
addopts = ["--draft"]
print(f"{Fore.CYAN}Generating CHANGELOG")
check_call(["towncrier", "--yes", "--version", version] + addopts)
def main():
init(autoreset=True)
parser = argparse.ArgumentParser()
parser.add_argument("version", help="Release version")
options = parser.parse_args()
try:
pre_release(options.version)
except RuntimeError as e:
print(f"{Fore.RED}ERROR: {e}")
return 1
if __name__ == "__main__":
sys.exit(main())
|
mpl-2.0
|
mstrcnvs/pybikes
|
pybikes/punpunbikeshare.py
|
2
|
2194
|
# -*- coding: utf-8 -*-
import json
from .base import BikeShareSystem, BikeShareStation
from . import utils
class Punpunbikeshare(BikeShareSystem):
sync = True
meta = {
'system': 'Smart Bike',
'company': 'BTS Group Holdings',
}
def __init__(self, tag, feed_url, meta):
super(Punpunbikeshare, self).__init__(tag, meta)
self.feed_url = feed_url
def update(self, scraper=None):
if scraper is None:
scraper = utils.PyBikesScraper()
data = json.loads(scraper.request(self.feed_url))
# Each station looks like the following.
# If there's no bikeId in a bikeDocks object, it means the dock is free.
# Status seems mostly ignored by the website, so let's not make assumptions
# on that.
# {
# "stationId":"01",
# "stationName":"foo bar",
# "location":"Chamchuri Square",
# "lat":"13.73345498316396",
# "lng":"100.52908658981323",
# "status":"1",
# "bikeDockCount":"8",
# "bikeDocks":[
# {"dockId":"9","bikeId":"0000A24C20C4","status":"1"},
# {"dockId":"10","bikeId":"0000E2CF1FC4","status":"1"},
# {"dockId":"11","bikeId":"000052B71FC4","status":"1"},
# {"dockId":"12","bikeId":"","status":"1"}
# ...
# ]
# }
stations = []
for item in data['stations']:
name = item['stationName']
latitude = float(item['lat'])
longitude = float(item['lng'])
total_slots = int(item['bikeDockCount'])
bike_uids = [b['bikeId'] for b in item['bikeDocks'] if b['bikeId']]
bikes = len(bike_uids)
free = total_slots - bikes
extra = {
'slots': total_slots,
'address': item['location'],
'uid': item['stationId'],
'bike_uids': bike_uids,
}
station = BikeShareStation(name, latitude, longitude, bikes, free,
extra)
stations.append(station)
self.stations = stations
|
lgpl-3.0
|
ashwyn/eden-message_parser
|
models/zzz_1st_run.py
|
1
|
16655
|
# -*- coding: utf-8 -*-
# 1st-run initialisation
# Set settings.base.prepopulate to 0 in Production
# (to save 1x DAL hit every page).
pop_list = settings.get_base_prepopulate()
if pop_list == 0:
pop_list = []
else:
table = db[auth.settings.table_group_name]
# The query used here takes 2/3 the time of .count().
if db(table.id > 0).select(table.id, limitby=(0, 1)).first():
pop_list = []
if not isinstance(pop_list, (list, tuple)):
pop_list = [pop_list]
if len(pop_list) > 0:
# =========================================================================
# Populate default roles and permissions
#
# Allow debug
import sys
# Shortcuts
acl = auth.permission
sysroles = auth.S3_SYSTEM_ROLES
create_role = auth.s3_create_role
update_acls = auth.s3_update_acls
default_oacl = acl.READ|acl.UPDATE
# Do not remove or change order of these 5 definitions (System Roles):
create_role("Administrator",
"System Administrator - can access & make changes to any data",
uid=sysroles.ADMIN,
system=True, protected=True)
authenticated = create_role("Authenticated",
"Authenticated - all logged-in users",
# Authenticated users can see the Map
dict(c="gis", uacl=acl.ALL, oacl=acl.ALL),
# Note the owning role for locations is set to Authenticated
# by default, so this controls the access that logged in
# users have. (In general, tables do not have a default
# owning role.)
dict(c="gis", f="location", uacl=acl.READ|acl.CREATE, oacl=acl.ALL),
# Authenticated users can only see/edit their own PR records
dict(c="pr", uacl=acl.NONE, oacl=acl.READ|acl.UPDATE),
dict(t="pr_person", uacl=acl.NONE, oacl=acl.READ|acl.UPDATE),
# But need to be able to add/edit addresses
dict(c="pr", f="person", uacl=acl.CREATE, oacl=acl.READ|acl.UPDATE),
# And access the Autocompletes
dict(c="pr", f="person_search", uacl=acl.READ),
dict(c="pr", f="pentity", uacl=acl.READ),
dict(c="msg", f="search", uacl=acl.READ),
# Authenticated users can see the Supply Catalogue
dict(c="supply", uacl=acl.READ|acl.CREATE, oacl=default_oacl),
# HRM access is controlled to just HR Staff, except for:
# Access to your own record
# - requires security policy 4+
dict(c="hrm", uacl=acl.NONE, oacl=acl.READ|acl.UPDATE),
dict(c="hrm", f="staff", uacl=acl.NONE, oacl=acl.NONE),
dict(c="hrm", f="volunteer", uacl=acl.NONE, oacl=acl.NONE),
dict(c="hrm", f="person", uacl=acl.NONE, oacl=acl.READ|acl.UPDATE),
uid=sysroles.AUTHENTICATED,
protected=True)
create_role("Anonymous",
"Unauthenticated users",
# Allow unauthenticated users to view the list of organisations
# so they can select an organisation when registering
dict(t="org_organisation", uacl=acl.READ, entity="any"),
uid=sysroles.ANONYMOUS,
protected=True)
# Primarily for Security Policy 2
create_role("Editor",
"Editor - can access & make changes to any unprotected data",
uid=sysroles.EDITOR,
system=True, protected=True)
# MapAdmin
map_admin = create_role("MapAdmin",
"MapAdmin - allowed access to edit the MapService Catalogue",
dict(c="gis", uacl=acl.ALL, oacl=acl.ALL),
dict(c="gis", f="location", uacl=acl.ALL, oacl=acl.ALL),
uid=sysroles.MAP_ADMIN,
system=True, protected=True)
# OrgAdmin (policies 6, 7 and 8)
create_role("OrgAdmin",
"OrgAdmin - allowed to manage user roles for entity realms",
uid=sysroles.ORG_ADMIN,
system=True, protected=True)
# Enable shortcuts (needed by default.py)
system_roles = auth.get_system_roles()
ADMIN = system_roles.ADMIN
AUTHENTICATED = system_roles.AUTHENTICATED
ANONYMOUS = system_roles.ANONYMOUS
EDITOR = system_roles.EDITOR
MAP_ADMIN = system_roles.MAP_ADMIN
ORG_ADMIN = system_roles.ORG_ADMIN
# =========================================================================
# Configure Scheduled Tasks
#
if settings.has_module("msg"):
# Send Messages from Outbox
# SMS every minute
s3task.schedule_task("msg_process_outbox",
vars={"contact_method":"SMS"},
period=120, # seconds
timeout=120, # seconds
repeats=0 # unlimited
)
# Emails every 5 minutes
s3task.schedule_task("msg_process_outbox",
vars={"contact_method":"EMAIL"},
period=300, # seconds
timeout=300, # seconds
repeats=0 # unlimited
)
# Daily maintenance
s3task.schedule_task("maintenance",
vars={"period":"daily"},
period=86400, # seconds, so 1/day
timeout=600, # seconds
repeats=0 # unlimited
)
# =========================================================================
# Import PrePopulate data
#
# Override authorization
auth.override = True
# Load all Models to ensure all DB tables present
s3db.load_all_models()
if settings.get_auth_opt_in_to_email():
table = db.pr_group
for team in settings.get_auth_opt_in_team_list():
table.insert(name = team, group_type = 5)
# Synchronisation
db.sync_config.insert() # Defaults are fine
# Person Registry
tablename = "pr_person"
# Add extra indexes on search fields
# Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
field = "first_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
field = "middle_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
field = "last_name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# GIS
# L0 Countries
resource = s3mgr.define_resource("gis", "location")
stylesheet = os.path.join(request.folder, "static", "formats", "s3csv", "gis", "location.xsl")
import_file = os.path.join(request.folder, "private", "templates", "locations", "countries.csv")
File = open(import_file, "r")
resource.import_xml(File, format="csv", stylesheet=stylesheet)
db(db.gis_location.level == "L0").update(owned_by_group=map_admin)
db.commit()
# Add extra index on search field
# Should work for our 3 supported databases: sqlite, MySQL & PostgreSQL
tablename = "gis_location"
field = "name"
db.executesql("CREATE INDEX %s__idx on %s(%s);" % (field, tablename, field))
# Messaging Module
if settings.has_module("msg"):
# To read inbound email, set username (email address), password, etc.
# here. Insert multiple records for multiple email sources.
db.msg_inbound_email_settings.insert(server = "imap.gmail.com",
protocol = "imap",
use_ssl = True,
port = 993,
username = "example-username",
password = "password",
delete_from_server = False
)
# Need entries for the Settings/1/Update URLs to work
db.msg_setting.insert( outgoing_sms_handler = "WEB_API" )
db.msg_modem_settings.insert( modem_baud = 115200 )
db.msg_api_settings.insert( to_variable = "to" )
db.msg_smtp_to_sms_settings.insert( address="changeme" )
db.msg_tropo_settings.insert( token_messaging = "" )
db.msg_twitter_settings.insert( pin = "" )
# Budget Module
if settings.has_module("budget"):
db.budget_parameter.insert() # Defaults are fine
# Climate Module
if settings.has_module("climate"):
s3db.climate_first_run()
# Incident Reporting System
if settings.has_module("irs"):
# Categories visible to end-users by default
table = db.irs_icategory
table.insert(code = "flood")
table.insert(code = "geophysical.landslide")
table.insert(code = "roadway.bridgeClosure")
table.insert(code = "roadway.roadwayClosure")
table.insert(code = "other.buildingCollapsed")
table.insert(code = "other.peopleTrapped")
table.insert(code = "other.powerFailure")
# Supply Module
if settings.has_module("supply"):
db.supply_catalog.insert(name = settings.get_supply_catalog_default() )
# Ensure DB population committed when running through shell
db.commit()
# =========================================================================
# PrePopulate import (from CSV)
#
# Create the bulk Importer object
bi = s3base.S3BulkImporter()
s3.import_role = bi.import_role
# Disable table protection
protected = s3mgr.PROTECTED
s3mgr.PROTECTED = []
# Additional settings for user table imports:
s3db.configure("auth_user",
onaccept = lambda form: \
auth.s3_link_to_person(user=form.vars))
s3db.add_component("auth_membership", auth_user="user_id")
# Allow population via shell scripts
if not request.env.request_method:
request.env.request_method = "GET"
_debug = settings.get_base_debug()
grandTotalStart = datetime.datetime.now()
for pop_setting in pop_list:
start = datetime.datetime.now()
bi.clear_tasks()
# Import data specific to the prepopulate setting
if isinstance(pop_setting, str):
path = os.path.join(request.folder,
"private",
"templates",
pop_setting)
if os.path.exists(path):
bi.perform_tasks(path)
else:
print >> sys.stderr, "Unable to install data %s no valid directory found" % pop_setting
elif pop_setting == 1:
# Populate with the default data
path = os.path.join(request.folder,
"private",
"templates",
"default")
bi.perform_tasks(path)
elif pop_setting == 2:
# Populate data for the regression tests
path = os.path.join(request.folder,
"private",
"templates",
"regression")
bi.perform_tasks(path)
print >> sys.stdout, "Installed Regression Test Data"
elif pop_setting == 3:
# Populate data for scalability testing
# This is different from the repeatable imports that use csv files
# This will generate millions of records of data for selected tables.
# Code needs to go here to generate a large volume of test data
pass
elif pop_setting == 4:
# Populate data for the user roles
path = os.path.join(request.folder,
"private",
"templates",
"roles")
bi.perform_tasks(path)
end = datetime.datetime.now()
duration = end - start
print >> sys.stdout, "Installed Authorisation Roles completed in %s" % \
(duration)
elif pop_setting == 10:
# Populate data for user specific data
path = os.path.join(request.folder,
"private",
"templates",
"user")
bi.perform_tasks(path)
end = datetime.datetime.now()
duration = end - start
print >> sys.stdout, "Installed Private User Data completed in %s" % \
(duration)
elif pop_setting >= 20:
# Populate data for a template
# Read the folders.cfg file and extract the folder for the specific template
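# Each non-empty line of folders.cfg is expected to pair a prepopulate id with a
# template directory, e.g. (hypothetical example):
#   "21", "MyTemplate"
# The id is matched against pop_setting and the quoted directory name is used below.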
file = os.path.join(request.folder,
"private",
"templates",
"folders.cfg")
source = open(file, "r")
values = source.readlines()
source.close()
template = ""
for templates in values:
# strip out the new line
templates = templates.strip()
if templates == "":
continue
# split at the comma
details = templates.split(",")
if len(details) == 2:
# remove any spaces and enclosing double quote
index = details[0].strip('" ')
if int(index) == pop_setting:
directory = details[1].strip('" ')
path = os.path.join(request.folder,
"private",
"templates",
directory)
template = directory
if os.path.exists(path):
bi.perform_tasks(path)
else:
print >> sys.stderr, "Unable to install template %s no template directory found" \
% index
if template == "":
print >> sys.stderr, "Unable to install a template with of an id '%s', please check 000_config and folders.cfg" \
% pop_setting
else:
end = datetime.datetime.now()
duration = end - start
try:
# Python-2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
print >> sys.stdout, "Installed template '%s' completed in %s mins" % \
(template, duration)
except AttributeError:
# older Python
print >> sys.stdout, "Installed template '%s' completed in %s" % \
(template, duration)
grandTotalEnd = datetime.datetime.now()
duration = grandTotalEnd - grandTotalStart
try:
# Python-2.7
duration = '{:.2f}'.format(duration.total_seconds()/60)
print >> sys.stdout, "Pre-populate completed in %s mins" % duration
except AttributeError:
# older Python
print >> sys.stdout, "Pre-populate completed in %s" % duration
bi.resultList = []
grandTotalEnd = datetime.datetime.now()
duration = grandTotalEnd - grandTotalStart
print >> sys.stdout, "Pre-populate completed in %s" % (duration)
for errorLine in bi.errorList:
print >> sys.stderr, errorLine
# Restore table protection
s3mgr.PROTECTED = protected
# Restore Auth
auth.override = False
# Restore view
response.view = "default/index.html"
# END =========================================================================
|
mit
|
dendisuhubdy/tensorflow
|
tensorflow/contrib/learn/python/learn/datasets/text_datasets.py
|
42
|
3051
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Text datasets (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
import numpy as np
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
DBPEDIA_URL = 'https://github.com/le-scientifique/torchDatasets/raw/master/dbpedia_csv.tar.gz'
@deprecated(None, 'See contrib/learn/README.md')
def maybe_download_dbpedia(data_dir):
"""Download if DBpedia data is not present."""
train_path = os.path.join(data_dir, 'dbpedia_csv/train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv/test.csv')
if not (gfile.Exists(train_path) and gfile.Exists(test_path)):
archive_path = base.maybe_download(
'dbpedia_csv.tar.gz', data_dir, DBPEDIA_URL)
tfile = tarfile.open(archive_path, 'r:*')
tfile.extractall(data_dir)
@deprecated(None, 'See contrib/learn/README.md')
def load_dbpedia(size='small', test_with_fake_data=False):
"""Get DBpedia datasets from CSV files."""
if not test_with_fake_data:
data_dir = os.path.join(os.getenv('TF_EXP_BASE_DIR', ''), 'dbpedia_data')
maybe_download_dbpedia(data_dir)
train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv')
if size == 'small':
# Reduce the size of original data by a factor of 1000.
base.shrink_csv(train_path, 1000)
base.shrink_csv(test_path, 1000)
train_path = train_path.replace('train.csv', 'train_small.csv')
test_path = test_path.replace('test.csv', 'test_small.csv')
else:
module_path = os.path.dirname(__file__)
train_path = os.path.join(module_path, 'data', 'text_train.csv')
test_path = os.path.join(module_path, 'data', 'text_test.csv')
train = base.load_csv_without_header(
train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
test = base.load_csv_without_header(
test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
return base.Datasets(train=train, validation=None, test=test)
|
apache-2.0
|
Ceciliae/gourmet
|
gourmet/plugins/listsaver/shoppingSaverPlugin.py
|
6
|
2280
|
from gourmet.plugin import ShoppingListPlugin
import gtk
import gourmet.recipeManager, gourmet.GourmetRecipeManager, time
from gettext import gettext as _
class ShoppingListSaver (ShoppingListPlugin):
ui_string = '''<ui>
<menubar name="ShoppingListMenuBar">
<menu name="File" action="File">
<placeholder name="ExtraFileStuff">
<menuitem action="SaveAsRecipe"/>
</placeholder>
</menu>
</menubar>
<toolbar name="ShoppingListTopToolBar">
<separator/>
<toolitem action="SaveAsRecipe"/>
</toolbar>
</ui>
'''
name = 'shopping_list_saver'
label = _('Shopping List Saver')
def setup_action_groups (self):
self.shoppingListSaverActionGroup = gtk.ActionGroup('ShoppingListSaverActionGroup')
self.shoppingListSaverActionGroup.add_actions([
('SaveAsRecipe',# name
gtk.STOCK_SAVE_AS,# stock
_('Save List as Recipe'), # text
_('<Ctrl><Shift>S'), # key-command
_('Save current shopping list as a recipe for future use'), # tooltip
self.save_as_recipe# callback
),
])
self.action_groups.append(self.shoppingListSaverActionGroup)
def save_as_recipe (self, *args):
sg = self.pluggable
rr = sg.recs
rd = gourmet.recipeManager.get_recipe_manager()
rg = gourmet.GourmetRecipeManager.get_application()
#print rr
rec = rd.add_rec(dict(title=_('Menu for %s (%s)')%(time.strftime('%x'),
time.strftime('%X')),
category=_('Menu')))
for recipe,mult in rr.values():
# Add all recipes...
rd.add_ing({
'amount':mult,
'unit':'Recipe',
'refid':recipe.id,
'recipe_id':rec.id,
'item':recipe.title,
})
for amt,unit,item in sg.extras:
# Add all extras...
rd.add_ing({
'amount':amt,
'unit':unit,
'item':item,
'ingkey':item,
})
rg.open_rec_card(rec)
|
gpl-2.0
|
eywalker/datajoint-python
|
datajoint/declare.py
|
1
|
21577
|
"""
This module hosts functions to convert DataJoint table definitions into mysql table definitions, and to
declare the corresponding mysql tables.
"""
import re
import pyparsing as pp
import logging
from .errors import DataJointError, _support_filepath_types, FILEPATH_FEATURE_SWITCH
from .attribute_adapter import get_adapter
from .utils import OrderedDict
UUID_DATA_TYPE = 'binary(16)'
MAX_TABLE_NAME_LENGTH = 64
CONSTANT_LITERALS = {'CURRENT_TIMESTAMP'} # SQL literals to be used without quotes (case insensitive)
EXTERNAL_TABLE_ROOT = '~external'
TYPE_PATTERN = {k: re.compile(v, re.I) for k, v in dict(
INTEGER=r'((tiny|small|medium|big|)int|integer)(\s*\(.+\))?(\s+unsigned)?(\s+auto_increment)?|serial$',
DECIMAL=r'(decimal|numeric)(\s*\(.+\))?(\s+unsigned)?$',
FLOAT=r'(double|float|real)(\s*\(.+\))?(\s+unsigned)?$',
STRING=r'(var)?char\s*\(.+\)$',
ENUM=r'enum\s*\(.+\)$',
BOOL=r'bool(ean)?$', # aliased to tinyint(1)
TEMPORAL=r'(date|datetime|time|timestamp|year)(\s*\(.+\))?$',
INTERNAL_BLOB=r'(tiny|small|medium|long|)blob$',
EXTERNAL_BLOB=r'blob@(?P<store>[a-z]\w*)$',
INTERNAL_ATTACH=r'attach$',
EXTERNAL_ATTACH=r'attach@(?P<store>[a-z]\w*)$',
FILEPATH=r'filepath@(?P<store>[a-z]\w*)$',
UUID=r'uuid$',
ADAPTED=r'<.+>$'
).items()}
# custom types are stored in attribute comment
SPECIAL_TYPES = {'UUID', 'INTERNAL_ATTACH', 'EXTERNAL_ATTACH', 'EXTERNAL_BLOB', 'FILEPATH', 'ADAPTED'}
NATIVE_TYPES = set(TYPE_PATTERN) - SPECIAL_TYPES
EXTERNAL_TYPES = {'EXTERNAL_ATTACH', 'EXTERNAL_BLOB', 'FILEPATH'} # data referenced by a UUID in external tables
SERIALIZED_TYPES = {'EXTERNAL_ATTACH', 'INTERNAL_ATTACH', 'EXTERNAL_BLOB', 'INTERNAL_BLOB'} # requires packing data
assert set().union(SPECIAL_TYPES, EXTERNAL_TYPES, SERIALIZED_TYPES) <= set(TYPE_PATTERN)
def match_type(attribute_type):
try:
return next(category for category, pattern in TYPE_PATTERN.items() if pattern.match(attribute_type))
except StopIteration:
raise DataJointError("Unsupported attribute type {type}".format(type=attribute_type)) from None
logger = logging.getLogger(__name__)
def build_foreign_key_parser_old():
# old-style foreign key parser. Superseded by expression-based syntax. See issue #436
# This will be deprecated in a future release.
left = pp.Literal('(').suppress()
right = pp.Literal(')').suppress()
attribute_name = pp.Word(pp.srange('[a-z]'), pp.srange('[a-z0-9_]'))
new_attrs = pp.Optional(left + pp.delimitedList(attribute_name) + right).setResultsName('new_attrs')
arrow = pp.Literal('->').suppress()
lbracket = pp.Literal('[').suppress()
rbracket = pp.Literal(']').suppress()
option = pp.Word(pp.srange('[a-zA-Z]'))
options = pp.Optional(lbracket + pp.delimitedList(option) + rbracket).setResultsName('options')
ref_table = pp.Word(pp.alphas, pp.alphanums + '._').setResultsName('ref_table')
ref_attrs = pp.Optional(left + pp.delimitedList(attribute_name) + right).setResultsName('ref_attrs')
return new_attrs + arrow + options + ref_table + ref_attrs
def build_foreign_key_parser():
arrow = pp.Literal('->').suppress()
lbracket = pp.Literal('[').suppress()
rbracket = pp.Literal(']').suppress()
option = pp.Word(pp.srange('[a-zA-Z]'))
options = pp.Optional(lbracket + pp.delimitedList(option) + rbracket).setResultsName('options')
ref_table = pp.restOfLine.setResultsName('ref_table')
return arrow + options + ref_table
def build_attribute_parser():
quoted = pp.QuotedString('"') ^ pp.QuotedString("'")
colon = pp.Literal(':').suppress()
attribute_name = pp.Word(pp.srange('[a-z]'), pp.srange('[a-z0-9_]')).setResultsName('name')
data_type = (pp.Combine(pp.Word(pp.alphas) + pp.SkipTo("#", ignore=quoted))
^ pp.QuotedString('<', endQuoteChar='>', unquoteResults=False)).setResultsName('type')
default = pp.Literal('=').suppress() + pp.SkipTo(colon, ignore=quoted).setResultsName('default')
comment = pp.Literal('#').suppress() + pp.restOfLine.setResultsName('comment')
return attribute_name + pp.Optional(default) + colon + data_type + comment
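# The resulting parser handles attribute lines of the form (hypothetical example):
#   weight = null : decimal(5,2)  # body weight in grams
# i.e. a name, an optional default, a colon, the type, and a mandatory comment.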
def build_index_parser():
left = pp.Literal('(').suppress()
right = pp.Literal(')').suppress()
unique = pp.Optional(pp.CaselessKeyword('unique')).setResultsName('unique')
index = pp.CaselessKeyword('index').suppress()
attribute_name = pp.Word(pp.srange('[a-z]'), pp.srange('[a-z0-9_]'))
return unique + index + left + pp.delimitedList(attribute_name).setResultsName('attr_list') + right
foreign_key_parser_old = build_foreign_key_parser_old()
foreign_key_parser = build_foreign_key_parser()
attribute_parser = build_attribute_parser()
index_parser = build_index_parser()
def is_foreign_key(line):
"""
:param line: a line from the table definition
:return: true if the line appears to be a foreign key definition
"""
arrow_position = line.find('->')
return arrow_position >= 0 and not any(c in line[:arrow_position] for c in '"#\'')
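# Illustrative examples (hypothetical lines):
#   is_foreign_key("-> ParentTable")                   -> True
#   is_foreign_key("notes : varchar(255)  # see ->")   -> False  (the '#' precedes the arrow)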
def compile_foreign_key(line, context, attributes, primary_key, attr_sql, foreign_key_sql, index_sql):
"""
:param line: a line from a table definition
:param context: namespace containing referenced objects
:param attributes: list of attribute names already in the declaration -- to be updated by this function
:param primary_key: None if the current foreign key is made from the dependent section. Otherwise it is the list
of primary key attributes thus far -- to be updated by the function
:param attr_sql: list of sql statements defining attributes -- to be updated by this function.
:param foreign_key_sql: list of sql statements specifying foreign key constraints -- to be updated by this function.
:param index_sql: list of INDEX declaration statements, duplicate or redundant indexes are ok.
"""
# Parse and validate
from .table import Table
from .expression import Projection
obsolete = False # See issue #436. Old style to be deprecated in a future release
try:
result = foreign_key_parser.parseString(line)
except pp.ParseException:
try:
result = foreign_key_parser_old.parseString(line)
except pp.ParseBaseException as err:
raise DataJointError('Parsing error in line "%s". %s.' % (line, err)) from None
else:
obsolete = True
try:
ref = eval(result.ref_table, context)
except NameError if obsolete else Exception:
raise DataJointError('Foreign key reference %s could not be resolved' % result.ref_table)
options = [opt.upper() for opt in result.options]
for opt in options: # check for invalid options
if opt not in {'NULLABLE', 'UNIQUE'}:
raise DataJointError('Invalid foreign key option "{opt}"'.format(opt=opt))
is_nullable = 'NULLABLE' in options
is_unique = 'UNIQUE' in options
if is_nullable and primary_key is not None:
raise DataJointError('Primary dependencies cannot be nullable in line "{line}"'.format(line=line))
if obsolete:
if not isinstance(ref, type) or not issubclass(ref, Table):
raise DataJointError('Foreign key reference %r must be a valid query' % result.ref_table)
if isinstance(ref, type) and issubclass(ref, Table):
ref = ref()
# check that dependency is of supported type
if (not isinstance(ref, (Table, Projection)) or len(ref.restriction) or
(isinstance(ref, Projection) and (not isinstance(ref._arg, Table) or len(ref._arg.restriction)))):
raise DataJointError('Dependency "%s" is not supported (yet). Use a base table or its projection.' %
result.ref_table)
if obsolete:
# for backward compatibility with old-style dependency declarations. See issue #436
if not isinstance(ref, Table):
DataJointError('Dependency "%s" is not supported. Check documentation.' % result.ref_table)
if not all(r in ref.primary_key for r in result.ref_attrs):
raise DataJointError('Invalid foreign key attributes in "%s"' % line)
try:
raise DataJointError('Duplicate attributes "{attr}" in "{line}"'.format(
attr=next(attr for attr in result.new_attrs if attr in attributes), line=line))
except StopIteration:
pass # the normal outcome
# Match the primary attributes of the referenced table to local attributes
new_attrs = list(result.new_attrs)
ref_attrs = list(result.ref_attrs)
# special case, the renamed attribute is implicit
if new_attrs and not ref_attrs:
if len(new_attrs) != 1:
raise DataJointError('Renamed foreign key must be mapped to the primary key in "%s"' % line)
if len(ref.primary_key) == 1:
# if the primary key has one attribute, allow implicit renaming
ref_attrs = ref.primary_key
else:
# if only one primary key attribute remains, then allow implicit renaming
ref_attrs = [attr for attr in ref.primary_key if attr not in attributes]
if len(ref_attrs) != 1:
raise DataJointError('Could not resolve which primary key attribute should be referenced in "%s"' % line)
if len(new_attrs) != len(ref_attrs):
raise DataJointError('Mismatched attributes in foreign key "%s"' % line)
if ref_attrs:
# convert to projected dependency
ref = ref.proj(**dict(zip(new_attrs, ref_attrs)))
# declare new foreign key attributes
base = ref._arg if isinstance(ref, Projection) else ref # base reference table
for attr, ref_attr in zip(ref.primary_key, base.primary_key):
if attr not in attributes:
attributes.append(attr)
if primary_key is not None:
primary_key.append(attr)
attr_sql.append(
base.heading[ref_attr].sql.replace(ref_attr, attr, 1).replace('NOT NULL ', '', int(is_nullable)))
# declare the foreign key
foreign_key_sql.append(
'FOREIGN KEY (`{fk}`) REFERENCES {ref} (`{pk}`) ON UPDATE CASCADE ON DELETE RESTRICT'.format(
fk='`,`'.join(ref.primary_key),
pk='`,`'.join(base.primary_key),
ref=base.full_table_name))
# declare unique index
if is_unique:
index_sql.append('UNIQUE INDEX ({attrs})'.format(attrs=','.join("`%s`" % attr for attr in ref.primary_key)))
def prepare_declare(definition, context):
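    # Hedged sketch of the expected `definition` layout (names are illustrative):
    #     # table comment
    #     subject_id   : int          # primary key attribute
    #     ---
    #     weight=null  : float        # dependent, nullable attribute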
# split definition into lines
definition = re.split(r'\s*\n\s*', definition.strip())
# check for optional table comment
table_comment = definition.pop(0)[1:].strip() if definition[0].startswith('#') else ''
if table_comment.startswith(':'):
raise DataJointError('Table comment must not start with a colon ":"')
in_key = True # parse primary keys
primary_key = []
attributes = []
attribute_sql = []
foreign_key_sql = []
index_sql = []
external_stores = []
for line in definition:
if not line or line.startswith('#'): # ignore additional comments
pass
elif line.startswith('---') or line.startswith('___'):
in_key = False # start parsing dependent attributes
elif is_foreign_key(line):
compile_foreign_key(line, context, attributes,
primary_key if in_key else None,
attribute_sql, foreign_key_sql, index_sql)
elif re.match(r'^(unique\s+)?index[^:]*$', line, re.I): # index
compile_index(line, index_sql)
else:
name, sql, store = compile_attribute(line, in_key, foreign_key_sql, context)
if store:
external_stores.append(store)
if in_key and name not in primary_key:
primary_key.append(name)
if name not in attributes:
attributes.append(name)
attribute_sql.append(sql)
return table_comment, primary_key, attribute_sql, foreign_key_sql, index_sql, external_stores
def declare(full_table_name, definition, context):
"""
Parse declaration and generate the SQL CREATE TABLE code
:param full_table_name: full name of the table
:param definition: DataJoint table definition
:param context: dictionary of objects that might be referred to in the table
:return: SQL CREATE TABLE statement, list of external stores used
"""
table_name = full_table_name.strip('`').split('.')[1]
if len(table_name) > MAX_TABLE_NAME_LENGTH:
raise DataJointError(
'Table name `{name}` exceeds the max length of {max_length}'.format(
name=table_name,
max_length=MAX_TABLE_NAME_LENGTH))
table_comment, primary_key, attribute_sql, foreign_key_sql, index_sql, external_stores = prepare_declare(
definition, context)
if not primary_key:
raise DataJointError('Table must have a primary key')
return (
'CREATE TABLE IF NOT EXISTS %s (\n' % full_table_name +
',\n'.join(attribute_sql + ['PRIMARY KEY (`' + '`,`'.join(primary_key) + '`)'] + foreign_key_sql + index_sql) +
'\n) ENGINE=InnoDB, COMMENT "%s"' % table_comment), external_stores
def _make_attribute_alter(new, old, primary_key):
"""
:param new: new attribute declarations
:param old: old attribute declarations
:param primary_key: primary key attributes
:return: list of SQL ALTER commands
"""
# parse attribute names
name_regexp = re.compile(r"^`(?P<name>\w+)`")
original_regexp = re.compile(r'COMMENT "{\s*(?P<name>\w+)\s*}')
matched = ((name_regexp.match(d), original_regexp.search(d)) for d in new)
new_names = OrderedDict((d.group('name'), n and n.group('name')) for d, n in matched)
old_names = [name_regexp.search(d).group('name') for d in old]
# verify that original names are only used once
renamed = set()
for v in new_names.values():
if v:
if v in renamed:
raise DataJointError('Alter attempted to rename attribute {%s} twice.' % v)
renamed.add(v)
# verify that all renamed attributes existed in the old definition
try:
raise DataJointError(
"Attribute {} does not exist in the original definition".format(
next(attr for attr in renamed if attr not in old_names)))
except StopIteration:
pass
# dropping attributes
to_drop = [n for n in old_names if n not in renamed and n not in new_names]
sql = ['DROP `%s`' % n for n in to_drop]
old_names = [name for name in old_names if name not in to_drop]
# add or change attributes in order
prev = None
for new_def, (new_name, old_name) in zip(new, new_names.items()):
if new_name not in primary_key:
            after = None  # when not None, the generated command must include an AFTER clause
if prev:
try:
idx = old_names.index(old_name or new_name)
except ValueError:
after = prev[0]
else:
if idx >= 1 and old_names[idx - 1] != (prev[1] or prev[0]):
after = prev[0]
if new_def not in old or after:
sql.append('{command} {new_def} {after}'.format(
command=("ADD" if (old_name or new_name) not in old_names else
"MODIFY" if not old_name else
"CHANGE `%s`" % old_name),
new_def=new_def,
after="" if after is None else "AFTER `%s`" % after))
prev = new_name, old_name
return sql
def alter(definition, old_definition, context):
"""
:param definition: new table definition
:param old_definition: current table definition
:param context: the context in which to evaluate foreign key definitions
:return: string SQL ALTER command, list of new stores used for external storage
"""
table_comment, primary_key, attribute_sql, foreign_key_sql, index_sql, external_stores = prepare_declare(
definition, context)
table_comment_, primary_key_, attribute_sql_, foreign_key_sql_, index_sql_, external_stores_ = prepare_declare(
old_definition, context)
# analyze differences between declarations
sql = list()
if primary_key != primary_key_:
raise NotImplementedError('table.alter cannot alter the primary key (yet).')
if foreign_key_sql != foreign_key_sql_:
raise NotImplementedError('table.alter cannot alter foreign keys (yet).')
if index_sql != index_sql_:
raise NotImplementedError('table.alter cannot alter indexes (yet)')
if attribute_sql != attribute_sql_:
sql.extend(_make_attribute_alter(attribute_sql, attribute_sql_, primary_key))
if table_comment != table_comment_:
sql.append('COMMENT="%s"' % table_comment)
return sql, [e for e in external_stores if e not in external_stores_]
def compile_index(line, index_sql):
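    # Illustrative example: a line such as "unique index (subject_id, session)"
    # is appended to index_sql roughly as 'unique index (`subject_id`,`session`)'.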
match = index_parser.parseString(line)
index_sql.append('{unique} index ({attrs})'.format(
unique=match.unique,
attrs=','.join('`%s`' % a for a in match.attr_list)))
def substitute_special_type(match, category, foreign_key_sql, context):
"""
:param match: dict containing with keys "type" and "comment" -- will be modified in place
:param category: attribute type category from TYPE_PATTERN
:param foreign_key_sql: list of foreign key declarations to add to
:param context: context for looking up user-defined attribute_type adapters
"""
if category == 'UUID':
match['type'] = UUID_DATA_TYPE
elif category == 'INTERNAL_ATTACH':
match['type'] = 'LONGBLOB'
elif category in EXTERNAL_TYPES:
if category == 'FILEPATH' and not _support_filepath_types():
raise DataJointError("""
The filepath data type is disabled until complete validation.
To turn it on as experimental feature, set the environment variable
{env} = TRUE or upgrade datajoint.
""".format(env=FILEPATH_FEATURE_SWITCH))
match['store'] = match['type'].split('@', 1)[1]
match['type'] = UUID_DATA_TYPE
foreign_key_sql.append(
"FOREIGN KEY (`{name}`) REFERENCES `{{database}}`.`{external_table_root}_{store}` (`hash`) "
"ON UPDATE RESTRICT ON DELETE RESTRICT".format(external_table_root=EXTERNAL_TABLE_ROOT, **match))
elif category == 'ADAPTED':
adapter = get_adapter(context, match['type'])
match['type'] = adapter.attribute_type
category = match_type(match['type'])
if category in SPECIAL_TYPES:
# recursive redefinition from user-defined datatypes.
substitute_special_type(match, category, foreign_key_sql, context)
else:
assert False, 'Unknown special type'
def compile_attribute(line, in_key, foreign_key_sql, context):
"""
Convert attribute definition from DataJoint format to SQL
:param line: attribution line
:param in_key: set to True if attribute is in primary key set
:param foreign_key_sql: the list of foreign key declarations to add to
    :param context: context in which to look up user-defined attribute type adapters
    :returns: (name, sql, store) -- attribute name, the SQL code for its declaration, and the external store name (or None)
"""
try:
match = attribute_parser.parseString(line + '#', parseAll=True)
except pp.ParseException as err:
raise DataJointError('Declaration error in position {pos} in line:\n {line}\n{msg}'.format(
line=err.args[0], pos=err.args[1], msg=err.args[2])) from None
match['comment'] = match['comment'].rstrip('#')
if 'default' not in match:
match['default'] = ''
match = {k: v.strip() for k, v in match.items()}
match['nullable'] = match['default'].lower() == 'null'
if match['nullable']:
if in_key:
raise DataJointError('Primary key attributes cannot be nullable in line "%s"' % line)
match['default'] = 'DEFAULT NULL' # nullable attributes default to null
else:
if match['default']:
quote = (match['default'].split('(')[0].upper() not in CONSTANT_LITERALS
and match['default'][0] not in '"\'')
match['default'] = 'NOT NULL DEFAULT ' + ('"%s"' if quote else "%s") % match['default']
else:
match['default'] = 'NOT NULL'
match['comment'] = match['comment'].replace('"', '\\"') # escape double quotes in comment
if match['comment'].startswith(':'):
raise DataJointError('An attribute comment must not start with a colon in comment "{comment}"'.format(**match))
category = match_type(match['type'])
if category in SPECIAL_TYPES:
match['comment'] = ':{type}:{comment}'.format(**match) # insert custom type into comment
substitute_special_type(match, category, foreign_key_sql, context)
if category in SERIALIZED_TYPES and match['default'] not in {'DEFAULT NULL', 'NOT NULL'}:
raise DataJointError(
            'The default value for a blob or attachment attribute can only be NULL in:\n{line}'.format(line=line))
sql = ('`{name}` {type} {default}' + (' COMMENT "{comment}"' if match['comment'] else '')).format(**match)
return match['name'], sql, match.get('store')
|
lgpl-2.1
|
Jionglun/w17test_2
|
static/Brython3.1.1-20150328-091302/Lib/ui/slider.py
|
603
|
2394
|
from . import widget
from browser import doc,html
class Slider(widget.Widget):
def __init__(self, id=None, label=False):
self._div_shell=html.DIV(Class="ui-slider ui-slider-horizontal ui-widget ui-widget-content ui-corner-all")
widget.Widget.__init__(self, self._div_shell, 'slider', id)
self._handle=html.A(Class="ui-slider-handle ui-state-default ui-corner-all",
Href='#', style={'left': '0px'})
self._value=0
self._isMouseDown=False
self.m0 = [None, None]
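        # Drag interaction: mousedown records the starting mouse x and handle
        # position, mousemove clamps the handle to [0, upper bound] while the
        # button is held, and mouseup ends the drag by unbinding the move handler.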
def startSlide(ev):
self._isMouseDown=True
self._upperBound = self._div_shell.offsetWidth - self._handle.offsetWidth
pos = widget.getMousePosition(ev)
self._startMouseX=pos['x']
print('left', self._handle.style.left,'ev.x',ev.x)
self._lastElementLeft = int(self._handle.left)
print('left', self._lastElementLeft)
updatePosition(ev)
def updatePosition(ev):
#pos = widget.getMousePosition(ev)
#print('mose pos',pos)
_newPos = self._lastElementLeft + ev.x - self._startMouseX
_newPos = max(0, _newPos)
_newPos = min(_newPos, self._upperBound)
self._handle.left = _newPos
print('new position',self._handle.style.left)
self._lastElementLeft = _newPos
def moving(e):
if self._isMouseDown:
updatePosition(e)
def dropCallback(e):
self._isMouseDown=False
self._handle.unbind('mousemove', moving)
self._handle.bind('mousemove', moving)
self._handle.bind('mouseup', dropCallback)
#self._handle.bind('mouseout', dropCallback)
self._handle.bind('mousedown', startSlide)
def mouseover(e):
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', '%s %s' % (_class, 'ui-state-hover'))
def mouseout(e):
self._isMouseDown=False
_class=self._handle.getAttribute('class')
self._handle.setAttribute('class', _class.replace('ui-state-hover', ''))
self._handle.bind('mouseover', mouseover)
self._handle.bind('mouseout', mouseout)
self._div_shell <= self._handle
def get_value(self):
return self._value
#def set_value(self, value):
# self._value=value
# self._handle.style.left='%spx' % value
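# Minimal usage sketch (hypothetical; the hosting element id and attachment
# method depend on the surrounding widget framework):
#     from browser import doc
#     slider = Slider(id='volume')
#     doc['controls'] <= slider._div_shell
#     print(slider.get_value())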
|
gpl-3.0
|