repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
sznekol/django-cms | cms/tests/nonroot.py | 24 | 3211 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from django.template import Template
from cms.api import create_page
from cms.models import Page
from cms.test_utils.testcases import CMSTestCase
from cms.templatetags.cms_admin import preview_link
from cms.utils.i18n import force_language
from menus.base import NavigationNode
class NonRootCase(CMSTestCase):
urls = 'cms.test_utils.project.nonroot_urls'
def setUp(self):
u = self._create_user("test", True, True)
with self.login_user_context(u):
self.create_some_pages()
def create_some_pages(self):
"""
Creates the following structure:
+ P1
| + P2
| + P3
+ P4
"""
self.page1 = create_page("page1", "nav_playground.html", "en",
published=True, in_navigation=True)
self.page2 = create_page("page2", "nav_playground.html", "en",
parent=self.page1, published=True, in_navigation=True)
self.page3 = create_page("page3", "nav_playground.html", "en",
parent=self.page2, published=True, in_navigation=True)
self.page4 = create_page("page4", "nav_playground.html", "en",
published=True, in_navigation=True)
self.all_pages = [self.page1, self.page2, self.page3, self.page4]
self.top_level_pages = [self.page1, self.page4]
self.level1_pages = [self.page2]
self.level2_pages = [self.page3]
def test_get_page_root(self):
self.assertEqual(self.get_pages_root(), '/en/content/')
def test_basic_cms_menu(self):
response = self.client.get(self.get_pages_root())
self.assertEqual(response.status_code, 200)
self.assertEqual(self.get_pages_root(), "/en/content/")
def test_show_menu(self):
context = self.get_context()
tpl = Template("{% load menu_tags %}{% show_menu %}")
tpl.render(context)
nodes = context['children']
self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
self.assertEqual(nodes[0].get_absolute_url(), "/en/content/")
def test_show_breadcrumb(self):
page2 = Page.objects.get(pk=self.page2.pk)
context = self.get_context(path=self.page2.get_absolute_url())
tpl = Template("{% load menu_tags %}{% show_breadcrumb %}")
tpl.render(context)
nodes = context['ancestors']
self.assertEqual(nodes[0].get_absolute_url(), self.get_pages_root())
self.assertEqual(nodes[0].get_absolute_url(), "/en/content/")
self.assertEqual(isinstance(nodes[0], NavigationNode), True)
self.assertEqual(nodes[1].get_absolute_url(), page2.get_absolute_url())
def test_form_multilingual_admin(self):
"""
Tests for correct form URL mangling in preview_link templatetag
"""
language = 'en'
with force_language("en"):
pages_root = self.get_pages_root()
link = preview_link(self.page2, language=language)
self.assertEqual(link, '%s%s/' % (pages_root, self.page2.get_slug()))
self.assertEqual(link, '/en/content/page2/')
| bsd-3-clause |
gilamsalem/pynfs | nfs4.0/servertests/st_setattr.py | 3 | 20543 | from nfs4_const import *
from environment import check, checklist, get_invalid_utf8strings
from nfs4lib import bitmap2list, dict2fattr
from nfs4_type import nfstime4, settime4
def _set_mode(t, c, file, stateid=None, msg=" using stateid=0",
warnlist=[]):
mode = 0740
dict = {FATTR4_MODE: mode}
ops = c.use_obj(file) + [c.setattr(dict, stateid)]
res = c.compound(ops)
check(res, msg="Setting mode to 0%o%s" % (mode, msg), warnlist=warnlist)
check_res(t, c, res, file, dict)
def _set_size(t, c, file, stateid=None, msg=" using stateid=0"):
startsize = c.do_getattr(FATTR4_SIZE, file)
newsize = startsize + 10
dict = {FATTR4_SIZE: newsize}
ops = c.use_obj(file) + [c.setattr(dict, stateid)]
res = c.compound(ops)
check(res, msg="Changing size from %i to %i%s" % (startsize, newsize, msg),
warnlist=[NFS4ERR_BAD_STATEID])
check_res(t, c, res, file, dict)
dict = {FATTR4_SIZE: 0}
ops = c.use_obj(file) + [c.setattr(dict, stateid)]
res = c.compound(ops)
check(res, msg="Changing size from %i to 0" % newsize)
check_res(t, c, res, file, dict)
def _try_readonly(t, env, path):
c = env.c1
baseops = c.use_obj(path)
supported = c.supportedAttrs(path)
attrlist = [attr for attr in env.attr_info if attr.readonly]
for attr in attrlist:
ops = baseops + [c.setattr({attr.bitnum: attr.sample})]
res = c.compound(ops)
if supported & attr.mask:
check(res, NFS4ERR_INVAL,
"SETATTR the supported read-only attribute %s" % attr.name)
else:
checklist(res, [NFS4ERR_INVAL, NFS4ERR_ATTRNOTSUPP],
"SETATTR the unsupported read-only attribute %s" % attr.name)
def _try_unsupported(t, env, path):
c = env.c1
baseops = c.use_obj(path)
supported = c.supportedAttrs(path)
attrlist = [ attr for attr in env.attr_info
if attr.writable and not supported & attr.mask ]
for attr in attrlist:
ops = baseops + [c.setattr({attr.bitnum: attr.sample})]
res = c.compound(ops)
check(res, NFS4ERR_ATTRNOTSUPP,
"SETATTR with unsupported attr %s" % attr.name)
def check_res(t, c, res, file, dict):
modified = bitmap2list(res.resarray[-1].attrsset)
for attr in modified:
if attr not in dict:
t.fail("attrsset contained %s, which was not requested" %
get_bitnumattr_dict()[attr])
newdict = c.do_getattrdict(file, dict.keys())
if newdict != dict:
t.fail("Set attrs %s not equal to got attrs %s" % (dict, newdict))
########################################
def testMode(t, env):
"""See if FATTR4_MODE is supported
FLAGS: all
CODE: MODE
"""
if not FATTR4_MODE & env.c1.supportedAttrs():
t.fail_support("Server does not support FATTR4_MODE")
def testFile(t, env):
"""SETATTR(FATTR4_MODE) on regular file
FLAGS: setattr file all
DEPEND: MODE MKFILE
CODE: SATT1r
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code)
_set_mode(t, c, fh)
def testDir(t, env):
"""SETATTR(FATTR4_MODE) on directory
FLAGS: setattr dir all
DEPEND: MODE MKDIR
CODE: SATT1d
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path)
_set_mode(t, c, path)
def testLink(t, env):
"""SETATTR(FATTR4_MODE) on symlink
FLAGS: setattr symlink all
DEPEND: MODE MKLINK
CODE: SATT1a
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4LNK)
_set_mode(t, c, path)
def testBlock(t, env):
"""SETATTR(FATTR4_MODE) on block device
FLAGS: setattr block all
DEPEND: MODE MKBLK
CODE: SATT1b
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4BLK)
_set_mode(t, c, path)
def testChar(t, env):
"""SETATTR(FATTR4_MODE) on character device
FLAGS: setattr char all
DEPEND: MODE MKCHAR
CODE: SATT1c
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4CHR)
_set_mode(t, c, path)
def testFifo(t, env):
"""SETATTR(FATTR4_MODE) on fifo
FLAGS: setattr fifo all
DEPEND: MODE MKFIFO
CODE: SATT1f
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4FIFO)
_set_mode(t, c, path)
def testSocket(t, env):
"""SETATTR(FATTR4_MODE) on socket
FLAGS: setattr socket all
DEPEND: MODE MKSOCK
CODE: SATT1s
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4SOCK)
_set_mode(t, c, path)
def testUselessStateid1(t, env):
"""SETATTR(FATTR4_MODE) on file with stateid = ones
FLAGS: setattr file all
DEPEND: MODE MKFILE
CODE: SATT2a
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code)
_set_mode(t, c, fh, env.stateid1, " using stateid=1")
def testUselessStateid2(t, env):
"""SETATTR(FATTR4_MODE) on file with openstateid
FLAGS: setattr file all
DEPEND: MODE MKFILE
CODE: SATT2b
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code)
_set_mode(t, c, fh, stateid, " using openstateid")
def testUselessStateid3(t, env):
"""SETATTR(FATTR4_MODE) on file with different file's openstateid
FLAGS: setattr file all
DEPEND: MODE MKFILE MKDIR
CODE: SATT2c
"""
c = env.c1
c.init_connection()
c.maketree([t.code, 'file'])
path = c.homedir + [t.code, t.code]
fh, stateid = c.create_confirm(t.code, path)
_set_mode(t, c, c.homedir + [t.code, 'file'], stateid,
" using bad openstateid", [NFS4ERR_BAD_STATEID])
# FRED - redo first 2 tests with _DENY_WRITE
def testResizeFile0(t, env):
"""SETATTR(FATTR4_SIZE) on file with stateid = 0
FLAGS: setattr file all
DEPEND: MKFILE
CODE: SATT3a
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code, deny=OPEN4_SHARE_DENY_NONE)
_set_size(t, c, fh)
def testResizeFile1(t, env):
"""SETATTR(FATTR4_SIZE) on file with stateid = 1
FLAGS: setattr file all
DEPEND: MKFILE
CODE: SATT3b
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code, deny=OPEN4_SHARE_DENY_NONE)
_set_size(t, c, fh, env.stateid1, " using stateid=1")
def testResizeFile2(t, env):
"""SETATTR(FATTR4_SIZE) on file with openstateid
FLAGS: setattr file all
DEPEND: MKFILE
CODE: SATT3c
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code)
_set_size(t, c, fh, stateid, " using openstateid")
def testResizeFile3(t, env):
"""SETATTR(FATTR4_SIZE) with wrong openstateid should return _BAD_STATEID
FLAGS: setattr file all
DEPEND: MKFILE MKDIR
CODE: SATT3d
"""
c = env.c1
c.init_connection()
c.maketree([t.code, 'file'])
path = c.homedir + [t.code, t.code]
fh, stateid = c.create_confirm(t.code, path)
ops = c.use_obj(c.homedir + [t.code, 'file'])
ops += [c.setattr({FATTR4_SIZE: 10}, stateid)]
res = c.compound(ops)
check(res, NFS4ERR_BAD_STATEID, "SETATTR(_SIZE) with wrong openstateid")
def testOpenModeResize(t, env):
"""SETATTR(_SIZE) on file with _ACCESS_READ should return NFS4ERR_OPENMODE
FLAGS: setattr file all
DEPEND: MKFILE
CODE: SATT4
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code, access=OPEN4_SHARE_ACCESS_READ)
ops = c.use_obj(fh) + [c.setattr({FATTR4_SIZE: 10}, stateid)]
res = c.compound(ops)
check(res, NFS4ERR_OPENMODE, "SETATTR(_SIZE) on file with _ACCESS_READ")
def testNoFh(t, env):
"""SETATTR with no (cfh) should return NFS4ERR_NOFILEHANDLE
FLAGS: setattr emptyfh all
CODE: SATT5
"""
c = env.c1
res = c.compound([c.setattr({FATTR4_SIZE:0})])
check(res, NFS4ERR_NOFILEHANDLE, "SETATTR with no <cfh>")
def testReadonlyFile(t, env):
"""SETATTR on read-only attrs should return NFS4ERR_INVAL
FLAGS: setattr file all
DEPEND: MKFILE
CODE: SATT6r
"""
c = env.c1
c.init_connection()
c.create_confirm(t.code)
_try_readonly(t, env, c.homedir + [t.code])
def testReadonlyDir(t, env):
"""SETATTR on read-only attrs should return NFS4ERR_INVAL
FLAGS: setattr dir all
DEPEND: MKDIR
CODE: SATT6d
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path)
check(res)
_try_readonly(t, env, path)
def testReadonlyLink(t, env):
"""SETATTR on read-only attrs should return NFS4ERR_INVAL
FLAGS: setattr symlink all
DEPEND: MKLINK SATT6d
CODE: SATT6a
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4LNK)
check(res)
_try_readonly(t, env, path)
def testReadonlyBlock(t, env):
"""SETATTR on read-only attrs should return NFS4ERR_INVAL
FLAGS: setattr block all
DEPEND: MKBLK SATT6d
CODE: SATT6b
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4BLK)
check(res)
_try_readonly(t, env, path)
def testReadonlyChar(t, env):
"""SETATTR on read-only attrs should return NFS4ERR_INVAL
FLAGS: setattr char all
DEPEND: MKCHAR SATT6d
CODE: SATT6c
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4CHR)
check(res)
_try_readonly(t, env, path)
def testReadonlyFifo(t, env):
"""SETATTR on read-only attrs should return NFS4ERR_INVAL
FLAGS: setattr fifo all
DEPEND: MKFIFO SATT6d
CODE: SATT6f
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4FIFO)
check(res)
_try_readonly(t, env, path)
def testReadonlySocket(t, env):
"""SETATTR on read-only attrs should return NFS4ERR_INVAL
FLAGS: setattr socket all
DEPEND: MKSOCK SATT6d
CODE: SATT6s
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4SOCK)
check(res)
_try_readonly(t, env, path)
def testInvalidAttr1(t, env):
"""SETATTR with invalid attribute data should return NFS4ERR_BADXDR
This testcase tries to set FATTR4_MODE but does not send any mode data.
FLAGS: setattr all
DEPEND: MODE MKDIR
CODE: SATT7
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path)
check(res)
badattr = dict2fattr({FATTR4_MODE: 0644})
badattr.attr_vals = ''
res = c.compound(c.use_obj(path) + [c.setattr_op(env.stateid0, badattr)])
check(res, NFS4ERR_BADXDR, "SETATTR(FATTR4_MODE) with no data")
def testInvalidAttr2(t, env):
"""SETATTR with extraneous attribute data should return NFS4ERR_BADXDR
This testcase tries to set FATTR4_MODE with extraneous attribute data
appended
FLAGS: setattr all
DEPEND: MODE MKDIR
CODE: SATT8
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path)
check(res)
badattr = dict2fattr({FATTR4_MODE: 0644})
badattr.attr_vals += 'Garbage data'
res = c.compound(c.use_obj(path) + [c.setattr_op(env.stateid0, badattr)])
check(res, NFS4ERR_BADXDR,
"SETATTR(FATTR4_MODE) with extraneous attribute data appended")
def testNonUTF8(t, env):
"""SETATTR(_MIMETYPE) with non-utf8 string should return NFS4ERR_INVAL
The only attributes that use utf8 are MIMETYPE, OWNER, GROUP, and ACL.
OWNER and GROUP are subject to too many restrictions to use.
Similarly for ACL.
FLAGS: setattr utf8
DEPEND: MKFILE
CODE: SATT9
"""
c = env.c1
c.create_confirm(t.code)
supported = c.supportedAttrs()
if not (supported & 2**FATTR4_MIMETYPE):
t.fail_support("FATTR4_MIMETYPE not supported")
baseops = c.use_obj(c.homedir + [t.code])
for name in get_invalid_utf8strings():
ops = baseops + [c.setattr({FATTR4_MIMETYPE: name})]
res = c.compound(ops)
check(res, NFS4ERR_INVAL,
"SETATTR(_MIMETYPE) with non-utf8 string %s" % repr(name))
def testInvalidTime(t, env):
"""SETATTR(FATTR4_TIME_MODIFY_SET) with invalid nseconds
nseconds larger than 999999999 are considered invalid.
SETATTR(FATTR4_TIME_MODIFY_SET) should return NFS4ERR_INVAL on
such values.
FLAGS: setattr all
DEPEND: MKDIR
CODE: SATT10
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path)
check(res)
supported = c.supportedAttrs()
if not (supported & 2**FATTR4_TIME_MODIFY_SET):
t.fail_support("FATTR4_TIME_MODIFY_SET not supported")
time = nfstime4(seconds=500000000, nseconds=int(1E9))
settime = settime4(set_it=SET_TO_CLIENT_TIME4, time=time)
ops = c.use_obj(path) + [c.setattr({FATTR4_TIME_MODIFY_SET: settime})]
res = c.compound(ops)
check(res, NFS4ERR_INVAL,
"SETATTR(FATTR4_TIME_MODIFY_SET) with nseconds=1E9")
def testUnsupportedFile(t, env):
"""SETATTR with unsupported attr should return NFS4ERR_ATTRNOTSUPP
FLAGS: setattr file all
DEPEND: MKFILE
CODE: SATT11r
"""
c = env.c1
c.init_connection()
c.create_confirm(t.code)
_try_unsupported(t, env, c.homedir + [t.code])
def testUnsupportedDir(t, env):
"""SETATTR with unsupported attr should return NFS4ERR_ATTRNOTSUPP
FLAGS: setattr dir all
DEPEND: MKDIR
CODE: SATT11d
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path)
check(res)
_try_unsupported(t, env, path)
def testUnsupportedLink(t, env):
"""SETATTR with unsupported attr should return NFS4ERR_ATTRNOTSUPP
FLAGS: setattr symlink all
DEPEND: MKLINK
CODE: SATT11a
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4LNK)
check(res)
_try_unsupported(t, env, path)
def testUnsupportedBlock(t, env):
"""SETATTR with unsupported attr should return NFS4ERR_ATTRNOTSUPP
FLAGS: setattr block all
DEPEND: MKBLK
CODE: SATT11b
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4BLK)
check(res)
_try_unsupported(t, env, path)
def testUnsupportedChar(t, env):
"""SETATTR with unsupported attr should return NFS4ERR_ATTRNOTSUPP
FLAGS: setattr char all
DEPEND: MKCHAR
CODE: SATT11c
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4CHR)
check(res)
_try_unsupported(t, env, path)
def testUnsupportedFifo(t, env):
"""SETATTR with unsupported attr should return NFS4ERR_ATTRNOTSUPP
FLAGS: setattr fifo all
DEPEND: MKFIFO
CODE: SATT11f
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4FIFO)
check(res)
_try_unsupported(t, env, path)
def testUnsupportedSocket(t, env):
"""SETATTR with unsupported attr should return NFS4ERR_ATTRNOTSUPP
FLAGS: setattr socket all
DEPEND: MKSOCK
CODE: SATT11s
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4SOCK)
check(res)
_try_unsupported(t, env, path)
def testSizeDir(t, env):
"""SETATTR(_SIZE) of a directory should return NFS4ERR_ISDIR
FLAGS: setattr dir all
DEPEND: MKDIR
CODE: SATT12d
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path)
check(res)
ops = c.use_obj(path) + [c.setattr({FATTR4_SIZE: 0})]
res = c.compound(ops)
check(res, NFS4ERR_ISDIR, "SETATTR(_SIZE) of a directory")
def testSizeLink(t, env):
"""SETATTR(FATTR4_SIZE) of a non-file object should return NFS4ERR_INVAL
FLAGS: setattr symlink all
DEPEND: MKLINK
CODE: SATT12a
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4LNK)
check(res)
ops = c.use_obj(path) + [c.setattr({FATTR4_SIZE: 0})]
res = c.compound(ops)
checklist(res, [NFS4ERR_INVAL, NFS4ERR_SYMLINK],
"SETATTR(FATTR4_SIZE) of a symlink")
def testSizeBlock(t, env):
"""SETATTR(FATTR4_SIZE) of a non-file object should return NFS4ERR_INVAL
FLAGS: setattr block all
DEPEND: MKBLK
CODE: SATT12b
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4BLK)
check(res)
ops = c.use_obj(path) + [c.setattr({FATTR4_SIZE: 0})]
res = c.compound(ops)
check(res, NFS4ERR_INVAL, "SETATTR(FATTR4_SIZE) of a block device")
def testSizeChar(t, env):
"""SETATTR(FATTR4_SIZE) of a non-file object should return NFS4ERR_INVAL
FLAGS: setattr char all
DEPEND: MKCHAR
CODE: SATT12c
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4CHR)
check(res)
ops = c.use_obj(path) + [c.setattr({FATTR4_SIZE: 0})]
res = c.compound(ops)
check(res, NFS4ERR_INVAL, "SETATTR(FATTR4_SIZE) of a character device")
def testSizeFifo(t, env):
"""SETATTR(FATTR4_SIZE) of a non-file object should return NFS4ERR_INVAL
FLAGS: setattr fifo all
DEPEND: MKFIFO
CODE: SATT12f
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4FIFO)
check(res)
ops = c.use_obj(path) + [c.setattr({FATTR4_SIZE: 0})]
res = c.compound(ops)
check(res, NFS4ERR_INVAL, "SETATTR(FATTR4_SIZE) of a fifo")
def testSizeSocket(t, env):
"""SETATTR(FATTR4_SIZE) of a non-file object should return NFS4ERR_INVAL
FLAGS: setattr socket all
DEPEND: MKSOCK
CODE: SATT12s
"""
c = env.c1
path = c.homedir + [t.code]
res = c.create_obj(path, NF4SOCK)
check(res)
ops = c.use_obj(path) + [c.setattr({FATTR4_SIZE: 0})]
res = c.compound(ops)
check(res, NFS4ERR_INVAL, "SETATTR(FATTR4_SIZE) of a socket")
def testInodeLocking(t, env):
"""SETATTR: This causes printk message due to inode locking bug
log shows - nfsd: inode locked twice during operation.
Sporadic system crashes can occur after running this test
FLAGS: setattr all
DEPEND: MODE MKDIR MKFILE
CODE: SATT13
"""
#t.fail("Test set to fail without running. Currently causes "
# "inode corruption leading to sporadic system crashes.")
c = env.c1
c.init_connection()
basedir = c.homedir + [t.code]
res = c.create_obj(basedir)
check(res)
fh, stateid = c.create_confirm(t.code, basedir + ['file'])
# In a single compound statement, setattr on dir and then
# do a state operation on a file in dir (like write or remove)
ops = c.use_obj(basedir) + [c.setattr({FATTR4_MODE:0754})]
ops += [c.lookup_op('file'), c.write_op(stateid, 0, 0, 'blahblah')]
res = c.compound(ops)
check(res, msg="SETATTR on dir and state operation on file in dir")
def testChange(t, env):
"""SETATTR(MODE) should change changeattr
FLAGS: setattr all
DEPEND: MODE MKFILE
CODE: SATT14
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code)
change = c.do_getattr(FATTR4_CHANGE, fh)
ops = c.use_obj(fh) + [c.setattr({FATTR4_MODE: 0740})]
res = c.compound(ops)
check(res)
change2 = c.do_getattr(FATTR4_CHANGE, fh)
if change == change2:
t.fail("change attribute not affected by SETATTR(mode)")
def testChangeGranularity(t, env):
"""Rapidly repeated SETATTR(MODE) should change changeattr
FLAGS: setattr all
DEPEND: MODE MKFILE
CODE: SATT15
"""
c = env.c1
c.init_connection()
fh, stateid = c.create_confirm(t.code)
ops = c.use_obj(fh) + [c.getattr([FATTR4_CHANGE])] \
+ [c.setattr({FATTR4_MODE: 0740})] + [c.getattr([FATTR4_CHANGE])] \
+ [c.setattr({FATTR4_MODE: 0741})] + [c.getattr([FATTR4_CHANGE])] \
+ [c.setattr({FATTR4_MODE: 0742})] + [c.getattr([FATTR4_CHANGE])] \
+ [c.setattr({FATTR4_MODE: 0743})] + [c.getattr([FATTR4_CHANGE])]
res = c.compound(ops)
check(res)
chattr1 = res.resarray[1].obj_attributes
chattr2 = res.resarray[3].obj_attributes
chattr3 = res.resarray[5].obj_attributes
chattr4 = res.resarray[7].obj_attributes
if chattr1 == chattr2 or chattr2 == chattr3 or chattr3 == chattr4:
t.fail("consecutive SETATTR(mode)'s don't all change change attribute")
# TODO:
# - This test would be better done with async writes; synchronous
# setattrs may not execute quickly enough to trigger a problem.
# But Linux server (in violation of spec) doesn't allow multiple
# IO's per compound! For now we test Linux server with async export
# option if we want to reproduce this problem.
# - We should try the same tests on directories.
| gpl-2.0 |
hermes-jr/npui | netprofile/netprofile/db/valid.py | 3 | 1270 | #!/usr/bin/env python
# -*- coding: utf-8; tab-width: 4; indent-tabs-mode: t -*-
#
# NetProfile: DB-centric validators
# © Copyright 2013 Alex 'Unik' Unigovsky
#
# This file is part of NetProfile.
# NetProfile is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either
# version 3 of the License, or (at your option) any later
# version.
#
# NetProfile is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General
# Public License along with NetProfile. If not, see
# <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
print_function,
absolute_import,
division
)
from sqlalchemy.orm import (
EXT_PASS,
MapperExtension
)
class Validator(MapperExtension):
def __init__(self, *args):
MapperExtension.__init__(self)
def before_insert(self, mapper, connection, instance):
self.validate(instance)
return EXT_PASS
def validate(self, instance):
pass
before_update = before_insert
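# Editor's sketch (hypothetical, not part of NetProfile): Validator is meant to
# be subclassed with a concrete validate() that raises before INSERT/UPDATE.
# Kept as a comment so the module is unchanged at import time.
#
#   class LoginValidator(Validator):
#       def validate(self, instance):
#           if not getattr(instance, 'login', None):
#               raise ValueError('login must not be empty')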
| agpl-3.0 |
cloudtools/troposphere | troposphere/s3objectlambda.py | 1 | 1143 | # Copyright (c) 2012-2021, Mark Peek <[email protected]>
# All rights reserved.
#
# See LICENSE file for full license.
#
# *** Do not modify - this file is autogenerated ***
# Resource specification version: 31.1.0
from . import AWSObject, AWSProperty
from .validators import boolean
class TransformationConfiguration(AWSProperty):
props = {
"Actions": ([str], False),
"ContentTransformation": (dict, False),
}
class ObjectLambdaConfiguration(AWSProperty):
props = {
"AllowedFeatures": ([str], False),
"CloudWatchMetricsEnabled": (boolean, False),
"SupportingAccessPoint": (str, True),
"TransformationConfigurations": ([TransformationConfiguration], True),
}
class AccessPoint(AWSObject):
resource_type = "AWS::S3ObjectLambda::AccessPoint"
props = {
"Name": (str, True),
"ObjectLambdaConfiguration": (ObjectLambdaConfiguration, False),
}
class AccessPointPolicy(AWSObject):
resource_type = "AWS::S3ObjectLambda::AccessPointPolicy"
props = {
"ObjectLambdaAccessPoint": (str, True),
"PolicyDocument": (dict, True),
}
| bsd-2-clause |
Lujeni/ansible | lib/ansible/modules/storage/purestorage/purefa_ntp.py | 21 | 3627 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Simon Dodsley ([email protected])
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: purefa_ntp
version_added: '2.8'
short_description: Configure Pure Storage FlashArray NTP settings
description:
- Set or erase NTP configuration for Pure Storage FlashArrays.
author:
- Pure Storage Ansible Team (@sdodsley) <[email protected]>
options:
state:
description:
- Create or delete NTP servers configuration
type: str
default: present
choices: [ absent, present ]
ntp_servers:
type: list
description:
- A list of up to 4 alternate NTP servers. These may include IPv4,
IPv6 or FQDNs. Invalid IP addresses will cause the module to fail.
No validation is performed for FQDNs.
- If more than 4 servers are provided, only the first 4 unique
nameservers will be used.
- If no servers are given, a default of I(0.pool.ntp.org) will be used.
extends_documentation_fragment:
- purestorage.fa
'''
EXAMPLES = r'''
- name: Delete existing NTP server entries
purefa_ntp:
state: absent
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
- name: Set array NTP servers
purefa_ntp:
state: present
ntp_servers:
- "0.pool.ntp.org"
- "1.pool.ntp.org"
- "2.pool.ntp.org"
- "3.pool.ntp.org"
fa_url: 10.10.10.2
api_token: e31060a7-21fc-e277-6240-25983c6c4592
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pure import get_system, purefa_argument_spec
def remove(duplicate):
final_list = []
for num in duplicate:
if num not in final_list:
final_list.append(num)
return final_list
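# Editor's note (illustration only): remove() de-duplicates while preserving
# order, which is what lets the later [0:4] slice keep "the first 4 unique
# nameservers" promised in DOCUMENTATION, e.g.
#   remove(["a", "b", "a", "c"]) -> ["a", "b", "c"]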
def delete_ntp(module, array):
"""Delete NTP Servers"""
changed = False
if array.get(ntpserver=True)['ntpserver'] != []:
try:
array.set(ntpserver=[])
changed = True
except Exception:
module.fail_json(msg='Deletion of NTP servers failed')
module.exit_json(changed=changed)
def create_ntp(module, array):
"""Set NTP Servers"""
changed = False
if not module.params['ntp_servers']:
module.params['ntp_servers'] = ['0.pool.ntp.org']
try:
array.set(ntpserver=module.params['ntp_servers'][0:4])
changed = True
except Exception:
module.fail_json(msg='Update of NTP servers failed')
module.exit_json(changed=changed)
def main():
argument_spec = purefa_argument_spec()
argument_spec.update(dict(
ntp_servers=dict(type='list'),
state=dict(type='str', default='present', choices=['absent', 'present']),
))
required_if = [['state', 'present', ['ntp_servers']]]
module = AnsibleModule(argument_spec,
required_if=required_if,
supports_check_mode=False)
array = get_system(module)
if module.params['state'] == 'absent':
delete_ntp(module, array)
else:
module.params['ntp_servers'] = remove(module.params['ntp_servers'])
if sorted(array.get(ntpserver=True)['ntpserver']) != sorted(module.params['ntp_servers'][0:4]):
create_ntp(module, array)
module.exit_json(changed=False)
if __name__ == '__main__':
main()
| gpl-3.0 |
Eksmo/calibre | src/calibre/gui2/store/stores/baen_webscription_plugin.py | 2 | 3430 | # -*- coding: utf-8 -*-
from __future__ import (unicode_literals, division, absolute_import, print_function)
__license__ = 'GPL 3'
__copyright__ = '2011, John Schember <[email protected]>'
__docformat__ = 'restructuredtext en'
import re
import urllib2
from contextlib import closing
from lxml import html
from PyQt4.Qt import QUrl
from calibre import browser, url_slash_cleaner
from calibre.gui2 import open_url
from calibre.gui2.store import StorePlugin
from calibre.gui2.store.basic_config import BasicStoreConfig
from calibre.gui2.store.search_result import SearchResult
from calibre.gui2.store.web_store_dialog import WebStoreDialog
class BaenWebScriptionStore(BasicStoreConfig, StorePlugin):
def open(self, parent=None, detail_item=None, external=False):
url = 'http://www.baenebooks.com/'
if external or self.config.get('open_external', False):
if detail_item:
url = url + detail_item
open_url(QUrl(url_slash_cleaner(url)))
else:
detail_url = None
if detail_item:
detail_url = url + detail_item
d = WebStoreDialog(self.gui, url, parent, detail_url)
d.setWindowTitle(self.name)
d.set_tags(self.config.get('tags', ''))
d.exec_()
def search(self, query, max_results=10, timeout=60):
url = 'http://www.baenebooks.com/searchadv.aspx?IsSubmit=true&SearchTerm=' + urllib2.quote(query)
br = browser()
counter = max_results
with closing(br.open(url, timeout=timeout)) as f:
doc = html.fromstring(f.read())
for data in doc.xpath('//table//table//table//table//tr'):
if counter <= 0:
break
id = ''.join(data.xpath('./td[1]/a/@href'))
if not id or not id.startswith('p-'):
continue
title = ''.join(data.xpath('./td[1]/a/text()'))
author = ''
cover_url = ''
price = ''
with closing(br.open('http://www.baenebooks.com/' + id.strip(), timeout=timeout/4)) as nf:
idata = html.fromstring(nf.read())
author = ''.join(idata.xpath('//span[@class="ProductNameText"]/../b/text()'))
author = author.split('by ')[-1]
price = ''.join(idata.xpath('//span[@class="variantprice"]/text()'))
a, b, price = price.partition('$')
price = b + price
pnum = ''
mo = re.search(r'p-(?P<num>\d+)-', id.strip())
if mo:
pnum = mo.group('num')
if pnum:
cover_url = 'http://www.baenebooks.com/' + ''.join(idata.xpath('//img[@id="ProductPic%s"]/@src' % pnum))
counter -= 1
s = SearchResult()
s.cover_url = cover_url
s.title = title.strip()
s.author = author.strip()
s.price = price
s.detail_item = id.strip()
s.drm = SearchResult.DRM_UNLOCKED
s.formats = 'RB, MOBI, EPUB, LIT, LRF, RTF, HTML'
yield s
| gpl-3.0 |
Tejal011089/Medsyn2_app | setup/doctype/email_settings/email_settings.py | 29 | 2140 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
from webnotes.utils import cint
class DocType:
def __init__(self,doc,doclist):
self.doc,self.doclist = doc,doclist
def validate(self):
"""Checks connectivity to email servers before saving"""
self.validate_outgoing()
self.validate_incoming()
def validate_outgoing(self):
"""Checks incoming email settings"""
self.doc.encode()
if self.doc.outgoing_mail_server:
from webnotes.utils import cint
from webnotes.utils.email_lib.smtp import SMTPServer
smtpserver = SMTPServer(login = self.doc.mail_login,
password = self.doc.mail_password,
server = self.doc.outgoing_mail_server,
port = cint(self.doc.mail_port),
use_ssl = self.doc.use_ssl
)
# exceptions are handled in session connect
sess = smtpserver.sess
def validate_incoming(self):
"""
Checks support ticket email settings
"""
if self.doc.sync_support_mails and self.doc.support_host:
from webnotes.utils.email_lib.receive import POP3Mailbox
from webnotes.model.doc import Document
import _socket, poplib
inc_email = Document('Incoming Email Settings')
inc_email.encode()
inc_email.host = self.doc.support_host
inc_email.use_ssl = self.doc.support_use_ssl
try:
err_msg = 'User Name or Support Password missing. Please enter and try again.'
if not (self.doc.support_username and self.doc.support_password):
raise AttributeError, err_msg
inc_email.username = self.doc.support_username
inc_email.password = self.doc.support_password
except AttributeError, e:
webnotes.msgprint(err_msg)
raise
pop_mb = POP3Mailbox(inc_email)
try:
pop_mb.connect()
except _socket.error, e:
# Invalid mail server -- due to refusing connection
webnotes.msgprint('Invalid POP3 Mail Server. Please rectify and try again.')
raise
except poplib.error_proto, e:
webnotes.msgprint('Invalid User Name or Support Password. Please rectify and try again.')
raise
| agpl-3.0 |
vivekpabani/projecteuler | python/243/problem_243.py | 1 | 1228 | #!/usr/bin/env python
# coding=utf-8
#--- In Progress. ---#
"""
Problem Definition :
A positive fraction whose numerator is less than its denominator is called a proper fraction.
For any denominator, d, there will be d−1 proper fractions; for example, with d = 12:
1/12 , 2/12 , 3/12 , 4/12 , 5/12 , 6/12 , 7/12 , 8/12 , 9/12 , 10/12 , 11/12 .
We shall call a fraction that cannot be cancelled down a resilient fraction.
Furthermore we shall define the resilience of a denominator, R(d), to be the ratio of its proper fractions that are resilient; for example, R(12) = 4/11 .
In fact, d = 12 is the smallest denominator having a resilience R(d) < 4/10 .
Find the smallest denominator d, having a resilience R(d) < 15499/94744 .
"""
__author__ = 'vivek'
import time
from fractions import gcd
startTime = time.clock()
for denominator in xrange(20000,50000):
resilient = 0
for numerator in xrange(1,denominator):
if gcd(numerator,denominator)==1:
resilient += 1
if float(resilient)/float(denominator-1) < 15499.0/94744:
print("found")
break
print(numerator, denominator, resilient)
print "Run time...{} secs \n".format(round(time.clock() - startTime, 4))
| apache-2.0 |
a-b/PopClip-Extensions | source/OneNote/requests/api.py | 206 | 4935 | # -*- coding: utf-8 -*-
"""
requests.api
~~~~~~~~~~~~
This module implements the Requests API.
:copyright: (c) 2012 by Kenneth Reitz.
:license: Apache2, see LICENSE for more details.
"""
from . import sessions
def request(method, url, **kwargs):
"""Constructs and sends a :class:`Request <Request>`.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
:param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': ('filename', fileobj)}``) for multipart encoding upload.
:param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send data
before giving up, as a float, or a (`connect timeout, read timeout
<user/advanced.html#timeouts>`_) tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
:param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided.
:param stream: (optional) if ``False``, the response content will be immediately downloaded.
:param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
Usage::
>>> import requests
>>> req = requests.request('GET', 'http://httpbin.org/get')
<Response [200]>
"""
session = sessions.Session()
response = session.request(method=method, url=url, **kwargs)
# By explicitly closing the session, we avoid leaving sockets open which
# can trigger a ResourceWarning in some cases, and look like a memory leak
# in others.
session.close()
return response
def get(url, **kwargs):
"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('get', url, **kwargs)
def options(url, **kwargs):
"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', True)
return request('options', url, **kwargs)
def head(url, **kwargs):
"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
kwargs.setdefault('allow_redirects', False)
return request('head', url, **kwargs)
def post(url, data=None, json=None, **kwargs):
"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json data to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('post', url, data=data, json=json, **kwargs)
def put(url, data=None, **kwargs):
"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('put', url, data=data, **kwargs)
def patch(url, data=None, **kwargs):
"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('patch', url, data=data, **kwargs)
def delete(url, **kwargs):
"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
"""
return request('delete', url, **kwargs)
| mit |
shoma/mycli | mycli/packages/tabulate.py | 16 | 38129 | # -*- coding: utf-8 -*-
"""Pretty-print tabular data."""
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from decimal import Decimal
from platform import python_version_tuple
from wcwidth import wcswidth
import re
if python_version_tuple()[0] < "3":
from itertools import izip_longest
from functools import partial
_none_type = type(None)
_int_type = int
_long_type = long
_float_type = float
_text_type = unicode
_binary_type = str
def _is_file(f):
return isinstance(f, file)
else:
from itertools import zip_longest as izip_longest
from functools import reduce, partial
_none_type = type(None)
_int_type = int
_long_type = int
_float_type = float
_text_type = str
_binary_type = bytes
import io
def _is_file(f):
return isinstance(f, io.IOBase)
__all__ = ["tabulate", "tabulate_formats", "simple_separated_format"]
__version__ = "0.7.4"
MIN_PADDING = 2
Line = namedtuple("Line", ["begin", "hline", "sep", "end"])
DataRow = namedtuple("DataRow", ["begin", "sep", "end"])
# A table structure is supposed to be:
#
# --- lineabove ---------
# headerrow
# --- linebelowheader ---
# datarow
# --- linebetweenrows ---
# ... (more datarows) ...
# --- linebetweenrows ---
# last datarow
# --- linebelow ---------
#
# TableFormat's line* elements can be
#
# - either None, if the element is not used,
# - or a Line tuple,
# - or a function: [col_widths], [col_alignments] -> string.
#
# TableFormat's *row elements can be
#
# - either None, if the element is not used,
# - or a DataRow tuple,
# - or a function: [cell_values], [col_widths], [col_alignments] -> string.
#
# padding (an integer) is the amount of white space around data values.
#
# with_header_hide:
#
# - either None, to display all table elements unconditionally,
# - or a list of elements not to be displayed if the table has column headers.
#
TableFormat = namedtuple("TableFormat", ["lineabove", "linebelowheader",
"linebetweenrows", "linebelow",
"headerrow", "datarow",
"padding", "with_header_hide"])
def _pipe_segment_with_colons(align, colwidth):
"""Return a segment of a horizontal line with optional colons which
indicate column's alignment (as in `pipe` output format)."""
w = colwidth
if align in ["right", "decimal"]:
return ('-' * (w - 1)) + ":"
elif align == "center":
return ":" + ('-' * (w - 2)) + ":"
elif align == "left":
return ":" + ('-' * (w - 1))
else:
return '-' * w
def _pipe_line_with_colons(colwidths, colaligns):
"""Return a horizontal line with optional colons to indicate column's
alignment (as in `pipe` output format)."""
segments = [_pipe_segment_with_colons(a, w) for a, w in zip(colaligns, colwidths)]
return "|" + "|".join(segments) + "|"
def _mediawiki_row_with_attrs(separator, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": 'align="right"| ',
"center": 'align="center"| ',
"decimal": 'align="right"| ' }
# hard-coded padding _around_ align attribute and value together
# rather than padding parameter which affects only the value
values_with_attrs = [' ' + alignment.get(a, '') + c + ' '
for c, a in zip(cell_values, colaligns)]
colsep = separator*2
return (separator + colsep.join(values_with_attrs)).rstrip()
def _html_row_with_attrs(celltag, cell_values, colwidths, colaligns):
alignment = { "left": '',
"right": ' style="text-align: right;"',
"center": ' style="text-align: center;"',
"decimal": ' style="text-align: right;"' }
values_with_attrs = ["<{0}{1}>{2}</{0}>".format(celltag, alignment.get(a, ''), c)
for c, a in zip(cell_values, colaligns)]
return "<tr>" + "".join(values_with_attrs).rstrip() + "</tr>"
def _latex_line_begin_tabular(colwidths, colaligns, booktabs=False):
alignment = { "left": "l", "right": "r", "center": "c", "decimal": "r" }
tabular_columns_fmt = "".join([alignment.get(a, "l") for a in colaligns])
return "\n".join(["\\begin{tabular}{" + tabular_columns_fmt + "}",
"\\toprule" if booktabs else "\hline"])
LATEX_ESCAPE_RULES = {r"&": r"\&", r"%": r"\%", r"$": r"\$", r"#": r"\#",
r"_": r"\_", r"^": r"\^{}", r"{": r"\{", r"}": r"\}",
r"~": r"\textasciitilde{}", "\\": r"\textbackslash{}",
r"<": r"\ensuremath{<}", r">": r"\ensuremath{>}"}
def _latex_row(cell_values, colwidths, colaligns):
def escape_char(c):
return LATEX_ESCAPE_RULES.get(c, c)
escaped_values = ["".join(map(escape_char, cell)) for cell in cell_values]
rowfmt = DataRow("", "&", "\\\\")
return _build_simple_row(escaped_values, rowfmt)
_table_formats = {"simple":
TableFormat(lineabove=Line("", "-", " ", ""),
linebelowheader=Line("", "-", " ", ""),
linebetweenrows=None,
linebelow=Line("", "-", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0,
with_header_hide=["lineabove", "linebelow"]),
"plain":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"grid":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("+", "=", "+", "+"),
linebetweenrows=Line("+", "-", "+", "+"),
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"fancy_grid":
TableFormat(lineabove=Line("╒", "═", "╤", "╕"),
linebelowheader=Line("╞", "═", "╪", "╡"),
linebetweenrows=Line("├", "─", "┼", "┤"),
linebelow=Line("╘", "═", "╧", "╛"),
headerrow=DataRow("│", "│", "│"),
datarow=DataRow("│", "│", "│"),
padding=1, with_header_hide=None),
"pipe":
TableFormat(lineabove=_pipe_line_with_colons,
linebelowheader=_pipe_line_with_colons,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1,
with_header_hide=["lineabove"]),
"orgtbl":
TableFormat(lineabove=None,
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"psql":
TableFormat(lineabove=Line("+", "-", "+", "+"),
linebelowheader=Line("|", "-", "+", "|"),
linebetweenrows=None,
linebelow=Line("+", "-", "+", "+"),
headerrow=DataRow("|", "|", "|"),
datarow=DataRow("|", "|", "|"),
padding=1, with_header_hide=None),
"rst":
TableFormat(lineabove=Line("", "=", " ", ""),
linebelowheader=Line("", "=", " ", ""),
linebetweenrows=None,
linebelow=Line("", "=", " ", ""),
headerrow=DataRow("", " ", ""),
datarow=DataRow("", " ", ""),
padding=0, with_header_hide=None),
"mediawiki":
TableFormat(lineabove=Line("{| class=\"wikitable\" style=\"text-align: left;\"",
"", "", "\n|+ <!-- caption -->\n|-"),
linebelowheader=Line("|-", "", "", ""),
linebetweenrows=Line("|-", "", "", ""),
linebelow=Line("|}", "", "", ""),
headerrow=partial(_mediawiki_row_with_attrs, "!"),
datarow=partial(_mediawiki_row_with_attrs, "|"),
padding=0, with_header_hide=None),
"html":
TableFormat(lineabove=Line("<table>", "", "", ""),
linebelowheader=None,
linebetweenrows=None,
linebelow=Line("</table>", "", "", ""),
headerrow=partial(_html_row_with_attrs, "th"),
datarow=partial(_html_row_with_attrs, "td"),
padding=0, with_header_hide=None),
"latex":
TableFormat(lineabove=_latex_line_begin_tabular,
linebelowheader=Line("\\hline", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\hline\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"latex_booktabs":
TableFormat(lineabove=partial(_latex_line_begin_tabular, booktabs=True),
linebelowheader=Line("\\midrule", "", "", ""),
linebetweenrows=None,
linebelow=Line("\\bottomrule\n\\end{tabular}", "", "", ""),
headerrow=_latex_row,
datarow=_latex_row,
padding=1, with_header_hide=None),
"tsv":
TableFormat(lineabove=None, linebelowheader=None,
linebetweenrows=None, linebelow=None,
headerrow=DataRow("", "\t", ""),
datarow=DataRow("", "\t", ""),
padding=0, with_header_hide=None)}
tabulate_formats = list(sorted(_table_formats.keys()))
_invisible_codes = re.compile(r"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
_invisible_codes_bytes = re.compile(b"\x1b\[\d*m|\x1b\[\d*\;\d*\;\d*m") # ANSI color codes
def simple_separated_format(separator):
"""Construct a simple TableFormat with columns separated by a separator.
>>> tsv = simple_separated_format("\\t") ; \
tabulate([["foo", 1], ["spam", 23]], tablefmt=tsv) == 'foo \\t 1\\nspam\\t23'
True
"""
return TableFormat(None, None, None, None,
headerrow=DataRow('', separator, ''),
datarow=DataRow('', separator, ''),
padding=0, with_header_hide=None)
def _isconvertible(conv, string):
try:
n = conv(string)
return True
except (ValueError, TypeError):
return False
def _isnumber(string):
"""
>>> _isnumber("123.45")
True
>>> _isnumber("123")
True
>>> _isnumber("spam")
False
"""
return _isconvertible(float, string)
def _isint(string):
"""
>>> _isint("123")
True
>>> _isint("123.45")
False
"""
return type(string) is _int_type or type(string) is _long_type or \
(isinstance(string, _binary_type) or isinstance(string, _text_type)) and \
_isconvertible(int, string)
def _type(string, has_invisible=True):
"""The least generic type (type(None), int, float, str, unicode).
>>> _type(None) is type(None)
True
>>> _type("foo") is type("")
True
>>> _type("1") is type(1)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
>>> _type('\x1b[31m42\x1b[0m') is type(42)
True
"""
if has_invisible and \
(isinstance(string, _text_type) or isinstance(string, _binary_type)):
string = _strip_invisible(string)
if string is None:
return _none_type
if isinstance(string, (bool, Decimal,)):
return _text_type
elif hasattr(string, "isoformat"): # datetime.datetime, date, and time
return _text_type
elif _isint(string):
return int
elif _isnumber(string):
return float
elif isinstance(string, _binary_type):
return _binary_type
else:
return _text_type
def _afterpoint(string):
"""Symbols after a decimal point, -1 if the string lacks the decimal point.
>>> _afterpoint("123.45")
2
>>> _afterpoint("1001")
-1
>>> _afterpoint("eggs")
-1
>>> _afterpoint("123e45")
2
"""
if _isnumber(string):
if _isint(string):
return -1
else:
pos = string.rfind(".")
pos = string.lower().rfind("e") if pos < 0 else pos
if pos >= 0:
return len(string) - pos - 1
else:
return -1 # no point
else:
return -1 # not a number
def _padleft(width, s, has_invisible=True):
"""Flush right.
>>> _padleft(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430'
True
"""
lwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return ' ' * lwidth + s
def _padright(width, s, has_invisible=True):
"""Flush left.
>>> _padright(6, '\u044f\u0439\u0446\u0430') == '\u044f\u0439\u0446\u0430 '
True
"""
rwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
return s + ' ' * rwidth
def _padboth(width, s, has_invisible=True):
"""Center string.
>>> _padboth(6, '\u044f\u0439\u0446\u0430') == ' \u044f\u0439\u0446\u0430 '
True
"""
xwidth = width - wcswidth(_strip_invisible(s) if has_invisible else s)
lwidth = xwidth // 2
rwidth = 0 if xwidth <= 0 else lwidth + xwidth % 2
return ' ' * lwidth + s + ' ' * rwidth
def _strip_invisible(s):
"Remove invisible ANSI color codes."
if isinstance(s, _text_type):
return re.sub(_invisible_codes, "", s)
else: # a bytestring
return re.sub(_invisible_codes_bytes, "", s)
def _visible_width(s):
"""Visible width of a printed string. ANSI color codes are removed.
>>> _visible_width('\x1b[31mhello\x1b[0m'), _visible_width("world")
(5, 5)
"""
if isinstance(s, _text_type) or isinstance(s, _binary_type):
return wcswidth(_strip_invisible(s))
else:
return wcswidth(_text_type(s))
def _align_column(strings, alignment, minwidth=0, has_invisible=True):
"""[string] -> [padded_string]
>>> list(map(str,_align_column(["12.345", "-1234.5", "1.23", "1234.5", "1e+234", "1.0e234"], "decimal")))
[' 12.345 ', '-1234.5 ', ' 1.23 ', ' 1234.5 ', ' 1e+234 ', ' 1.0e234']
>>> list(map(str,_align_column(['123.4', '56.7890'], None)))
['123.4', '56.7890']
"""
if alignment == "right":
strings = [s.strip() for s in strings]
padfn = _padleft
elif alignment == "center":
strings = [s.strip() for s in strings]
padfn = _padboth
elif alignment == "decimal":
decimals = [_afterpoint(s) for s in strings]
maxdecimals = max(decimals)
strings = [s + (maxdecimals - decs) * " "
for s, decs in zip(strings, decimals)]
padfn = _padleft
elif not alignment:
return strings
else:
strings = [s.strip() for s in strings]
padfn = _padright
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
maxwidth = max(max(map(width_fn, strings)), minwidth)
padded_strings = [padfn(maxwidth, s, has_invisible) for s in strings]
return padded_strings
def _more_generic(type1, type2):
types = { _none_type: 0, int: 1, float: 2, _binary_type: 3, _text_type: 4 }
invtypes = { 4: _text_type, 3: _binary_type, 2: float, 1: int, 0: _none_type }
moregeneric = max(types.get(type1, 4), types.get(type2, 4))
return invtypes[moregeneric]
def _column_type(strings, has_invisible=True):
"""The least generic type all column values are convertible to.
>>> _column_type(["1", "2"]) is _int_type
True
>>> _column_type(["1", "2.3"]) is _float_type
True
>>> _column_type(["1", "2.3", "four"]) is _text_type
True
>>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
True
>>> _column_type([None, "brux"]) is _text_type
True
>>> _column_type([1, 2, None]) is _int_type
True
>>> import datetime as dt
>>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
True
"""
types = [_type(s, has_invisible) for s in strings ]
return reduce(_more_generic, types, int)
def _format(val, valtype, floatfmt, missingval=""):
"""Format a value accoding to its type.
Unicode is supported:
>>> hrow = ['\u0431\u0443\u043a\u0432\u0430', '\u0446\u0438\u0444\u0440\u0430'] ; \
tbl = [['\u0430\u0437', 2], ['\u0431\u0443\u043a\u0438', 4]] ; \
good_result = '\\u0431\\u0443\\u043a\\u0432\\u0430 \\u0446\\u0438\\u0444\\u0440\\u0430\\n------- -------\\n\\u0430\\u0437 2\\n\\u0431\\u0443\\u043a\\u0438 4' ; \
tabulate(tbl, headers=hrow) == good_result
True
"""
if val is None:
return missingval
if valtype in [int, _text_type]:
return "{0}".format(val)
elif valtype is _binary_type:
try:
return _text_type(val, "ascii")
except TypeError:
return _text_type(val)
elif valtype is float:
return format(float(val), floatfmt)
else:
return "{0}".format(val)
def _align_header(header, alignment, width):
if alignment == "left":
return _padright(width, header)
elif alignment == "center":
return _padboth(width, header)
elif not alignment:
return "{0}".format(header)
else:
return _padleft(width, header)
def _normalize_tabular_data(tabular_data, headers):
"""Transform a supported data type to a list of lists, and a list of headers.
Supported tabular data types:
* list-of-lists or another iterable of iterables
* list of named tuples (usually used with headers="keys")
* list of dicts (usually used with headers="keys")
* list of OrderedDicts (usually used with headers="keys")
* 2D NumPy arrays
* NumPy record arrays (usually used with headers="keys")
* dict of iterables (usually used with headers="keys")
* pandas.DataFrame (usually used with headers="keys")
The first row can be used as headers if headers="firstrow",
column indices can be used as headers if headers="keys".
"""
if hasattr(tabular_data, "keys") and hasattr(tabular_data, "values"):
# dict-like and pandas.DataFrame?
if hasattr(tabular_data.values, "__call__"):
# likely a conventional dict
keys = tabular_data.keys()
rows = list(izip_longest(*tabular_data.values())) # columns have to be transposed
elif hasattr(tabular_data, "index"):
# values is a property, has .index => it's likely a pandas.DataFrame (pandas 0.11.0)
keys = tabular_data.keys()
vals = tabular_data.values # values matrix doesn't need to be transposed
names = tabular_data.index
rows = [[v]+list(row) for v,row in zip(names, vals)]
else:
raise ValueError("tabular data doesn't appear to be a dict or a DataFrame")
if headers == "keys":
headers = list(map(_text_type,keys)) # headers should be strings
else: # it's a usual an iterable of iterables, or a NumPy array
rows = list(tabular_data)
if (headers == "keys" and
hasattr(tabular_data, "dtype") and
getattr(tabular_data.dtype, "names")):
# numpy record array
headers = tabular_data.dtype.names
elif (headers == "keys"
and len(rows) > 0
and isinstance(rows[0], tuple)
and hasattr(rows[0], "_fields")):
# namedtuple
headers = list(map(_text_type, rows[0]._fields))
elif (len(rows) > 0
and isinstance(rows[0], dict)):
# dict or OrderedDict
uniq_keys = set() # implements hashed lookup
keys = [] # storage for set
if headers == "firstrow":
firstdict = rows[0] if len(rows) > 0 else {}
keys.extend(firstdict.keys())
uniq_keys.update(keys)
rows = rows[1:]
for row in rows:
for k in row.keys():
#Save unique items in input order
if k not in uniq_keys:
keys.append(k)
uniq_keys.add(k)
if headers == 'keys':
headers = keys
elif isinstance(headers, dict):
# a dict of headers for a list of dicts
headers = [headers.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
elif headers == "firstrow":
if len(rows) > 0:
headers = [firstdict.get(k, k) for k in keys]
headers = list(map(_text_type, headers))
else:
headers = []
elif headers:
raise ValueError('headers for a list of dicts is not a dict or a keyword')
rows = [[row.get(k) for k in keys] for row in rows]
elif headers == "keys" and len(rows) > 0:
# keys are column indices
headers = list(map(_text_type, range(len(rows[0]))))
# take headers from the first row if necessary
if headers == "firstrow" and len(rows) > 0:
headers = list(map(_text_type, rows[0])) # headers should be strings
rows = rows[1:]
headers = list(map(_text_type,headers))
rows = list(map(list,rows))
# pad with empty headers for initial columns if necessary
if headers and len(rows) > 0:
nhs = len(headers)
ncols = len(rows[0])
if nhs < ncols:
headers = [""]*(ncols - nhs) + headers
return rows, headers
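# Editor's sketch (assumed behaviour, mirroring the docstring above): with a
# dict of iterables and headers="keys", columns are transposed into rows.
# Kept as a comment so it does not affect the module at import time.
#
#   rows, hdrs = _normalize_tabular_data({"a": [1, 2], "b": [3, 4]}, "keys")
#   # rows == [[1, 3], [2, 4]]  (column order follows the dict's key ordering,
#   #                            which is insertion order only on Python 3.7+)
#   # hdrs == ["a", "b"]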
def table_formats():
return _table_formats.keys()
def tabulate(tabular_data, headers=[], tablefmt="simple",
floatfmt="g", numalign="decimal", stralign="left",
missingval=""):
"""Format a fixed width table for pretty printing.
>>> print(tabulate([[1, 2.34], [-56, "8.999"], ["2", "10001"]]))
--- ---------
1 2.34
-56 8.999
2 10001
--- ---------
The first required argument (`tabular_data`) can be a
list-of-lists (or another iterable of iterables), a list of named
tuples, a dictionary of iterables, an iterable of dictionaries,
a two-dimensional NumPy array, NumPy record array, or a Pandas'
dataframe.
Table headers
-------------
To print nice column headers, supply the second argument (`headers`):
- `headers` can be an explicit list of column headers
- if `headers="firstrow"`, then the first row of data is used
- if `headers="keys"`, then dictionary keys or column indices are used
Otherwise a headerless table is produced.
If the number of headers is less than the number of columns, they
are supposed to be names of the last columns. This is consistent
with the plain-text format of R and Pandas' dataframes.
>>> print(tabulate([["sex","age"],["Alice","F",24],["Bob","M",19]],
... headers="firstrow"))
sex age
----- ----- -----
Alice F 24
Bob M 19
Column alignment
----------------
`tabulate` tries to detect column types automatically, and aligns
the values properly. By default it aligns decimal points of the
numbers (or flushes integer numbers to the right), and flushes
everything else to the left. Possible column alignments
(`numalign`, `stralign`) are: "right", "center", "left", "decimal"
(only for `numalign`), and None (to disable alignment).
Table formats
-------------
`floatfmt` is a format specification used for columns which
contain numeric data with a decimal point.
`None` values are replaced with a `missingval` string:
>>> print(tabulate([["spam", 1, None],
... ["eggs", 42, 3.14],
... ["other", None, 2.7]], missingval="?"))
----- -- ----
spam 1 ?
eggs 42 3.14
other ? 2.7
----- -- ----
Various plain-text table formats (`tablefmt`) are supported:
'plain', 'simple', 'grid', 'pipe', 'orgtbl', 'rst', 'mediawiki',
'latex', and 'latex_booktabs'. Variable `tabulate_formats` contains the list of
currently supported formats.
"plain" format doesn't use any pseudographics to draw tables,
it separates columns with a double space:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "plain"))
strings numbers
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="plain"))
spam 41.9999
eggs 451
"simple" format is like Pandoc simple_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "simple"))
strings numbers
--------- ---------
spam 41.9999
eggs 451
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="simple"))
---- --------
spam 41.9999
eggs 451
---- --------
"grid" is similar to tables produced by Emacs table.el package or
Pandoc grid_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "grid"))
+-----------+-----------+
| strings | numbers |
+===========+===========+
| spam | 41.9999 |
+-----------+-----------+
| eggs | 451 |
+-----------+-----------+
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="grid"))
+------+----------+
| spam | 41.9999 |
+------+----------+
| eggs | 451 |
+------+----------+
"fancy_grid" draws a grid using box-drawing characters:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "fancy_grid"))
╒═══════════╤═══════════╕
│ strings │ numbers │
╞═══════════╪═══════════╡
│ spam │ 41.9999 │
├───────────┼───────────┤
│ eggs │ 451 │
╘═══════════╧═══════════╛
"pipe" is like tables in PHP Markdown Extra extension or Pandoc
pipe_tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "pipe"))
| strings | numbers |
|:----------|----------:|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="pipe"))
|:-----|---------:|
| spam | 41.9999 |
| eggs | 451 |
"orgtbl" is like tables in Emacs org-mode and orgtbl-mode. They
are slightly different from "pipe" format by not using colons to
define column alignment, and using a "+" sign to indicate line
intersections:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "orgtbl"))
| strings | numbers |
|-----------+-----------|
| spam | 41.9999 |
| eggs | 451 |
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="orgtbl"))
| spam | 41.9999 |
| eggs | 451 |
"rst" is like a simple table format from reStructuredText; please
    note that reStructuredText also accepts "grid" tables:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]],
... ["strings", "numbers"], "rst"))
========= =========
strings numbers
========= =========
spam 41.9999
eggs 451
========= =========
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="rst"))
==== ========
spam 41.9999
eggs 451
==== ========
"mediawiki" produces a table markup used in Wikipedia and on other
MediaWiki-based sites:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="mediawiki"))
{| class="wikitable" style="text-align: left;"
|+ <!-- caption -->
|-
! strings !! align="right"| numbers
|-
| spam || align="right"| 41.9999
|-
| eggs || align="right"| 451
|}
"html" produces HTML markup:
>>> print(tabulate([["strings", "numbers"], ["spam", 41.9999], ["eggs", "451.0"]],
... headers="firstrow", tablefmt="html"))
<table>
<tr><th>strings </th><th style="text-align: right;"> numbers</th></tr>
<tr><td>spam </td><td style="text-align: right;"> 41.9999</td></tr>
<tr><td>eggs </td><td style="text-align: right;"> 451 </td></tr>
</table>
"latex" produces a tabular environment of LaTeX document markup:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex"))
\\begin{tabular}{lr}
\\hline
spam & 41.9999 \\\\
eggs & 451 \\\\
\\hline
\\end{tabular}
"latex_booktabs" produces a tabular environment of LaTeX document markup
using the booktabs.sty package:
>>> print(tabulate([["spam", 41.9999], ["eggs", "451.0"]], tablefmt="latex_booktabs"))
\\begin{tabular}{lr}
\\toprule
spam & 41.9999 \\\\
eggs & 451 \\\\
\\bottomrule
    \\end{tabular}
"""
if tabular_data is None:
tabular_data = []
list_of_lists, headers = _normalize_tabular_data(tabular_data, headers)
# optimization: look for ANSI control codes once,
# enable smart width functions only if a control code is found
plain_text = '\n'.join(['\t'.join(map(_text_type, headers))] + \
['\t'.join(map(_text_type, row)) for row in list_of_lists])
has_invisible = re.search(_invisible_codes, plain_text)
if has_invisible:
width_fn = _visible_width
else:
width_fn = wcswidth
# format rows and columns, convert numeric values to strings
cols = list(zip(*list_of_lists))
coltypes = list(map(_column_type, cols))
cols = [[_format(v, ct, floatfmt, missingval) for v in c]
for c,ct in zip(cols, coltypes)]
# align columns
aligns = [numalign if ct in [int,float] else stralign for ct in coltypes]
minwidths = [width_fn(h) + MIN_PADDING for h in headers] if headers else [0]*len(cols)
cols = [_align_column(c, a, minw, has_invisible)
for c, a, minw in zip(cols, aligns, minwidths)]
if headers:
# align headers and add headers
t_cols = cols or [['']] * len(headers)
t_aligns = aligns or [stralign] * len(headers)
minwidths = [max(minw, width_fn(c[0])) for minw, c in zip(minwidths, t_cols)]
headers = [_align_header(h, a, minw)
for h, a, minw in zip(headers, t_aligns, minwidths)]
rows = list(zip(*cols))
else:
minwidths = [width_fn(c[0]) for c in cols]
rows = list(zip(*cols))
if not isinstance(tablefmt, TableFormat):
tablefmt = _table_formats.get(tablefmt, _table_formats["simple"])
return _format_table(tablefmt, headers, rows, minwidths, aligns)
def _build_simple_row(padded_cells, rowfmt):
"Format row according to DataRow format without padding."
begin, sep, end = rowfmt
return (begin + sep.join(padded_cells) + end).rstrip()
def _build_row(padded_cells, colwidths, colaligns, rowfmt):
"Return a string which represents a row of data cells."
if not rowfmt:
return None
if hasattr(rowfmt, "__call__"):
return rowfmt(padded_cells, colwidths, colaligns)
else:
return _build_simple_row(padded_cells, rowfmt)
def _build_line(colwidths, colaligns, linefmt):
"Return a string which represents a horizontal line."
if not linefmt:
return None
if hasattr(linefmt, "__call__"):
return linefmt(colwidths, colaligns)
else:
begin, fill, sep, end = linefmt
cells = [fill*w for w in colwidths]
return _build_simple_row(cells, (begin, sep, end))
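# Editor's illustration (not part of the original module): with a 4-tuple line
# format (begin, fill, sep, end) such as ("+", "-", "+", "+") and two columns of
# widths 6 and 8, _build_line produces a "grid"-style separator line:
#   _build_line([6, 8], ["left", "right"], ("+", "-", "+", "+"))
#   # -> '+------+--------+'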
def _pad_row(cells, padding):
if cells:
pad = " "*padding
padded_cells = [pad + cell + pad for cell in cells]
return padded_cells
else:
return cells
def _format_table(fmt, headers, rows, colwidths, colaligns):
"""Produce a plain-text representation of the table."""
lines = []
hidden = fmt.with_header_hide if (headers and fmt.with_header_hide) else []
pad = fmt.padding
headerrow = fmt.headerrow
padded_widths = [(w + 2*pad) for w in colwidths]
padded_headers = _pad_row(headers, pad)
padded_rows = [_pad_row(row, pad) for row in rows]
if fmt.lineabove and "lineabove" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.lineabove))
if padded_headers:
lines.append(_build_row(padded_headers, padded_widths, colaligns, headerrow))
if fmt.linebelowheader and "linebelowheader" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelowheader))
if padded_rows and fmt.linebetweenrows and "linebetweenrows" not in hidden:
# initial rows with a line below
for row in padded_rows[:-1]:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
lines.append(_build_line(padded_widths, colaligns, fmt.linebetweenrows))
# the last row without a line below
lines.append(_build_row(padded_rows[-1], padded_widths, colaligns, fmt.datarow))
else:
for row in padded_rows:
lines.append(_build_row(row, padded_widths, colaligns, fmt.datarow))
if fmt.linebelow and "linebelow" not in hidden:
lines.append(_build_line(padded_widths, colaligns, fmt.linebelow))
return "\n".join(lines)
def _main():
"""\
Usage: tabulate [options] [FILE ...]
Pretty-print tabular data. See also https://bitbucket.org/astanin/python-tabulate
FILE a filename of the file with tabular data;
if "-" or missing, read data from stdin.
Options:
-h, --help show this message
-1, --header use the first row of data as a table header
-s REGEXP, --sep REGEXP use a custom column separator (default: whitespace)
-f FMT, --format FMT set output table format; supported formats:
plain, simple, grid, fancy_grid, pipe, orgtbl,
rst, mediawiki, html, latex, latex_booktabs, tsv
(default: simple)
"""
import getopt
import sys
import textwrap
usage = textwrap.dedent(_main.__doc__)
try:
opts, args = getopt.getopt(sys.argv[1:],
"h1f:s:",
["help", "header", "format", "separator"])
except getopt.GetoptError as e:
print(e)
print(usage)
sys.exit(2)
headers = []
tablefmt = "simple"
sep = r"\s+"
for opt, value in opts:
if opt in ["-1", "--header"]:
headers = "firstrow"
elif opt in ["-f", "--format"]:
if value not in tabulate_formats:
print("%s is not a supported table format" % value)
print(usage)
sys.exit(3)
tablefmt = value
elif opt in ["-s", "--sep"]:
sep = value
elif opt in ["-h", "--help"]:
print(usage)
sys.exit(0)
files = [sys.stdin] if not args else args
for f in files:
if f == "-":
f = sys.stdin
if _is_file(f):
_pprint_file(f, headers=headers, tablefmt=tablefmt, sep=sep)
else:
with open(f) as fobj:
                _pprint_file(fobj, headers=headers, tablefmt=tablefmt, sep=sep)
def _pprint_file(fobject, headers, tablefmt, sep):
rows = fobject.readlines()
table = [re.split(sep, r.rstrip()) for r in rows]
print(tabulate(table, headers, tablefmt))
if __name__ == "__main__":
_main()
| bsd-3-clause |
gVallverdu/pymatgen | pymatgen/symmetry/analyzer.py | 1 | 64034 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
An interface to the excellent spglib library by Atsushi Togo
(http://spglib.sourceforge.net/) for pymatgen.
v1.0 - Now works with both ordered and disordered structure.
v2.0 - Updated for spglib 1.6.
v3.0 - pymatgen no longer ships with spglib. Instead, spglib (the python
version) is now a dependency and the SpacegroupAnalyzer merely serves
as an interface to spglib for pymatgen Structures.
"""
import itertools
import logging
from collections import defaultdict
import copy
import math
from math import cos
from math import sin
from fractions import Fraction
import numpy as np
import spglib
from pymatgen.core.structure import Structure, Molecule
from pymatgen.symmetry.structure import SymmetrizedStructure
from pymatgen.core.lattice import Lattice
from pymatgen.core.structure import PeriodicSite
from pymatgen.core.operations import SymmOp
from pymatgen.util.coord import find_in_coord_list, pbc_diff
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "3.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "[email protected]"
__date__ = "May 14, 2016"
logger = logging.getLogger(__name__)
class SpacegroupAnalyzer:
"""
Takes a pymatgen.core.structure.Structure object and a symprec.
Uses spglib to perform various symmetry finding operations.
"""
def __init__(self, structure, symprec=0.01, angle_tolerance=5.0):
"""
Args:
structure (Structure/IStructure): Structure to find symmetry
symprec (float): Tolerance for symmetry finding. Defaults to 0.01,
which is fairly strict and works well for properly refined
structures with atoms in the proper symmetry coordinates. For
structures with slight deviations from their proper atomic
positions (e.g., structures relaxed with electronic structure
codes), a looser tolerance of 0.1 (the value used in Materials
Project) is often needed.
angle_tolerance (float): Angle tolerance for symmetry finding.
"""
self._symprec = symprec
self._angle_tol = angle_tolerance
self._structure = structure
latt = structure.lattice.matrix
positions = structure.frac_coords
unique_species = []
zs = []
magmoms = []
for species, g in itertools.groupby(structure,
key=lambda s: s.species):
if species in unique_species:
ind = unique_species.index(species)
zs.extend([ind + 1] * len(tuple(g)))
else:
unique_species.append(species)
zs.extend([len(unique_species)] * len(tuple(g)))
for site in structure:
if hasattr(site, 'magmom'):
magmoms.append(site.magmom)
elif site.is_ordered and hasattr(site.specie, 'spin'):
magmoms.append(site.specie.spin)
else:
magmoms.append(0)
self._unique_species = unique_species
self._numbers = zs
        # Store magmoms (zero where no magmom/spin is specified) alongside the cell.
self._cell = latt, positions, zs, magmoms
self._space_group_data = spglib.get_symmetry_dataset(
self._cell, symprec=self._symprec, angle_tolerance=angle_tolerance)
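    # Usage sketch (editor's illustration, not part of the original module; the
    # input file name is hypothetical):
    #   from pymatgen.core.structure import Structure
    #   struct = Structure.from_file("POSCAR")
    #   sga = SpacegroupAnalyzer(struct, symprec=0.1)  # looser tolerance for relaxed cells
    #   sga.get_space_group_symbol()   # e.g. "Pnma"
    #   sga.get_space_group_number()   # e.g. 62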
def get_space_group_symbol(self):
"""
Get the spacegroup symbol (e.g., Pnma) for structure.
Returns:
(str): Spacegroup symbol for structure.
"""
return self._space_group_data["international"]
def get_space_group_number(self):
"""
Get the international spacegroup number (e.g., 62) for structure.
Returns:
(int): International spacegroup number for structure.
"""
return int(self._space_group_data["number"])
def get_space_group_operations(self):
"""
Get the SpacegroupOperations for the Structure.
Returns:
            SpacegroupOperations object.
"""
return SpacegroupOperations(self.get_space_group_symbol(),
self.get_space_group_number(),
self.get_symmetry_operations())
def get_hall(self):
"""
Returns Hall symbol for structure.
Returns:
(str): Hall symbol
"""
return self._space_group_data["hall"]
def get_point_group_symbol(self):
"""
Get the point group associated with the structure.
Returns:
(Pointgroup): Point group for structure.
"""
rotations = self._space_group_data["rotations"]
# passing a 0-length rotations list to spglib can segfault
if len(rotations) == 0:
return '1'
return spglib.get_pointgroup(rotations)[0].strip()
def get_crystal_system(self):
"""
Get the crystal system for the structure, e.g., (triclinic,
orthorhombic, cubic, etc.).
Returns:
(str): Crystal system for structure or None if system cannot be detected.
"""
n = self._space_group_data["number"]
def f(i, j):
return i <= n <= j
cs = {"triclinic": (1, 2), "monoclinic": (3, 15),
"orthorhombic": (16, 74), "tetragonal": (75, 142),
"trigonal": (143, 167), "hexagonal": (168, 194),
"cubic": (195, 230)}
        crystal_system = None
        for k, v in cs.items():
            if f(*v):
                crystal_system = k
                break
        return crystal_system
def get_lattice_type(self):
"""
        Get the lattice type for the structure, e.g., (triclinic,
        orthorhombic, cubic, etc.). This is the same as the
        crystal system with the exception of the hexagonal/rhombohedral
        lattice.
Returns:
(str): Lattice type for structure or None if type cannot be detected.
"""
n = self._space_group_data["number"]
system = self.get_crystal_system()
if n in [146, 148, 155, 160, 161, 166, 167]:
return "rhombohedral"
elif system == "trigonal":
return "hexagonal"
else:
return system
def get_symmetry_dataset(self):
"""
Returns the symmetry dataset as a dict.
Returns:
(dict): With the following properties:
number: International space group number
international: International symbol
hall: Hall symbol
transformation_matrix: Transformation matrix from lattice of
input cell to Bravais lattice L^bravais = L^original * Tmat
origin shift: Origin shift in the setting of "Bravais lattice"
rotations, translations: Rotation matrices and translation
vectors. Space group operations are obtained by
[(r,t) for r, t in zip(rotations, translations)]
wyckoffs: Wyckoff letters
"""
return self._space_group_data
def _get_symmetry(self):
"""
Get the symmetry operations associated with the structure.
Returns:
Symmetry operations as a tuple of two equal length sequences.
(rotations, translations). "rotations" is the numpy integer array
of the rotation matrices for scaled positions
"translations" gives the numpy float64 array of the translation
vectors in scaled positions.
"""
d = spglib.get_symmetry(self._cell, symprec=self._symprec,
angle_tolerance=self._angle_tol)
# Sometimes spglib returns small translation vectors, e.g.
# [1e-4, 2e-4, 1e-4]
# (these are in fractional coordinates, so should be small denominator
# fractions)
trans = []
for t in d["translations"]:
trans.append([float(Fraction.from_float(c).limit_denominator(1000))
for c in t])
trans = np.array(trans)
# fractional translations of 1 are more simply 0
trans[np.abs(trans) == 1] = 0
return d["rotations"], trans
def get_symmetry_operations(self, cartesian=False):
"""
Return symmetry operations as a list of SymmOp objects.
By default returns fractional coord symmops.
But cartesian can be returned too.
Returns:
([SymmOp]): List of symmetry operations.
"""
rotation, translation = self._get_symmetry()
symmops = []
mat = self._structure.lattice.matrix.T
invmat = np.linalg.inv(mat)
for rot, trans in zip(rotation, translation):
if cartesian:
rot = np.dot(mat, np.dot(rot, invmat))
trans = np.dot(trans, self._structure.lattice.matrix)
op = SymmOp.from_rotation_and_translation(rot, trans)
symmops.append(op)
return symmops
def get_point_group_operations(self, cartesian=False):
"""
Return symmetry operations as a list of SymmOp objects.
By default returns fractional coord symmops.
But cartesian can be returned too.
Args:
cartesian (bool): Whether to return SymmOps as cartesian or
direct coordinate operations.
Returns:
([SymmOp]): List of point group symmetry operations.
"""
rotation, translation = self._get_symmetry()
symmops = []
mat = self._structure.lattice.matrix.T
invmat = np.linalg.inv(mat)
for rot in rotation:
if cartesian:
rot = np.dot(mat, np.dot(rot, invmat))
op = SymmOp.from_rotation_and_translation(rot, np.array([0, 0, 0]))
symmops.append(op)
return symmops
def get_symmetrized_structure(self):
"""
Get a symmetrized structure. A symmetrized structure is one where the
sites have been grouped into symmetrically equivalent groups.
Returns:
:class:`pymatgen.symmetry.structure.SymmetrizedStructure` object.
"""
ds = self.get_symmetry_dataset()
sg = SpacegroupOperations(self.get_space_group_symbol(),
self.get_space_group_number(),
self.get_symmetry_operations())
return SymmetrizedStructure(self._structure, sg,
ds["equivalent_atoms"],
ds["wyckoffs"])
def get_refined_structure(self):
"""
Get the refined structure based on detected symmetry. The refined
structure is a *conventional* cell setting with atoms moved to the
expected symmetry positions.
Returns:
Refined structure.
"""
# Atomic positions have to be specified by scaled positions for spglib.
lattice, scaled_positions, numbers \
= spglib.refine_cell(self._cell, self._symprec, self._angle_tol)
species = [self._unique_species[i - 1] for i in numbers]
s = Structure(lattice, species, scaled_positions)
return s.get_sorted_structure()
def find_primitive(self):
"""
Find a primitive version of the unit cell.
Returns:
A primitive cell in the input cell is searched and returned
as an Structure object. If no primitive cell is found, None is
returned.
"""
lattice, scaled_positions, numbers = spglib.find_primitive(
self._cell, symprec=self._symprec)
species = [self._unique_species[i - 1] for i in numbers]
return Structure(lattice, species, scaled_positions,
to_unit_cell=True).get_reduced_structure()
def get_ir_reciprocal_mesh(self, mesh=(10, 10, 10), is_shift=(0, 0, 0)):
"""
        k-point mesh of the Brillouin zone generated taking into account
        symmetry. The method returns the irreducible kpoints of the mesh
        and their weights.
Args:
mesh (3x1 array): The number of kpoint for the mesh needed in
each direction
is_shift (3x1 array): Whether to shift the kpoint grid. (1, 1,
1) means all points are shifted by 0.5, 0.5, 0.5.
Returns:
A list of irreducible kpoints and their weights as a list of
tuples [(ir_kpoint, weight)], with ir_kpoint given
in fractional coordinates
"""
shift = np.array([1 if i else 0 for i in is_shift])
mapping, grid = spglib.get_ir_reciprocal_mesh(
np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
results = []
for i, count in zip(*np.unique(mapping, return_counts=True)):
results.append(((grid[i] + shift * (0.5, 0.5, 0.5)) / mesh,
count))
return results
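    # Usage sketch (editor's illustration, not part of the original module):
    #   sga = SpacegroupAnalyzer(struct)           # struct: any pymatgen Structure
    #   for kpt, weight in sga.get_ir_reciprocal_mesh(mesh=(8, 8, 8)):
    #       print(kpt, weight)                     # fractional k-point, multiplicity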
def get_conventional_to_primitive_transformation_matrix(self, international_monoclinic=True):
"""
Gives the transformation matrix to transform a conventional
        unit cell to a primitive cell according to certain standards.
        The standards are defined in Setyawan, W., & Curtarolo, S. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
Returns:
Transformation matrix to go from conventional to primitive cell
"""
conv = self.get_conventional_standard_structure(
international_monoclinic=international_monoclinic)
lattice = self.get_lattice_type()
if "P" in self.get_space_group_symbol() or lattice == "hexagonal":
return np.eye(3)
if lattice == "rhombohedral":
# check if the conventional representation is hexagonal or
# rhombohedral
lengths = conv.lattice.lengths
if abs(lengths[0] - lengths[2]) < 0.0001:
                transf = np.eye(3)
else:
transf = np.array([[-1, 1, 1], [2, 1, 1], [-1, -2, 1]],
dtype=np.float) / 3
elif "I" in self.get_space_group_symbol():
transf = np.array([[-1, 1, 1], [1, -1, 1], [1, 1, -1]],
dtype=np.float) / 2
elif "F" in self.get_space_group_symbol():
transf = np.array([[0, 1, 1], [1, 0, 1], [1, 1, 0]],
dtype=np.float) / 2
elif "C" in self.get_space_group_symbol() or "A" in self.get_space_group_symbol():
if self.get_crystal_system() == "monoclinic":
transf = np.array([[1, 1, 0], [-1, 1, 0], [0, 0, 2]],
dtype=np.float) / 2
else:
transf = np.array([[1, -1, 0], [1, 1, 0], [0, 0, 2]],
dtype=np.float) / 2
else:
transf = np.eye(3)
return transf
def get_primitive_standard_structure(self, international_monoclinic=True):
"""
        Gives a structure with a primitive cell according to certain standards.
        The standards are defined in Setyawan, W., & Curtarolo, S. (2010).
High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
Returns:
The structure in a primitive standardized cell
"""
conv = self.get_conventional_standard_structure(
international_monoclinic=international_monoclinic)
lattice = self.get_lattice_type()
if "P" in self.get_space_group_symbol() or lattice == "hexagonal":
return conv
transf = self.get_conventional_to_primitive_transformation_matrix(
international_monoclinic=international_monoclinic)
new_sites = []
latt = Lattice(np.dot(transf, conv.lattice.matrix))
for s in conv:
new_s = PeriodicSite(
s.specie, s.coords, latt,
to_unit_cell=True, coords_are_cartesian=True,
properties=s.properties)
if not any(map(new_s.is_periodic_image, new_sites)):
new_sites.append(new_s)
if lattice == "rhombohedral":
prim = Structure.from_sites(new_sites)
lengths = prim.lattice.lengths
angles = prim.lattice.angles
a = lengths[0]
alpha = math.pi * angles[0] / 180
new_matrix = [
[a * cos(alpha / 2), -a * sin(alpha / 2), 0],
[a * cos(alpha / 2), a * sin(alpha / 2), 0],
[a * cos(alpha) / cos(alpha / 2), 0,
a * math.sqrt(1 - (cos(alpha) ** 2 / (cos(alpha / 2) ** 2)))]]
new_sites = []
latt = Lattice(new_matrix)
for s in prim:
new_s = PeriodicSite(
s.specie, s.frac_coords, latt,
to_unit_cell=True, properties=s.properties)
if not any(map(new_s.is_periodic_image, new_sites)):
new_sites.append(new_s)
return Structure.from_sites(new_sites)
return Structure.from_sites(new_sites)
def get_conventional_standard_structure(
self, international_monoclinic=True):
"""
Gives a structure with a conventional cell according to certain
standards. The standards are defined in Setyawan, W., & Curtarolo,
S. (2010). High-throughput electronic band structure calculations:
Challenges and tools. Computational Materials Science,
49(2), 299-312. doi:10.1016/j.commatsci.2010.05.010
They basically enforce as much as possible
norm(a1)<norm(a2)<norm(a3)
Returns:
The structure in a conventional standardized cell
"""
tol = 1e-5
struct = self.get_refined_structure()
latt = struct.lattice
latt_type = self.get_lattice_type()
sorted_lengths = sorted(latt.abc)
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1, 2]],
key=lambda k: k['length'])
if latt_type in ("orthorhombic", "cubic"):
# you want to keep the c axis where it is
# to keep the C- settings
transf = np.zeros(shape=(3, 3))
if self.get_space_group_symbol().startswith("C"):
transf[2] = [0, 0, 1]
a, b = sorted(latt.abc[:2])
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1]],
key=lambda k: k['length'])
for i in range(2):
transf[i][sorted_dic[i]['orig_index']] = 1
c = latt.abc[2]
elif self.get_space_group_symbol().startswith(
"A"): # change to C-centering to match Setyawan/Curtarolo convention
transf[2] = [1, 0, 0]
a, b = sorted(latt.abc[1:])
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [1, 2]],
key=lambda k: k['length'])
for i in range(2):
transf[i][sorted_dic[i]['orig_index']] = 1
c = latt.abc[0]
else:
for i in range(len(sorted_dic)):
transf[i][sorted_dic[i]['orig_index']] = 1
a, b, c = sorted_lengths
latt = Lattice.orthorhombic(a, b, c)
elif latt_type == "tetragonal":
# find the "a" vectors
# it is basically the vector repeated two times
transf = np.zeros(shape=(3, 3))
a, b, c = sorted_lengths
for d in range(len(sorted_dic)):
transf[d][sorted_dic[d]['orig_index']] = 1
if abs(b - c) < tol and abs(a - c) > tol:
a, c = c, a
transf = np.dot([[0, 0, 1], [0, 1, 0], [1, 0, 0]], transf)
latt = Lattice.tetragonal(a, c)
elif latt_type in ("hexagonal", "rhombohedral"):
# for the conventional cell representation,
            # we always show the rhombohedral lattices as hexagonal
            # check first whether the refined structure shows a rhombohedral
# cell
# if so, make a supercell
a, b, c = latt.abc
if np.all(np.abs([a - b, c - b, a - c]) < 0.001):
struct.make_supercell(((1, -1, 0), (0, 1, -1), (1, 1, 1)))
a, b, c = sorted(struct.lattice.abc)
if abs(b - c) < 0.001:
a, c = c, a
new_matrix = [[a / 2, -a * math.sqrt(3) / 2, 0],
[a / 2, a * math.sqrt(3) / 2, 0],
[0, 0, c]]
latt = Lattice(new_matrix)
transf = np.eye(3, 3)
elif latt_type == "monoclinic":
# You want to keep the c axis where it is to keep the C- settings
if self.get_space_group_operations().int_symbol.startswith("C"):
transf = np.zeros(shape=(3, 3))
transf[2] = [0, 0, 1]
sorted_dic = sorted([{'vec': latt.matrix[i],
'length': latt.abc[i],
'orig_index': i} for i in [0, 1]],
key=lambda k: k['length'])
a = sorted_dic[0]['length']
b = sorted_dic[1]['length']
c = latt.abc[2]
new_matrix = None
for t in itertools.permutations(list(range(2)), 2):
m = latt.matrix
latt2 = Lattice([m[t[0]], m[t[1]], m[2]])
lengths = latt2.lengths
angles = latt2.angles
if angles[0] > 90:
# if the angle is > 90 we invert a and b to get
# an angle < 90
a, b, c, alpha, beta, gamma = Lattice(
[-m[t[0]], -m[t[1]], m[2]]).parameters
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = -1
transf[1][t[1]] = -1
transf[2][2] = 1
alpha = math.pi * alpha / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
continue
elif angles[0] < 90:
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = 1
transf[1][t[1]] = 1
transf[2][2] = 1
a, b, c = lengths
alpha = math.pi * angles[0] / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
if new_matrix is None:
# this if is to treat the case
                    # where alpha == 90 (but we still have a monoclinic sg)
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, 0, c]]
transf = np.zeros(shape=(3, 3))
for c in range(len(sorted_dic)):
transf[c][sorted_dic[c]['orig_index']] = 1
# if not C-setting
else:
# try all permutations of the axis
# keep the ones with the non-90 angle=alpha
# and b<c
new_matrix = None
for t in itertools.permutations(list(range(3)), 3):
m = latt.matrix
a, b, c, alpha, beta, gamma = Lattice(
[m[t[0]], m[t[1]], m[t[2]]]).parameters
if alpha > 90 and b < c:
a, b, c, alpha, beta, gamma = Lattice(
[-m[t[0]], -m[t[1]], m[t[2]]]).parameters
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = -1
transf[1][t[1]] = -1
transf[2][t[2]] = 1
alpha = math.pi * alpha / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
continue
elif alpha < 90 and b < c:
transf = np.zeros(shape=(3, 3))
transf[0][t[0]] = 1
transf[1][t[1]] = 1
transf[2][t[2]] = 1
alpha = math.pi * alpha / 180
new_matrix = [[a, 0, 0],
[0, b, 0],
[0, c * cos(alpha), c * sin(alpha)]]
if new_matrix is None:
# this if is to treat the case
                    # where alpha == 90 (but we still have a monoclinic sg)
new_matrix = [[sorted_lengths[0], 0, 0],
[0, sorted_lengths[1], 0],
[0, 0, sorted_lengths[2]]]
transf = np.zeros(shape=(3, 3))
for c in range(len(sorted_dic)):
transf[c][sorted_dic[c]['orig_index']] = 1
if international_monoclinic:
# The above code makes alpha the non-right angle.
# The following will convert to proper international convention
# that beta is the non-right angle.
op = [[0, 1, 0], [1, 0, 0], [0, 0, -1]]
transf = np.dot(op, transf)
new_matrix = np.dot(op, new_matrix)
beta = Lattice(new_matrix).beta
if beta < 90:
op = [[-1, 0, 0], [0, -1, 0], [0, 0, 1]]
transf = np.dot(op, transf)
new_matrix = np.dot(op, new_matrix)
latt = Lattice(new_matrix)
elif latt_type == "triclinic":
# we use a LLL Minkowski-like reduction for the triclinic cells
struct = struct.get_reduced_structure("LLL")
a, b, c = latt.lengths
alpha, beta, gamma = [math.pi * i / 180 for i in latt.angles]
new_matrix = None
test_matrix = [[a, 0, 0],
[b * cos(gamma), b * sin(gamma), 0.0],
[c * cos(beta),
c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
def is_all_acute_or_obtuse(m):
recp_angles = np.array(Lattice(m).reciprocal_lattice.angles)
return np.all(recp_angles <= 90) or np.all(recp_angles > 90)
if is_all_acute_or_obtuse(test_matrix):
transf = np.eye(3)
new_matrix = test_matrix
test_matrix = [[-a, 0, 0],
[b * cos(gamma), b * sin(gamma), 0.0],
[-c * cos(beta),
-c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
-c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[-1, 0, 0],
[0, 1, 0],
[0, 0, -1]]
new_matrix = test_matrix
test_matrix = [[-a, 0, 0],
[-b * cos(gamma), -b * sin(gamma), 0.0],
[c * cos(beta),
c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[-1, 0, 0],
[0, -1, 0],
[0, 0, 1]]
new_matrix = test_matrix
test_matrix = [[a, 0, 0],
[-b * cos(gamma), -b * sin(gamma), 0.0],
[-c * cos(beta),
-c * (cos(alpha) - cos(beta) * cos(gamma)) /
sin(gamma),
-c * math.sqrt(sin(gamma) ** 2 - cos(alpha) ** 2
- cos(beta) ** 2
+ 2 * cos(alpha) * cos(beta)
* cos(gamma)) / sin(gamma)]]
if is_all_acute_or_obtuse(test_matrix):
transf = [[1, 0, 0],
[0, -1, 0],
[0, 0, -1]]
new_matrix = test_matrix
latt = Lattice(new_matrix)
new_coords = np.dot(transf, np.transpose(struct.frac_coords)).T
new_struct = Structure(latt, struct.species_and_occu, new_coords,
site_properties=struct.site_properties,
to_unit_cell=True)
return new_struct.get_sorted_structure()
def get_kpoint_weights(self, kpoints, atol=1e-5):
"""
Calculate the weights for a list of kpoints.
Args:
kpoints (Sequence): Sequence of kpoints. np.arrays is fine. Note
that the code does not check that the list of kpoints
provided does not contain duplicates.
atol (float): Tolerance for fractional coordinates comparisons.
Returns:
List of weights, in the SAME order as kpoints.
"""
kpts = np.array(kpoints)
shift = []
mesh = []
for i in range(3):
nonzero = [i for i in kpts[:, i] if abs(i) > 1e-5]
if len(nonzero) != len(kpts):
# gamma centered
if not nonzero:
mesh.append(1)
else:
m = np.abs(np.round(1 / np.array(nonzero)))
mesh.append(int(max(m)))
shift.append(0)
else:
# Monk
m = np.abs(np.round(0.5 / np.array(nonzero)))
mesh.append(int(max(m)))
shift.append(1)
mapping, grid = spglib.get_ir_reciprocal_mesh(
np.array(mesh), self._cell, is_shift=shift, symprec=self._symprec)
mapping = list(mapping)
grid = (np.array(grid) + np.array(shift) * (0.5, 0.5, 0.5)) / mesh
weights = []
mapped = defaultdict(int)
for k in kpoints:
for i, g in enumerate(grid):
if np.allclose(pbc_diff(k, g), (0, 0, 0), atol=atol):
mapped[tuple(g)] += 1
weights.append(mapping.count(mapping[i]))
break
if (len(mapped) != len(set(mapping))) or (
not all([v == 1 for v in mapped.values()])):
raise ValueError("Unable to find 1:1 corresponding between input "
"kpoints and irreducible grid!")
return [w / sum(weights) for w in weights]
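    # Usage sketch (editor's illustration, not part of the original module;
    # sga is a SpacegroupAnalyzer instance):
    #   kpts = [k for k, _ in sga.get_ir_reciprocal_mesh(mesh=(4, 4, 4))]
    #   weights = sga.get_kpoint_weights(kpts)   # normalized, same order as kpts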
def is_laue(self):
"""
Check if the point group of the structure
has Laue symmetry (centrosymmetry)
"""
laue = ["-1", "2/m", "mmm", "4/m", "4/mmm",
"-3", "-3m", "6/m", "6/mmm", "m-3", "m-3m"]
return str(self.get_point_group_symbol()) in laue
class PointGroupAnalyzer:
"""
A class to analyze the point group of a molecule. The general outline of
the algorithm is as follows:
1. Center the molecule around its center of mass.
2. Compute the inertia tensor and the eigenvalues and eigenvectors.
3. Handle the symmetry detection based on eigenvalues.
a. Linear molecules have one zero eigenvalue. Possible symmetry
           operations are C*v or D*h
        b. Asymmetric top molecules have all different eigenvalues. The
maximum rotational symmetry in such molecules is 2
c. Symmetric top molecules have 1 unique eigenvalue, which gives a
unique rotation axis. All axial point groups are possible
except the cubic groups (T & O) and I.
d. Spherical top molecules have all three eigenvalues equal. They
have the rare T, O or I point groups.
.. attribute:: sch_symbol
Schoenflies symbol of the detected point group.
"""
inversion_op = SymmOp.inversion()
def __init__(self, mol, tolerance=0.3, eigen_tolerance=0.01,
matrix_tol=0.1):
"""
The default settings are usually sufficient.
Args:
mol (Molecule): Molecule to determine point group for.
tolerance (float): Distance tolerance to consider sites as
symmetrically equivalent. Defaults to 0.3 Angstrom.
eigen_tolerance (float): Tolerance to compare eigen values of
the inertia tensor. Defaults to 0.01.
matrix_tol (float): Tolerance used to generate the full set of
symmetry operations of the point group.
"""
self.mol = mol
self.centered_mol = mol.get_centered_molecule()
self.tol = tolerance
self.eig_tol = eigen_tolerance
self.mat_tol = matrix_tol
self._analyze()
if self.sch_symbol in ["C1v", "C1h"]:
self.sch_symbol = "Cs"
def _analyze(self):
if len(self.centered_mol) == 1:
self.sch_symbol = "Kh"
else:
inertia_tensor = np.zeros((3, 3))
total_inertia = 0
for site in self.centered_mol:
c = site.coords
wt = site.species.weight
for i in range(3):
inertia_tensor[i, i] += wt * (c[(i + 1) % 3] ** 2
+ c[(i + 2) % 3] ** 2)
for i, j in [(0, 1), (1, 2), (0, 2)]:
inertia_tensor[i, j] += -wt * c[i] * c[j]
inertia_tensor[j, i] += -wt * c[j] * c[i]
total_inertia += wt * np.dot(c, c)
# Normalize the inertia tensor so that it does not scale with size
# of the system. This mitigates the problem of choosing a proper
# comparison tolerance for the eigenvalues.
inertia_tensor /= total_inertia
eigvals, eigvecs = np.linalg.eig(inertia_tensor)
self.principal_axes = eigvecs.T
self.eigvals = eigvals
v1, v2, v3 = eigvals
eig_zero = abs(v1 * v2 * v3) < self.eig_tol
eig_all_same = abs(v1 - v2) < self.eig_tol and abs(
v1 - v3) < self.eig_tol
eig_all_diff = abs(v1 - v2) > self.eig_tol and abs(
v1 - v3) > self.eig_tol and abs(v2 - v3) > self.eig_tol
self.rot_sym = []
self.symmops = [SymmOp(np.eye(4))]
if eig_zero:
logger.debug("Linear molecule detected")
self._proc_linear()
elif eig_all_same:
logger.debug("Spherical top molecule detected")
self._proc_sph_top()
elif eig_all_diff:
logger.debug("Asymmetric top molecule detected")
self._proc_asym_top()
else:
logger.debug("Symmetric top molecule detected")
self._proc_sym_top()
def _proc_linear(self):
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "D*h"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
self.sch_symbol = "C*v"
def _proc_asym_top(self):
"""
        Handles asymmetric top molecules, which cannot contain rotational
symmetry larger than 2.
"""
self._check_R2_axes_asym()
if len(self.rot_sym) == 0:
logger.debug("No rotation symmetries detected.")
self._proc_no_rot_sym()
elif len(self.rot_sym) == 3:
logger.debug("Dihedral group detected.")
self._proc_dihedral()
else:
logger.debug("Cyclic group detected.")
self._proc_cyclic()
def _proc_sym_top(self):
"""
        Handles symmetric top molecules which have one unique eigenvalue whose
corresponding principal axis is a unique rotational axis. More complex
handling required to look for R2 axes perpendicular to this unique
axis.
"""
if abs(self.eigvals[0] - self.eigvals[1]) < self.eig_tol:
ind = 2
elif abs(self.eigvals[1] - self.eigvals[2]) < self.eig_tol:
ind = 0
else:
ind = 1
logger.debug("Eigenvalues = %s." % self.eigvals)
unique_axis = self.principal_axes[ind]
self._check_rot_sym(unique_axis)
logger.debug("Rotation symmetries = %s" % self.rot_sym)
if len(self.rot_sym) > 0:
self._check_perpendicular_r2_axis(unique_axis)
if len(self.rot_sym) >= 2:
self._proc_dihedral()
elif len(self.rot_sym) == 1:
self._proc_cyclic()
else:
self._proc_no_rot_sym()
def _proc_no_rot_sym(self):
"""
Handles molecules with no rotational symmetry. Only possible point
groups are C1, Cs and Ci.
"""
self.sch_symbol = "C1"
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.sch_symbol = "Ci"
self.symmops.append(PointGroupAnalyzer.inversion_op)
else:
for v in self.principal_axes:
mirror_type = self._find_mirror(v)
if not mirror_type == "":
self.sch_symbol = "Cs"
break
def _proc_cyclic(self):
"""
Handles cyclic group molecules.
"""
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
self.sch_symbol = "C{}".format(rot)
mirror_type = self._find_mirror(main_axis)
if mirror_type == "h":
self.sch_symbol += "h"
elif mirror_type == "v":
self.sch_symbol += "v"
elif mirror_type == "":
if self.is_valid_op(SymmOp.rotoreflection(main_axis,
angle=180 / rot)):
self.sch_symbol = "S{}".format(2 * rot)
def _proc_dihedral(self):
"""
        Handles dihedral group molecules, i.e., those with intersecting R2 axes
and a main axis.
"""
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
self.sch_symbol = "D{}".format(rot)
mirror_type = self._find_mirror(main_axis)
if mirror_type == "h":
self.sch_symbol += "h"
elif not mirror_type == "":
self.sch_symbol += "d"
def _check_R2_axes_asym(self):
"""
Test for 2-fold rotation along the principal axes. Used to handle
        asymmetric top molecules.
"""
for v in self.principal_axes:
op = SymmOp.from_axis_angle_and_translation(v, 180)
if self.is_valid_op(op):
self.symmops.append(op)
self.rot_sym.append((v, 2))
def _find_mirror(self, axis):
"""
Looks for mirror symmetry of specified type about axis. Possible
types are "h" or "vd". Horizontal (h) mirrors are perpendicular to
the axis while vertical (v) or diagonal (d) mirrors are parallel. v
mirrors has atoms lying on the mirror plane while d mirrors do
not.
"""
mirror_type = ""
# First test whether the axis itself is the normal to a mirror plane.
if self.is_valid_op(SymmOp.reflection(axis)):
self.symmops.append(SymmOp.reflection(axis))
mirror_type = "h"
else:
# Iterate through all pairs of atoms to find mirror
for s1, s2 in itertools.combinations(self.centered_mol, 2):
if s1.species == s2.species:
normal = s1.coords - s2.coords
if np.dot(normal, axis) < self.tol:
op = SymmOp.reflection(normal)
if self.is_valid_op(op):
self.symmops.append(op)
if len(self.rot_sym) > 1:
mirror_type = "d"
for v, r in self.rot_sym:
if not np.linalg.norm(v - axis) < self.tol:
if np.dot(v, normal) < self.tol:
mirror_type = "v"
break
else:
mirror_type = "v"
break
return mirror_type
def _get_smallest_set_not_on_axis(self, axis):
"""
Returns the smallest list of atoms with the same species and
        distance from origin AND do not lie on the specified axis. This
        set limits the possible rotational symmetry operations,
        since atoms lying on a test axis are irrelevant in testing rotational
        symmetry operations.
"""
def not_on_axis(site):
v = np.cross(site.coords, axis)
return np.linalg.norm(v) > self.tol
valid_sets = []
origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
for test_set in dist_el_sites.values():
valid_set = list(filter(not_on_axis, test_set))
if len(valid_set) > 0:
valid_sets.append(valid_set)
return min(valid_sets, key=lambda s: len(s))
def _check_rot_sym(self, axis):
"""
Determines the rotational symmetry about supplied axis. Used only for
        symmetric top molecules which can have rotational symmetry
operations > 2.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
max_sym = len(min_set)
for i in range(max_sym, 0, -1):
if max_sym % i != 0:
continue
op = SymmOp.from_axis_angle_and_translation(axis, 360 / i)
rotvalid = self.is_valid_op(op)
if rotvalid:
self.symmops.append(op)
self.rot_sym.append((axis, i))
return i
return 1
def _check_perpendicular_r2_axis(self, axis):
"""
Checks for R2 axes perpendicular to unique axis. For handling
symmetric top molecules.
"""
min_set = self._get_smallest_set_not_on_axis(axis)
for s1, s2 in itertools.combinations(min_set, 2):
test_axis = np.cross(s1.coords - s2.coords, axis)
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis, 180)
r2present = self.is_valid_op(op)
if r2present:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
return True
def _proc_sph_top(self):
"""
        Handles spherical top molecules, which belong to the T, O or I point
groups.
"""
self._find_spherical_axes()
if len(self.rot_sym) == 0:
logger.debug("Accidental speherical top!")
self._proc_sym_top()
main_axis, rot = max(self.rot_sym, key=lambda v: v[1])
if rot < 3:
logger.debug("Accidental speherical top!")
self._proc_sym_top()
elif rot == 3:
mirror_type = self._find_mirror(main_axis)
if mirror_type != "":
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Th"
else:
self.sch_symbol = "Td"
else:
self.sch_symbol = "T"
elif rot == 4:
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Oh"
else:
self.sch_symbol = "O"
elif rot == 5:
if self.is_valid_op(PointGroupAnalyzer.inversion_op):
self.symmops.append(PointGroupAnalyzer.inversion_op)
self.sch_symbol = "Ih"
else:
self.sch_symbol = "I"
def _find_spherical_axes(self):
"""
Looks for R5, R4, R3 and R2 axes in spherical top molecules. Point
group T molecules have only one unique 3-fold and one unique 2-fold
axis. O molecules have one unique 4, 3 and 2-fold axes. I molecules
have a unique 5-fold axis.
"""
rot_present = defaultdict(bool)
origin_site, dist_el_sites = cluster_sites(self.centered_mol, self.tol)
test_set = min(dist_el_sites.values(), key=lambda s: len(s))
coords = [s.coords for s in test_set]
for c1, c2, c3 in itertools.combinations(coords, 3):
for cc1, cc2 in itertools.combinations([c1, c2, c3], 2):
if not rot_present[2]:
test_axis = cc1 + cc2
if np.linalg.norm(test_axis) > self.tol:
op = SymmOp.from_axis_angle_and_translation(test_axis,
180)
rot_present[2] = self.is_valid_op(op)
if rot_present[2]:
self.symmops.append(op)
self.rot_sym.append((test_axis, 2))
test_axis = np.cross(c2 - c1, c3 - c1)
if np.linalg.norm(test_axis) > self.tol:
for r in (3, 4, 5):
if not rot_present[r]:
op = SymmOp.from_axis_angle_and_translation(
test_axis, 360 / r)
rot_present[r] = self.is_valid_op(op)
if rot_present[r]:
self.symmops.append(op)
self.rot_sym.append((test_axis, r))
break
if rot_present[2] and rot_present[3] and (
rot_present[4] or rot_present[5]):
break
def get_pointgroup(self):
"""
Returns a PointGroup object for the molecule.
"""
return PointGroupOperations(self.sch_symbol, self.symmops,
self.mat_tol)
def get_symmetry_operations(self):
"""
Return symmetry operations as a list of SymmOp objects.
Returns Cartesian coord symmops.
Returns:
([SymmOp]): List of symmetry operations.
"""
return generate_full_symmops(self.symmops, self.tol)
def is_valid_op(self, symmop):
"""
Check if a particular symmetry operation is a valid symmetry operation
for a molecule, i.e., the operation maps all atoms to another
equivalent atom.
Args:
symmop (SymmOp): Symmetry operation to test.
Returns:
(bool): Whether SymmOp is valid for Molecule.
"""
coords = self.centered_mol.cart_coords
for site in self.centered_mol:
coord = symmop.operate(site.coords)
ind = find_in_coord_list(coords, coord, self.tol)
if not (len(ind) == 1
and self.centered_mol[ind[0]].species
== site.species):
return False
return True
def _get_eq_sets(self):
"""
Calculates the dictionary for mapping equivalent atoms onto each other.
Args:
None
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` onto ``j``.
"""
UNIT = np.eye(3)
eq_sets, operations = defaultdict(set), defaultdict(dict)
symm_ops = [op.rotation_matrix
for op in generate_full_symmops(self.symmops, self.tol)]
def get_clustered_indices():
indices = cluster_sites(self.centered_mol, self.tol,
give_only_index=True)
out = list(indices[1].values())
if indices[0] is not None:
out.append([indices[0]])
return out
for index in get_clustered_indices():
sites = self.centered_mol.cart_coords[index]
for i, reference in zip(index, sites):
for op in symm_ops:
rotated = np.dot(op, sites.T).T
matched_indices = find_in_coord_list(rotated, reference,
self.tol)
matched_indices = {
dict(enumerate(index))[i] for i in matched_indices}
eq_sets[i] |= matched_indices
if i not in operations:
operations[i] = {j: op.T if j != i else UNIT
for j in matched_indices}
else:
for j in matched_indices:
if j not in operations[i]:
operations[i][j] = op.T if j != i else UNIT
for j in matched_indices:
if j not in operations:
operations[j] = {i: op if j != i else UNIT}
elif i not in operations[j]:
operations[j][i] = op if j != i else UNIT
return {'eq_sets': eq_sets,
'sym_ops': operations}
@staticmethod
def _combine_eq_sets(eq_sets, operations):
"""Combines the dicts of _get_equivalent_atom_dicts into one
Args:
eq_sets (dict)
operations (dict)
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` onto ``j``.
"""
UNIT = np.eye(3)
def all_equivalent_atoms_of_i(i, eq_sets, ops):
"""WORKS INPLACE on operations
"""
visited = set([i])
tmp_eq_sets = {j: (eq_sets[j] - visited) for j in eq_sets[i]}
while tmp_eq_sets:
new_tmp_eq_sets = {}
for j in tmp_eq_sets:
if j in visited:
continue
visited.add(j)
for k in tmp_eq_sets[j]:
new_tmp_eq_sets[k] = eq_sets[k] - visited
if i not in ops[k]:
ops[k][i] = (np.dot(ops[j][i], ops[k][j])
if k != i else UNIT)
ops[i][k] = ops[k][i].T
tmp_eq_sets = new_tmp_eq_sets
return visited, ops
eq_sets = copy.deepcopy(eq_sets)
ops = copy.deepcopy(operations)
to_be_deleted = set()
for i in eq_sets:
if i in to_be_deleted:
continue
visited, ops = all_equivalent_atoms_of_i(i, eq_sets, ops)
to_be_deleted |= visited - {i}
for k in to_be_deleted:
eq_sets.pop(k, None)
return {'eq_sets': eq_sets,
'sym_ops': ops}
def get_equivalent_atoms(self):
"""Returns sets of equivalent atoms with symmetry operations
Args:
None
Returns:
dict: The returned dictionary has two possible keys:
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` onto ``j``.
"""
eq = self._get_eq_sets()
return self._combine_eq_sets(eq['eq_sets'],
eq['sym_ops'])
def symmetrize_molecule(self):
"""Returns a symmetrized molecule
The equivalent atoms obtained via
:meth:`~pymatgen.symmetry.analyzer.PointGroupAnalyzer.get_equivalent_atoms`
        are rotated, mirrored... onto one position.
Then the average position is calculated.
The average position is rotated, mirrored... back with the inverse
of the previous symmetry operations, which gives the
symmetrized molecule
Args:
None
Returns:
dict: The returned dictionary has three possible keys:
``sym_mol``:
A symmetrized molecule instance.
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
            that maps atom ``i`` onto ``j``.
"""
eq = self.get_equivalent_atoms()
eq_sets, ops = eq['eq_sets'], eq['sym_ops']
coords = self.centered_mol.cart_coords.copy()
for i, eq_indices in eq_sets.items():
for j in eq_indices:
coords[j] = np.dot(ops[j][i], coords[j])
coords[i] = np.mean(coords[list(eq_indices)], axis=0)
for j in eq_indices:
if j == i:
continue
                coords[j] = np.dot(ops[i][j], coords[i])
molecule = Molecule(species=self.centered_mol.species_and_occu,
coords=coords)
return {'sym_mol': molecule,
'eq_sets': eq_sets,
'sym_ops': ops}
def iterative_symmetrize(mol, max_n=10, tolerance=0.3, epsilon=1e-2):
"""Returns a symmetrized molecule
The equivalent atoms obtained via
:meth:`~pymatgen.symmetry.analyzer.PointGroupAnalyzer.get_equivalent_atoms`
    are rotated, mirrored... onto one position.
Then the average position is calculated.
The average position is rotated, mirrored... back with the inverse
of the previous symmetry operations, which gives the
symmetrized molecule
Args:
mol (Molecule): A pymatgen Molecule instance.
max_n (int): Maximum number of iterations.
tolerance (float): Tolerance for detecting symmetry.
Gets passed as Argument into
:class:`~pymatgen.analyzer.symmetry.PointGroupAnalyzer`.
epsilon (float): If the elementwise absolute difference of two
subsequently symmetrized structures is smaller epsilon,
the iteration stops before ``max_n`` is reached.
Returns:
dict: The returned dictionary has three possible keys:
``sym_mol``:
A symmetrized molecule instance.
``eq_sets``:
A dictionary of indices mapping to sets of indices,
each key maps to indices of all equivalent atoms.
The keys are guaranteed to be not equivalent.
``sym_ops``:
Twofold nested dictionary.
``operations[i][j]`` gives the symmetry operation
        that maps atom ``i`` onto ``j``.
"""
new = mol
n = 0
finished = False
while not finished and n <= max_n:
previous = new
PA = PointGroupAnalyzer(previous, tolerance=tolerance)
eq = PA.symmetrize_molecule()
new = eq['sym_mol']
finished = np.allclose(new.cart_coords, previous.cart_coords,
atol=epsilon)
n += 1
return eq
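# Usage sketch (editor's illustration, not part of the original module): tighten a
# slightly distorted molecule onto its detected point group; `mol` is any pymatgen
# Molecule instance:
#   result = iterative_symmetrize(mol, max_n=10, tolerance=0.3, epsilon=1e-2)
#   sym_mol = result['sym_mol']   # symmetrized Molecule
#   eq_sets = result['eq_sets']   # {representative index: set of equivalent indices}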
def cluster_sites(mol, tol, give_only_index=False):
"""
Cluster sites based on distance and species type.
Args:
mol (Molecule): Molecule **with origin at center of mass**.
tol (float): Tolerance to use.
Returns:
(origin_site, clustered_sites): origin_site is a site at the center
of mass (None if there are no origin atoms). clustered_sites is a
dict of {(avg_dist, species_and_occu): [list of sites]}
"""
# Cluster works for dim > 2 data. We just add a dummy 0 for second
# coordinate.
dists = [[np.linalg.norm(site.coords), 0] for site in mol]
import scipy.cluster as spcluster
f = spcluster.hierarchy.fclusterdata(dists, tol, criterion='distance')
clustered_dists = defaultdict(list)
for i, site in enumerate(mol):
clustered_dists[f[i]].append(dists[i])
avg_dist = {label: np.mean(val) for label, val in clustered_dists.items()}
clustered_sites = defaultdict(list)
origin_site = None
for i, site in enumerate(mol):
if avg_dist[f[i]] < tol:
if give_only_index:
origin_site = i
else:
origin_site = site
else:
if give_only_index:
clustered_sites[
(avg_dist[f[i]], site.species)].append(i)
else:
clustered_sites[
(avg_dist[f[i]], site.species)].append(site)
return origin_site, clustered_sites
def generate_full_symmops(symmops, tol):
"""
Recursive algorithm to permute through all possible combinations of the
initially supplied symmetry operations to arrive at a complete set of
operations mapping a single atom to all other equivalent atoms in the
point group. This assumes that the initial number already uniquely
identifies all operations.
Args:
symmops ([SymmOp]): Initial set of symmetry operations.
Returns:
Full set of symmetry operations.
"""
# Uses an algorithm described in:
# Gregory Butler. Fundamental Algorithms for Permutation Groups.
# Lecture Notes in Computer Science (Book 559). Springer, 1991. page 15
UNIT = np.eye(4)
generators = [op.affine_matrix for op in symmops
if not np.allclose(op.affine_matrix, UNIT)]
if not generators:
# C1 symmetry breaks assumptions in the algorithm afterwards
return symmops
else:
full = list(generators)
for g in full:
for s in generators:
op = np.dot(g, s)
d = np.abs(full - op) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
full.append(op)
d = np.abs(full - UNIT) < tol
if not np.any(np.all(np.all(d, axis=2), axis=1)):
full.append(UNIT)
return [SymmOp(op) for op in full]
class SpacegroupOperations(list):
"""
Represents a space group, which is a collection of symmetry operations.
"""
def __init__(self, int_symbol, int_number, symmops):
"""
Args:
int_symbol (str): International symbol of the spacegroup.
int_number (int): International number of the spacegroup.
symmops ([SymmOp]): Symmetry operations associated with the
spacegroup.
"""
self.int_symbol = int_symbol
self.int_number = int_number
super().__init__(symmops)
def are_symmetrically_equivalent(self, sites1, sites2, symm_prec=1e-3):
"""
Given two sets of PeriodicSites, test if they are actually
symmetrically equivalent under this space group. Useful, for example,
if you want to test if selecting atoms 1 and 2 out of a set of 4 atoms
are symmetrically the same as selecting atoms 3 and 4, etc.
One use is in PartialRemoveSpecie transformation to return only
symmetrically distinct arrangements of atoms.
Args:
sites1 ([Site]): 1st set of sites
sites2 ([Site]): 2nd set of sites
symm_prec (float): Tolerance in atomic distance to test if atoms
are symmetrically similar.
Returns:
(bool): Whether the two sets of sites are symmetrically
equivalent.
"""
def in_sites(site):
for test_site in sites1:
if test_site.is_periodic_image(site, symm_prec, False):
return True
return False
for op in self:
newsites2 = [PeriodicSite(site.species,
op.operate(site.frac_coords),
site.lattice) for site in sites2]
for site in newsites2:
if not in_sites(site):
break
else:
return True
return False
def __str__(self):
return "{} ({}) spacegroup".format(self.int_symbol, self.int_number)
class PointGroupOperations(list):
"""
Defines a point group, which is essentially a sequence of symmetry
operations.
.. attribute:: sch_symbol
Schoenflies symbol of the point group.
"""
def __init__(self, sch_symbol, operations, tol=0.1):
"""
Args:
sch_symbol (str): Schoenflies symbol of the point group.
operations ([SymmOp]): Initial set of symmetry operations. It is
sufficient to provide only just enough operations to generate
the full set of symmetries.
tol (float): Tolerance to generate the full set of symmetry
operations.
"""
self.sch_symbol = sch_symbol
super().__init__(
generate_full_symmops(operations, tol))
def __str__(self):
return self.sch_symbol
def __repr__(self):
return self.__str__()
| mit |
KatPro/nlptools-ru | nlptools2/tagger.py | 4 | 51260 | #!/usr/bin/python
# -*- encoding: utf-8 -*-
"""
Morphological disambiguation of Russian text.
A statistical tagger-lemmatizer for Russian based on the pymorphy2 library.
Uses statistics of co-occurring word endings in text,
as well as statistics of case dependence on governing prepositions.
For Python 3
"""
import sys
import os
import time
import itertools
import re
import math
from random import choice
from datetime import datetime
from collections import defaultdict, OrderedDict
import struct
import dawg
import pymorphy2
from pickling import *
from tokenizer import Tokenizer
from segmentizer import Segmentizer
from dater import Dater, _U
import mc
from commontools import *
from morphotools import *
class Tagger(object):
"""
    Statistical tagger-lemmatizer for Russian based on pymorphy
"""
def __init__(self, morph=None, dater=None):
"""
        Tagger initialization. Builds the regular expressions used for lemmatization.
        Loads the abbreviation dictionary.
        Creates dictionaries of months, cases, etc.
        morph - pymorphy2 morphological dictionary
"""
if not morph:
raise ValueError("No morphoanalyzer found!")
# Regular expressions for lemmatization
self.digit = re.compile("^\d+$")
self.eng = re.compile("^\d*[a-zA-Z]+(?:-[a-zA-Z])?$", re.UNICODE)
self.short = re.compile("^[A-ZА-ЯЁ][a-zа-яё]?$")
# Regular expressions for splitting text into sentences
self.splitter = re.compile("[.?!]+")
self.starter = re.compile("[А-ЯЁA-Z\d\"\'\(\)\[\]~`«s-]")
self.bad_ender = re.compile("^[А-ЯЁа-яёA-Za-z][а-яёa-z]?$")
self.gram_spl = re.compile("[,\s]+")
# Regular expression for prepositional government
self.prepcase = re.compile("\d+:" + mc.PREP + "(?:\d+:(?:" +
mc.ADJF + "|" + mc.PRTF + "|" + mc.PRCL + "|" +
mc.CONJ + "|" + mc.ADVB + "))*\d+:(?:" + mc.ADJF +
"|" + mc.NOUN + "|" + mc.NPRO + ")(?:\d+:" +
mc.CONJ + "\d+:(?:" + mc.NOUN + "|" + mc.NPRO + "))?")
self.positem = re.compile("\d+:[А-Я-]+")
# Morphology
self.morph = morph
# Date processing
self.dater = dater
# Abbreviations
self.abbrs = unpkl_2layered_s(os.path.join(os.path.dirname(sys.argv[0]), "dicts/abbr.pkl"))
# Suffix frequency statistics
self.freqs = dawg.BytesDAWG()
self.weights = defaultdict(float)
self.small = 0.0
def gram_bad(self, word):
"""
Returns grammatical features for a word that is missing from the pymorphy dictionary.
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes
"""
if self.dater:
if self.dater.is_date(word):
date = self.dater.check_date(word)
if date:
return {mc._lemma: date[0], mc._pos: date[1], mc._gram: {date[1] + _U}}
return {mc._lemma: word}
if re.match(self.digit, word):
return {mc._lemma: word, mc._pos: mc.NUMB}
if re.match(self.eng, word):
if word.endswith("s'") or word.endswith("'s"):
return {mc._lemma: word, mc._pos: mc.ADJF}
if word.endswith("'a") or word.endswith("'а"):
word = word[:-2]
return {mc._lemma: word, mc._pos: mc.NOUN}
if word in self.abbrs.keys():
return self.abbrs[word]
return {mc._lemma: word}
def check_lemma(self, norm, word):
"""
Ensures that a non-empty lemma is returned (if the lemma is empty, the word itself is returned instead)
"""
lexeme = norm.normal_form.replace("ё", "е").replace("Ё", "Е")
if not lexeme and "-" in word: # For a compound word, try to lemmatize each part separately
try:
lexeme = "-".join([self.morph.parse(part)[0].normal_form if self.morph.parse(part)[0].normal_form else part
for part in lexeme.split("-")])
except Exception:
print("Unable to lemmatize: ", word, "\nPossible lemma:", lexeme)
sys.exit()
elif not lexeme:
lexeme = word.replace("ё", "е").replace("Ё", "Е")
grams = re.split(self.gram_spl, str(norm.tag))
pos = grams[0]
if pos == mc.LATN:
pos = mc.NOUN
if pos == mc.ROMN:
pos = mc.NUMB
return {mc._lemma: lexeme, mc._pos: pos, mc._gram: set(grams[1:])}
def gram_first(self, word):
"""
Returns the FIRST lemma, part of speech and grammatical features of a word as a dictionary
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes
"""
data = self.morph.parse(word)
if data:
return [self.check_lemma(data[0], word)]
return [self.gram_bad(word)]
def gram_all(self, word):
"""
Returns ALL lemmas, parts of speech and grammatical features of a word as a tuple of dictionaries
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes
"""
data = self.morph.parse(word)
if data:
return [self.check_lemma(info, word) for info in data]
return [self.gram_bad(word)]
def lemm_list(self, word, true_lemma):
"""
Returns the list of lemmas of a word, with the correct lemma first
"""
norms = self.morph.parse(word)
if norms:
lemms = set([info.normal_form.replace("ё", "е").replace("Ё", "Е") for info in norms])
if len(lemms) == 1:
if true_lemma in lemms:
return [true_lemma]
if true_lemma in lemms:
lemms.remove(true_lemma)
return [true_lemma] + sorted(list(lemms))
return [word]
def get_sentence_cases(self, sentence):
"""
Collecting case statistics: processing of a single sentence (sentence)
"""
if not sentence:
return []
result = []
for (ind, info) in enumerate(sentence):
if len(info) < 3:
continue
if not info[2].split("|")[0] in mc._declinable: # Работаем только со словами, которые могут иметь падеж
continue
norms = self.gram_all(info[0]) # Все возможные варианты лемм текущего слова
try:
true_cases = set(re.split(self.gram_spl, info[2].split("|")[1])).intersection(mc._cases)
if len(true_cases) > 1:
continue
true_case = true_cases.pop()
all_vars = [norm for norm in norms if mc._gram in norm.keys()]
all_cases = set([x for y in [norm[mc._gram].intersection(mc._cases)
for norm in all_vars] for x in y])
if not true_case in all_cases or len(all_cases) == 1:
continue
prep = self.find_prep(sentence, ind)
# Result in the form <Nearest preposition to the left, List of possible cases, Correct case>
result.append("\t".join((prep, "|".join((sorted(all_cases))), true_case)))
except Exception:
continue
return result
def prepare_cases(self, trainfile):
"""
Preprocessing of the training corpus: strip everything except prepositions and cases,
and write the result to a new file.
"""
with open(trainfile, "r", encoding="UTF8") as fin, open(trainfile + ".cases", "w", encoding="UTF8") as fout:
sentence = []
for line in fin:
if line == mc.BTAG: # Если это метка начала предложения
fout.write(line)
sentence = []
continue
if line == mc.ETAG: # Если это метка конца предложения
case_result = self.get_sentence_cases(sentence)
if case_result:
fout.write("{0}\n{1}".format("\n".join(case_result), line))
else:
fout.write(line)
del sentence[:]
continue
sentence.append(line.strip().split("\t"))
return True
def prepare_corpus(self, trainfile, suff_len):
"""
Preprocessing of the training corpus: strip everything except suffixes,
and write the result to a new file.
"""
with open(trainfile, "r", encoding="UTF8") as fin, open(trainfile + "." + str(suff_len).zfill(2) + ".suffs", "w", encoding="UTF8") as fout:
for line in fin:
if line in {mc.BTAG, mc.ETAG}: # Если это метка начала или конца предложения
fout.write(line)
continue
items = line.strip().split("\t")
if len(items) <= 2:
continue # Это знак препинания
word = items[0].lower()
lemms = [x.lower() for x in self.lemm_list(*items[:2])] # List of possible lemmas, the first one is correct
suff = suffix(word, suff_len) # Word suffix of length suff_len
stem = longest_common([word] + lemms) # Longest common prefix (the stem?)
lem_flexes = [suffix(lemma, len(lemma) - len(stem)) for lemma in lemms] # Keep only the endings of all lemmas
fout.write("{0}\t{1}\n".format(suff, "\t".join(lem_flexes)))
return True
@staticmethod
def count_sentence_suffs(sentence, freqs, cfreqs, radius):
"""
Collecting suffix statistics: processing of a single sentence
"""
if not sentence:
return True
pairs = dict(enumerate(sentence))
hom_nums = [num for (num, info) in pairs.items() if len(info) > 2] # Номера омонимов в предложении
for hom_num in hom_nums:
for num in smart_range(pairs.keys(), hom_num, radius):
freqs[(num - hom_num, pairs[num][0], tuple(sorted(pairs[hom_num][1:])))][pairs[hom_num][1]] += 1
cfreqs[(num - hom_num, tuple(sorted(pairs[hom_num][1:])))][pairs[hom_num][1]] += 1
return True
def find_prep(self, sentence, ind):
"""
Finds the nearest preposition to the left of the given word
ind - index of the word in the sentence (sentence)
"""
sent = dict(enumerate(sentence))
for cur in list(range(ind))[::-1]:
if len(sent[cur]) < 3 and not re.match(self.splitter, sent[cur][0]):
continue
if not sent[cur][2] in mc._btwn_prep_noun:
break
if sent[cur][2] == mc.PREP:
return sent[cur][1]
return mc.NOPREP
def count_sentence_cases(self, sentence, freqs):
"""
Collecting case statistics: processing of a single sentence (sentence)
freqs - dictionary to be filled with the statistics
"""
if not sentence:
return True
for (ind, info) in enumerate(sentence):
if len(info) < 3:
continue
if not info[2].split("|")[0] in mc._declinable: # Работаем только со словами, которые могут иметь падеж
continue
norms = self.gram_all(info[0]) # Все возможные варианты лемм текущего слова
try:
true_cases = set(re.split(self.gram_spl, info[2].split("|")[1])).intersection(mc._cases)
if len(true_cases) > 1:
continue
true_case = true_cases.pop()
all_vars = [norm for norm in norms if mc._gram in norm.keys()]
all_cases = set([x for y in [norm[mc._gram].intersection(mc._cases)
for norm in all_vars]
for x in y])
if not true_case in all_cases or len(all_cases) == 1:
continue
prep = self.find_prep(sentence, ind)
freqs[(prep, tuple(sorted(all_cases)))][true_case] += 1
except Exception:
continue
return True
def count_sentence_cases_re(self, sentence, freqs):
"""
Collecting case statistics: processing of a single sentence (sentence)
using regular expressions.
An alternative approach: see count_sentence_cases(self, sentence, freqs)
freqs - dictionary to be filled with the statistics
"""
words = [(ind, info) for (ind, info) in enumerate(sentence) if len(info) > 2]
words_pat = "".join(["{0:d}:{1}".format(ind, info[2].split("|")[0]) for (ind, info) in words])
matches = re.findall(self.prepcase, words_pat)
if matches == []:
return True
found = set()
for match_obj in matches:
pos_items = re.findall(self.positem, match_obj)
inds = [int(x.split(":")[0]) for x in pos_items]
found = found.union(set(inds))
prep = sentence[inds[0]][1]
for pos_item in pos_items[1:]:
ind = int(pos_item.split(":")[0])
if sentence[ind][2].split("|")[0] in mc._declinable:
self.add_case_counts(sentence[ind], freqs, prep)
for (ind, info) in ((ind, info) for (ind, info) in words if info[2].split("|")[0] in mc._declinable and not ind in found):
self.add_case_counts(info, freqs, mc.NOPREP)
return True
def add_case_counts(self, info, freqs, prep):
norms = self.gram_all(info[0]) # Все возможные варианты лемм текущего слова
try:
true_cases = set(info[2].split("|")[1].split(",")).intersection(mc._cases)
if len(true_cases) > 1:
return True
true_case = true_cases.pop()
all_vars = [norm for norm in norms if mc._gram in norm.keys()]
all_cases = set([x for y in [norm[mc._gram].intersection(mc._cases)
for norm in all_vars] for x in y])
if not true_case in all_cases or len(all_cases) == 1:
return True
freqs[(prep, tuple(sorted(all_cases)))][true_case] += 1
except Exception:
return True
return True
def train(self, trainfile, radius=2, suff_len=3):
"""
Collecting suffix statistics: processing of the whole corpus
trainfile - annotated training corpus,
radius - radius of the context taken into account when choosing the correct lemma,
suff_len - length of the suffixes the training is based on
"""
# If the training corpus has not been prepared yet, do it right now
if trainfile.endswith(".lemma"):
Tagger.prepare_corpus(trainfile, suff_len)
trainfile += "." + str(suff_len).zfill(2) + ".suffs"
freqs = defaultdict(lambda: defaultdict(int))
cfreqs = defaultdict(lambda: defaultdict(int))
ranks = defaultdict(float)
caseranks = defaultdict(float)
# Dictionary structure: {<Position in context>, <Context>, <List of homonyms> : <Chosen homonym> : <Probability>}
normfreqs = defaultdict(lambda: defaultdict(float))
# Dictionary structure: {<Position in context>, <Context>: <Rank>}
normweights = defaultdict(float)
# Collect frequencies from the corpus
with open(trainfile, "r", encoding="UTF8") as fin:
sentence = []
for line in fin:
if line == mc.BTAG:
continue
if line == mc.ETAG:
Tagger.count_sentence_suffs(sentence, freqs, cfreqs, radius)
del sentence[:]
sentence = []
continue
sentence.append(line.strip().split("\t"))
# Normalize the frequencies
for k, v in freqs.items():
total = sum([freq for freq in v.values()])
for hom, freq in v.items():
normfreqs[k][hom] = float(freq) / total
# Compute context ranks
for k, v in cfreqs.items():
total = sum(v.values())
entropy = - float(sum([freq * math.log(freq) for freq in v.values()]) - total * math.log(total)) / total
ranks[k] = 1.0 / math.exp(entropy)
# Compute context weights
for k, v in ranks.items():
normweights[k[0]] += v
v_sum = sum([v for v in normweights.values()])
for k, v in normweights.items():
normweights[k] = v / v_sum
# Serialize the context frequencies and weights (ranks): the higher the entropy of the homonym distribution, the lower the rank (weight)
dfreqs = dawg.BytesDAWG([("{0:d}\t{1}\t{2}\t{3}".format(k[0], k[1], " ".join(k[2]), hom), struct.pack("f", freq))
for k, v in normfreqs.items() for hom, freq in v.items()])
dfreqs.save(trainfile + ".freqs.dawg")
dump_data(trainfile + ".weights.pkl", normweights)
# Serialize the "small" fallback value (for items missing from the dictionary)
small = 1.0 / (2 * sum([freq for k, v in normfreqs.items() for v1, freq in v.items()]))
dump_data(trainfile + ".small", small)
return True
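# ---------------------------------------------------------------------------
# Clarifying sketch (added, not in the original source): behaviour of the
# context rank computed in train(). The helper below just restates the
# formula used above; the example counts are hypothetical.
#
#   import math
#   def context_rank(counts):
#       total = sum(counts)
#       entropy = -(sum(f * math.log(f) for f in counts) - total * math.log(total)) / total
#       return 1.0 / math.exp(entropy)
#
#   context_rank([8, 8])   # ~0.50 -> ambiguous context, low weight
#   context_rank([15, 1])  # ~0.79 -> informative context, higher weight
# ---------------------------------------------------------------------------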
def train_cases(self, trainfile, threshold=1, small_diff=0.2):
"""
Training of case disambiguation (automatic rule extraction)
trainfile - annotated training corpus,
threshold - minimum absolute frequency of a rule in the corpus,
small_diff - maximum allowed difference between the two most probable variants of a rule.
"""
# If the training corpus has not been prepared yet, do it right now
if trainfile.endswith(".lemma"):
self.prepare_cases(trainfile)
trainfile += ".cases"
freqs = defaultdict(lambda: defaultdict(int))
self.caserules = defaultdict(str)
# Collect frequencies from the corpus
with open(trainfile, "r", encoding="UTF8") as fin:
sentence = []
for line in fin:
if line == mc.BTAG:
continue
if line == mc.ETAG:
for parts in sentence:
freqs[(parts[0], tuple(sorted(parts[1].split("|"))))][parts[-1]] += 1
del sentence[:]
sentence = []
continue
sentence.append(line.strip().split("\t"))
# Extract the rules
for k, v in freqs.items():
good_values = {case: freq for case, freq in v.items() if freq >= threshold}
total = sum(good_values.values())
for case, freq in good_values.items():
freqs[k][case] = float(freq) / total
chosen = argmax([(case, freq) for case, freq in good_values.items()])
if chosen is None:
continue
if len(chosen) != 1:
continue
if len(v.keys()) == 1:
self.caserules[k] = sorted(chosen)[0]
continue
second = argmax([(case, freq) for case, freq in good_values.items() if case != chosen[0]])
if second:
if freqs[k][chosen[0]] - freqs[k][second[0]] < small_diff:
continue
self.caserules[k] = sorted(chosen)[0]
# Debug output to a file
#with open("prep_stat_new.txt", "w", encoding="UTF8") as fout:
# for k, v in sorted(freqs.items()):
# total = sum([freq for freq in v.values()])
# entropy = - sum([float(freq) * math.log(float(freq) / total) / total for freq in v.values()])
# entropy = - sum([freq * math.log(freq) for freq in v.values()])
# for case, freq in sorted(v.items()):
# fout.write("{0}\t{1}\t{2}\t{3:.3f}\t{4:.3f}\n".format(k[0], "|".join(k[1]), case, freq, entropy))
# Serialize the rules
# Structure: <Preposition>, <List of cases> : <Correct case>
dump_data(trainfile + ".caserules.pkl", self.caserules)
return True
def train_cases_full(self, trainfile, threshold=1, small_diff=0.01):
"""
Training of case disambiguation (automatic rule extraction)
using regular expressions.
The full pipeline, starting from preprocessing of the corpus.
trainfile - annotated training corpus,
threshold - minimum absolute frequency of a rule in the corpus,
small_diff - maximum allowed difference between the two most probable variants of a rule.
"""
freqs = defaultdict(lambda: defaultdict(int))
self.caserules = defaultdict(str)
# Collect frequencies from the corpus
with open(trainfile, "r", encoding="UTF8") as fin:
sentence = []
for line in fin:
if line == mc.BTAG:
continue
if line == mc.ETAG:
self.count_sentence_cases_re(sentence, freqs)
del sentence[:]
sentence = []
continue
sentence.append(line.strip().split("\t"))
# Extract the rules
for k, v in freqs.items():
good_values = {case: freq for case, freq in v.items() if freq >= threshold}
total = sum(good_values.values())
for case, freq in good_values.items():
freqs[k][case] = float(freq) / total
chosen = argmax([(case, freq) for case, freq in good_values.items()])
if chosen is None:
continue
if len(chosen) != 1:
continue
if len(v.keys()) == 1:
self.caserules[k] = sorted(chosen)[0]
continue
second = argmax([(case, freq) for case, freq in good_values.items() if case != chosen[0]])
if second:
if freqs[k][chosen[0]] - freqs[k][second[0]] < small_diff:
continue
self.caserules[k] = sorted(chosen)[0]
# Debug output to a file
#with open("prep_stat_new.txt", "w", encoding="UTF8") as fout:
# for k, v in sorted(freqs.items()):
# total = sum([freq for freq in v.values()])
# entropy = - sum([float(freq) * math.log(float(freq) / total) / total for freq in v.values()])
# entropy = - sum([freq * math.log(freq) for freq in v.values()])
# for case, freq in sorted(v.items()):
# fout.write("{0}\t{1}\t{2}\t{3:.3f}\t{4:.3f}\n".format(k[0], "|".join(k[1]), case, freq, entropy))
# Serialize the rules
# Structure: <Preposition>, <List of cases> : <Correct case>
dump_data(trainfile + ".caserules.pkl", self.caserules)
return True
def dump_preps(self, filename):
"""
Writes the preposition/case statistics to a text file
"""
with open(filename, "w", encoding="UTF8") as fout:
for k, v in sorted(self.caserules.items()):
fout.write("{0}\t{1}\t{2}\n".format(k[0], "|".join(k[1]), v))
return True
def load_statistics(self, trainfile, suff_len=3, process_cases=True):
"""
Loads the suffix and case statistics
"""
try:
if process_cases:
self.caserules = unpkl_1layered_s(trainfile + ".cases.caserules.pkl")
self.weights = unpkl_1layered_f(trainfile + "." + str(suff_len).zfill(2) + ".suffs.weights.pkl")
self.freqs = dawg.BytesDAWG()
self.freqs.load(trainfile + "." + str(suff_len).zfill(2) + ".suffs.freqs.dawg")
with open(trainfile + "." + str(suff_len).zfill(2) + ".suffs.small", "rb") as fin:
self.small = pickle.load(fin)
except Exception as e:
print("Tagger statistics not found!", e)
sys.exit()
def lemmatize(self, tokens, make_all=True):
"""
Builds a dictionary of numbered lemmatized tokens from a plain list of tokens
make_all - load all possible normal forms (not only the first one),
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes
"""
action = self.gram_all if make_all else self.gram_first
return dict(enumerate([tuple([token] + action(token)) for token in tokens]))
def make_sents(self, lemmtokens):
"""
Splits the text into sentences
lemmtokens - dictionary of lemmatized text tokens of the form {index: (lemmatized token)}
"""
bound = False
sentences = []
cur_sent = []
for ind, info in lemmtokens.items():
if re.match(self.splitter, info[0]): # Возможная граница предложения
cur_sent.append((ind, info))
if len(cur_sent) == 1:
bound = False
continue
if not re.match(self.bad_ender, cur_sent[-2][1][0]): # Последний токен предложения не может быть одной буквой
bound = True
continue
if bound and info[0].strip() == "": # Пробельные символы между предложениями
cur_sent.append((ind, info))
continue
if bound and not re.match(self.starter, info[0]):
bound = False
cur_sent.append((ind, info))
continue
if bound and re.match(self.starter, info[0]):# and cur_sent[-1][1][0].strip() == "": # Возможное начало предложения
sentences.append(cur_sent)
cur_sent = []
cur_sent.append((ind, info))
bound = False
continue
cur_sent.append((ind, info))
if cur_sent:
sentences.append(cur_sent)
return tuple(sentences)
def parse_simple(self, sent_tokens, sent_words):
"""
Part-of-speech disambiguation for one- and two-letter words of a sentence
"""
short_ambigs = [ind for ind in sent_words.keys() if re.match(self.short, sent_words[ind][0])]
for ind in short_ambigs:
try:
if re.match(self.splitter, sent_tokens[ind + 1][0]) and sent_words[ind][1][mc._pos] != mc.NOUN:
sent_words[ind][1][mc._pos] = mc.NOUN
sent_words[ind][1][mc._gram] = mc._abbr
except Exception:
continue
return sent_words
def parse_cases(self, sent_tokens, sent_words):
"""
Case disambiguation for the words of a sentence
"""
caseambigs = [ind for ind in sent_words.keys()
if len(sent_words[ind]) > 2
and all(info[mc._pos] in mc._declinable for info in sent_words[ind][1:])]
for ind in caseambigs:
all_vars = [info for info in sent_words[ind][1:] if mc._gram in info.keys()]
all_cases = set([x for y in [info[mc._gram].intersection(mc._cases)
for info in all_vars] for x in y])
for cur in list(range(min(sent_tokens.keys()), ind))[::-1]:
if re.match(self.splitter, sent_tokens[cur][0]):
break
if not mc._pos in sent_tokens[cur][1].keys():
continue
if not sent_tokens[cur][1][mc._pos] in mc._btwn_prep_noun:
break
try:
if sent_tokens[cur][1][mc._pos] == mc.PREP:
prep = sent_tokens[cur][1][mc._lemma]
else:
prep = mc.NOPREP
if all_cases != {mc._nomn, mc._accs, mc._acc2} or prep != mc.NOPREP:
case = self.caserules[(prep, tuple(sorted(all_cases)))]
if case:
sent_words[ind] = xrestore_lemm(sent_words, case, ind)
else:
sent_words[ind] = nom_case_disamb(sent_words, ind)
except Exception:
continue
return True
def parse_sent(self, sentence, radius, suff_len, small_diff, process_cases):
"""
Morphological disambiguation of a single sentence
sentence - the sentence (a list of numbered tokens),
radius - radius of the context taken into account when choosing the correct lemma,
suff_len - length of the suffixes the training is based on,
small_diff - maximum allowed difference between the two most probable variants of a rule,
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes
process_cases=True -> perform case disambiguation
"""
if len(sentence) == 1:
return sentence
# Dictionary of the tokens of this sentence
sent_tokens = dict(sentence)
# Dictionary of the words of this sentence
sent_words = {ind: info for (ind, info) in sentence if len(info[1]) > 1}
# List of word-form suffixes
suffs = [suffix((info)[0].lower(), suff_len) for (ind, info) in sorted(sent_words.items(), key=lambda x: x[0])]
# Dictionary of the form {(absolute index, relative index): sorted list of suffixes}
suffixes = OrderedDict([((ind, rel_num), get_suffixes(lemmtoken))
for (rel_num, (ind, lemmtoken)) in zip(range(len(sent_words.keys())), sorted(sent_words.items(), key=lambda x: x[0]))])
# Indices of ambiguities (absolute and relative)
ambigs = [(ind, rel_num) for ((ind, rel_num), suff_list) in sorted(suffixes.items(), key=lambda x: x[0][0]) if len(suff_list) > 1]
# Part-of-speech disambiguation for one- and two-letter words
sent_words = self.parse_simple(sent_tokens, sent_words)
# Disambiguation in all remaining cases
# Set of contexts for this sentence
contexts = {(num, rel_num):
[(-i, suff) for (i, suff) in zip(range(1, radius + 1), smart_slice(suffs, rel_num - radius, rel_num)[::-1])] +
[(i, suff) for (i, suff) in zip(range(1, radius + 1), smart_slice(suffs, rel_num + 1, rel_num + radius + 1))]
for (num, rel_num) in ambigs}
# Disambiguation at the lemma level
for (ind, rel_num) in ambigs:
suff_list = suffixes[(ind, rel_num)]
pairs = contexts[(ind, rel_num)]
probs = [(var, sum([get_floatDAWG(self.freqs, "{0:d}\t{1}\t{2}\t{3}".format(rel_ind, sf, " ".join(suff_list), var), self.small) * self.weights[rel_ind]
for (rel_ind, sf) in pairs])) for var in suff_list]
arg_max = argmaxx(probs) # Список наиболее вероятных суффиксов
if arg_max:
if len(arg_max) == len(suff_list): # Если все варианты одинаковые, берем тот, который предлагает pymorphy
continue
second_prob = max([prob for (var, prob) in probs if prob < arg_max[0][1]])
if arg_max[0][1] - second_prob < small_diff: # Ограничение на разницу между двумя макс. вероятностями
continue
suffitem = sorted(arg_max)[0][0].replace(mc.NOSUFF, "") # Лучший суффикс
# Restore the lemma from the chosen suffix
sent_words[ind] = restore_lemm(sent_words, suffitem, ind)
if self.dater: # Обработка дат, если необходимо
self.dater.parse_dates(sent_words, sent_tokens)
if process_cases: # Снятие падежной омонимии, если необходимо
self.parse_cases(sent_tokens, sent_words)
new_sentence = [] # Предложение со снятой омонимией
for ind, info in sentence:
if ind in sent_words.keys():
new_sentence.append((ind, sent_words[ind]))
else:
new_sentence.append((ind, info))
return tuple(new_sentence)
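# ---------------------------------------------------------------------------
# Clarifying note (added, not in the original source): in the loop above each
# candidate ending of an ambiguous word is scored as
#     score(var) = sum over context pairs (rel_ind, sf) of
#                  P(var | rel_ind, sf, candidate set) * self.weights[rel_ind]
# where self.small is the fallback probability for unseen combinations.
# The best-scoring ending wins only if its margin over the runner-up is at
# least small_diff; otherwise the lemma suggested by pymorphy is kept.
# ---------------------------------------------------------------------------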
def write_stream(self, lemmtokens, fout, radius, suff_len, sent_marks, process_cases, small_diff):
"""
Disambiguates the whole text morphologically and writes it to the output stream sentence by sentence
lemmtokens - numbered lemmatized tokens,
fout - output stream,
radius - radius of the context taken into account when choosing the correct lemma,
suff_len - length of the suffixes the training is based on,
sent_marks=True -> mark sentence boundaries
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes,
process_cases=True -> perform case disambiguation,
small_diff - maximum allowed difference between the two most probable variants of a rule,
"""
for sentence in self.make_sents(lemmtokens):
self.write_sentence(self.parse_sent(sentence, radius, suff_len, small_diff, process_cases), fout, sent_marks)
return True
def write_sentence(self, sentence, fout, sent_marks):
"""
Writes a lemmatized sentence to the file.
The lemma is cast to the capitalization of the word form.
Lemmas of female surnames are kept in the feminine gender (to match the Russian National Corpus annotation).
fout - output stream,
sent_marks=True -> mark sentence boundaries
"""
if sent_marks:
fout.write(mc.BTAG)
for (ind, info) in sentence:
word = info[0].strip()
if word == "":
continue
lemma = info[1][mc._lemma]
grams = None
if mc._gram in info[1].keys():
grams = info[1][mc._gram]
if mc._surn in grams and mc._femn in grams:
lemma = adjust_female(self.morph, word, lemma)
elif mc._patr in grams:
lemma = adjust_patr(self.morph, word, lemma, mc._femn in grams)
fout.write("{0}\t{1}".format(info[0], get_same_caps(word, lemma)))
if mc._pos in info[1].keys():
fout.write("\t" + info[1][mc._pos])
if grams:
fout.write("\t" + ",".join(grams))
fout.write("\n")
if sent_marks:
fout.write(mc.ETAG)
return True
def parse_all(self, lemmtokens, outfile, radius=2, suff_len=3, sent_marks=False, process_cases=True, small_diff=0.01):
"""
Processes the whole text at once (writing the result to a file)
lemmtokens - numbered lemmatized tokens,
outfile - file the processed text will be written to,
radius - radius of the context taken into account when choosing the correct lemma,
suff_len - length of the suffixes the training is based on,
sent_marks=True -> mark sentence boundaries
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes,
process_cases=True -> perform case disambiguation,
small_diff - maximum allowed difference between the two most probable variants of a rule,
"""
with open(outfile, "w", encoding="UTF8") as fout:
self.write_stream(lemmtokens, fout, radius, suff_len, sent_marks, process_cases, small_diff)
return True
def parse_chunks(self, filename, radius=2, suff_len=3, chunks=2000, sent_marks=False, process_cases=True, small_diff=0.01):
"""
Processes the text in chunks and writes the result to a file
filename - source text file,
radius - radius of the context taken into account when choosing the correct lemma,
suff_len - length of the suffixes the training is based on,
chunks - approximate length of one chunk (in lines),
sent_marks=True -> mark sentence boundaries
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes,
process_cases=True -> perform case disambiguation,
small_diff - maximum allowed difference between the two most probable variants of a rule,
"""
buff = []
counter = 0
tokens = {}
tok_r = Tokenizer()
# Read the input file
with open(filename, "r", encoding = "UTF8") as fin, open(filename + ".lemma", "w", encoding = "UTF8") as fout:
for line in fin:
if len(buff) >= chunks and re.search(self.splitter, buff[-1]):
part_1 = re.split(self.splitter, buff[-1])[0] + re.findall(self.splitter, buff[-1])[0]
part_rest = buff[-1][len(part_1) + 1:]
self.parse_chunk(buff[:-1] + [part_1], fout, tok_r, radius, suff_len, sent_marks, process_cases, small_diff)
del buff[:]
buff = [part_rest]
counter += 1
print("chunk", counter, "done!")
buff.append(line)
if buff != []:
self.parse_chunk(buff, fout, tok_r, radius, suff_len, sent_marks, process_cases, small_diff)
def parse_chunk(self, buff, fout, tok_r, radius, suff_len, sent_marks, process_cases, small_diff):
"""
Disambiguates one text chunk and writes the result to an already opened output stream
buff - the current text chunk to process,
fout - output stream,
tok_r - tokenizer to use,
radius - radius of the context taken into account when choosing the correct lemma,
suff_len - length of the suffixes the training is based on,
sent_marks=True -> mark sentence boundaries
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes,
process_cases=True -> perform case disambiguation,
small_diff - maximum allowed difference between the two most probable variants of a rule,
"""
lemmtokens = self.lemmatize(tok_r.tokenize("".join(buff))) # Словарь токенов
self.write_stream(lemmtokens, fout, radius, suff_len, sent_marks, process_cases, small_diff)
return True
def get_parsed_sents(self, tokens, radius=2, suff_len=3, process_cases=True, small_diff=0.01):
"""
Returns the list of sentences with morphological ambiguity resolved
tokens - list of tokens of the source text,
radius - radius of the context taken into account when choosing the correct lemma,
suff_len - length of the suffixes the training is based on,
If self.dater != None, every date is tagged with "DD", "MM", "YY" or "YYYY" as its part of speech,
and with the format YYYY-MM-DD, YY-MM-DD, YYYY-MM or MM-DD plus the suffix -B, -L, -I or -U as its grammemes,
process_cases=True -> perform case disambiguation,
small_diff - maximum allowed difference between the two most probable variants of a rule,
"""
return [self.parse_sent(sentence, radius, suff_len, small_diff, process_cases) for sentence in self.make_sents(self.lemmatize(tokens))]
if __name__ == "__main__":
filename = os.path.join(os.path.dirname(sys.argv[0]), "test/delo.txt")
trainfile = os.path.join(os.path.dirname(sys.argv[0]),"dicts/ruscorpora.txt.lemma")
prepsfile = os.path.join(os.path.dirname(sys.argv[0]),"preps_stat.txt")
print("STARTED:", str(datetime.now()))
start = time.time()
morph = pymorphy2.MorphAnalyzer() # Подгружаем русский словарь
tok = Tokenizer() # Подгружаем токенизатор
dater = Dater() # Подгружаем обработчик дат
tagger = Tagger(morph, dater) # Подгружаем тэггер
t = time.time()
#tagger.prepare_cases(trainfile)
#print("Cases prepared! It took", time.time() - t)
#t = time.time()
#tagger.train_cases(trainfile + ".cases") # Обучаем тэггер падежам
#print("Cases trained! It took", time.time() - t)
#tagger.prepare_corpus(trainfile, 3)
#tagger.prepare_corpus(trainfile, 4)
#tagger.prepare_corpus(trainfile, 5)
#print("Corpus prepared!")
#tagger.train(trainfile + ".03.suffs", 3) # Обучаем тэггер суффиксам
#print("Suffix model trained!")
tagger.load_statistics(trainfile, 3) # Загружаем суффиксную статистику
#tagger.dump_preps(prepsfile) # Выписываем правила падежей в зависимости от предлогов в текстовый файл
print("Statistics loaded! It took", time.time() - start, "\nParsing file...")
tokens = []
with open(filename, "r", encoding="UTF8") as fin: # Читаем тестовый файл
tokens = tok.tokenize(fin.read()) # Список токенов
# Write the result to a file
tagger.parse_all(tagger.lemmatize(tokens), filename + ".lemma", sent_marks=True)
print("FINISHED:", str(datetime.now()))
print("Time elapsed: ", time.time() - start)
| mit |
jiangjinjinyxt/vnpy | vnpy/api/ksotp/pyscript/generate_td_functions.py | 25 | 10306 | # encoding: UTF-8
__author__ = 'CHENXY'
from string import join
from ksotp_struct import structDict
def processCallBack(line):
orignalLine = line
line = line.replace('\tvirtual void ', '') # 删除行首的无效内容
line = line.replace('{};\n', '') # 删除行尾的无效内容
content = line.split('(')
cbName = content[0] # 回调函数名称
cbArgs = content[1] # 回调函数参数
if cbArgs[-1] == ' ':
cbArgs = cbArgs.replace(') ', '')
else:
cbArgs = cbArgs.replace(')', '')
cbArgsList = cbArgs.split(', ') # 将每个参数转化为列表
cbArgsTypeList = []
cbArgsValueList = []
for arg in cbArgsList: # 开始处理参数
content = arg.split(' ')
if len(content) > 1:
cbArgsTypeList.append(content[0]) # 参数类型列表
cbArgsValueList.append(content[1]) # 参数数据列表
createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine)
createProcess(cbName, cbArgsTypeList, cbArgsValueList)
# generate the process-method declarations for the .h file
process_line = 'void process' + cbName[2:] + '(Task task);\n'
fheaderprocess.write(process_line)
fheaderprocess.write('\n')
# generate the on-callback declarations for the .h file
if 'OnRspError' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict error, int id, bool last) {};\n'
elif 'OnRsp' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last) {};\n'
elif 'OnRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data) {};\n'
elif 'OnErrRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error) {};\n'
else:
on_line = ''
fheaderon.write(on_line)
fheaderon.write('\n')
# generate the Python wrapper part
createWrap(cbName)
#----------------------------------------------------------------------
def createWrap(cbName):
"""在Python封装段代码中进行处理"""
# generate the on-callback declarations for the .h file
if 'OnRspError' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict error, int id, bool last)\n'
override_line = '("on' + cbName[2:] + '")(error, id, last);\n'
elif 'OnRsp' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error, int id, bool last)\n'
override_line = '("on' + cbName[2:] + '")(data, error, id, last);\n'
elif 'OnRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data)\n'
override_line = '("on' + cbName[2:] + '")(data);\n'
elif 'OnErrRtn' in cbName:
on_line = 'virtual void on' + cbName[2:] + '(dict data, dict error)\n'
override_line = '("on' + cbName[2:] + '")(data, error);\n'
else:
on_line = ''
if on_line != '':
fwrap.write(on_line)
fwrap.write('{\n')
fwrap.write('\ttry\n')
fwrap.write('\t{\n')
fwrap.write('\t\tthis->get_override'+override_line)
fwrap.write('\t}\n')
fwrap.write('\tcatch (error_already_set const &)\n')
fwrap.write('\t{\n')
fwrap.write('\t\tPyErr_Print();\n')
fwrap.write('\t}\n')
fwrap.write('};\n')
fwrap.write('\n')
def createTask(cbName, cbArgsTypeList, cbArgsValueList, orignalLine):
# build a Task object from the callback data and push it onto the queue
funcline = orignalLine.replace('\tvirtual void ', 'void ' + apiName + '::')
funcline = funcline.replace('{};', '')
ftask.write(funcline)
ftask.write('{\n')
ftask.write("\tTask task = Task();\n")
ftask.write("\ttask.task_name = " + cbName.upper() + ";\n")
# #define constants
global define_count
fdefine.write("#define " + cbName.upper() + ' ' + str(define_count) + '\n')
define_count = define_count + 1
# switch-case section
fswitch.write("case " + cbName.upper() + ':\n')
fswitch.write("{\n")
fswitch.write("\tthis->" + cbName.replace('On', 'process') + '(task);\n')
fswitch.write("\tbreak;\n")
fswitch.write("}\n")
fswitch.write("\n")
for i, type_ in enumerate(cbArgsTypeList):
if type_ == 'int':
ftask.write("\ttask.task_id = " + cbArgsValueList[i] + ";\n")
elif type_ == 'bool':
ftask.write("\ttask.task_last = " + cbArgsValueList[i] + ";\n")
elif 'RspInfoField' in type_:
ftask.write("\n")
ftask.write("\tif (pRspInfo)\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_error = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\tCSecurityFtdcRspInfoField empty_error = CSecurityFtdcRspInfoField();\n")
ftask.write("\t\tmemset(&empty_error, 0, sizeof(empty_error));\n")
ftask.write("\t\ttask.task_error = empty_error;\n")
ftask.write("\t}\n")
else:
ftask.write("\n")
ftask.write("\tif (" + cbArgsValueList[i][1:] + ")\n")
ftask.write("\t{\n")
ftask.write("\t\ttask.task_data = " + cbArgsValueList[i] + ";\n")
ftask.write("\t}\n")
ftask.write("\telse\n")
ftask.write("\t{\n")
ftask.write("\t\t" + type_ + " empty_data = " + type_ + "();\n")
ftask.write("\t\tmemset(&empty_data, 0, sizeof(empty_data));\n")
ftask.write("\t\ttask.task_data = empty_data;\n")
ftask.write("\t}\n")
ftask.write("\tthis->task_queue.push(task);\n")
ftask.write("};\n")
ftask.write("\n")
def createProcess(cbName, cbArgsTypeList, cbArgsValueList):
# pop the task from the queue and convert it into a Python dict
fprocess.write("void " + apiName + '::' + cbName.replace('On', 'process') + '(Task task)' + "\n")
fprocess.write("{\n")
fprocess.write("\tPyLock lock;\n")
onArgsList = []
for i, type_ in enumerate(cbArgsTypeList):
if 'RspInfoField' in type_:
fprocess.write("\t"+ type_ + ' task_error = any_cast<' + type_ + '>(task.task_error);\n')
fprocess.write("\t"+ "dict error;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'error["' + key + '"] = task_error.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('error')
elif type_ in structDict:
fprocess.write("\t"+ type_ + ' task_data = any_cast<' + type_ + '>(task.task_data);\n')
fprocess.write("\t"+ "dict data;\n")
struct = structDict[type_]
for key in struct.keys():
fprocess.write("\t"+ 'data["' + key + '"] = task_data.' + key + ';\n')
fprocess.write("\n")
onArgsList.append('data')
elif type_ == 'bool':
onArgsList.append('task.task_last')
elif type_ == 'int':
onArgsList.append('task.task_id')
onArgs = join(onArgsList, ', ')
fprocess.write('\tthis->' + cbName.replace('On', 'on') + '(' + onArgs +');\n')
fprocess.write("};\n")
fprocess.write("\n")
def processFunction(line):
line = line.replace('\tvirtual int ', '') # 删除行首的无效内容
line = line.replace(') = 0;\n', '') # 删除行尾的无效内容
content = line.split('(')
fcName = content[0] # 回调函数名称
fcArgs = content[1] # 回调函数参数
fcArgs = fcArgs.replace(')', '')
fcArgsList = fcArgs.split(', ') # 将每个参数转化为列表
fcArgsTypeList = []
fcArgsValueList = []
for arg in fcArgsList: # 开始处理参数
content = arg.split(' ')
if len(content) > 1:
fcArgsTypeList.append(content[0]) # 参数类型列表
fcArgsValueList.append(content[1]) # 参数数据列表
if len(fcArgsTypeList)>0 and fcArgsTypeList[0] in structDict:
createFunction(fcName, fcArgsTypeList, fcArgsValueList)
# generate the request-function declarations for the .h file
if 'Req' in fcName:
req_line = 'int req' + fcName[3:] + '(dict req, int nRequestID);\n'
fheaderfunction.write(req_line)
fheaderfunction.write('\n')
def createFunction(fcName, fcArgsTypeList, fcArgsValueList):
type_ = fcArgsTypeList[0]
struct = structDict[type_]
ffunction.write('int TdApi::req' + fcName[3:] + '(dict req, int nRequestID)\n')
ffunction.write('{\n')
ffunction.write('\t' + type_ +' myreq = ' + type_ + '();\n')
ffunction.write('\tmemset(&myreq, 0, sizeof(myreq));\n')
for key, value in struct.items():
if value == 'string':
line = '\tgetChar(req, "' + key + '", myreq.' + key + ');\n'
elif value == 'int':
line = '\tgetInt(req, "' + key + '", &myreq.' + key + ');\n'
elif value == 'double':
line = '\tgetDouble(req, "' + key + '", &myreq.' + key + ');\n'
ffunction.write(line)
ffunction.write('\tint i = this->api->' + fcName + '(&myreq, nRequestID);\n')
ffunction.write('\treturn i;\n')
ffunction.write('};\n')
ffunction.write('\n')
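# ---------------------------------------------------------------------------
# Illustrative note (added, not part of the original script). Given a
# hypothetical header line such as
#     virtual int ReqQryExample(CSecurityFtdcQryExampleField *pQryExample, int nRequestID) = 0;
# processFunction()/createFunction() above would emit roughly
#     int TdApi::reqQryExample(dict req, int nRequestID)
#     {
#         CSecurityFtdcQryExampleField myreq = CSecurityFtdcQryExampleField();
#         memset(&myreq, 0, sizeof(myreq));
#         // fields copied via getChar/getInt/getDouble
#         int i = this->api->ReqQryExample(&myreq, nRequestID);
#         return i;
#     };
# The struct and method names here are placeholders for illustration only.
# ---------------------------------------------------------------------------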
#########################################################
apiName = 'TdApi'
fcpp = open('KSOTPTraderApi.h', 'r')
ftask = open('ksotp_td_task.cpp', 'w')
fprocess = open('ksotp_td_process.cpp', 'w')
ffunction = open('ksotp_td_function.cpp', 'w')
fdefine = open('ksotp_td_define.cpp', 'w')
fswitch = open('ksotp_td_switch.cpp', 'w')
fheaderprocess = open('ksotp_td_header_process.h', 'w')
fheaderon = open('ksotp_td_header_on.h', 'w')
fheaderfunction = open('ksotp_td_header_function.h', 'w')
fwrap = open('ksotp_td_wrap.cpp', 'w')
define_count = 1
for line in fcpp:
if "\tvirtual void On" in line:
processCallBack(line)
elif "\tvirtual int" in line:
processFunction(line)
fcpp.close()
ftask.close()
fprocess.close()
ffunction.close()
fswitch.close()
fdefine.close()
fheaderprocess.close()
fheaderon.close()
fheaderfunction.close()
fwrap.close() | mit |
naturali/tensorflow | tensorflow/python/kernel_tests/random_crop_test.py | 29 | 2638 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for random_crop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class RandomCropTest(tf.test.TestCase):
def testNoOp(self):
# No random cropping is performed since the size is value.shape.
for shape in (2, 1, 1), (2, 1, 3), (4, 5, 3):
value = np.arange(0, np.prod(shape), dtype=np.int32).reshape(shape)
with self.test_session():
crop = tf.random_crop(value, shape).eval()
self.assertAllEqual(crop, value)
def testContains(self):
with self.test_session():
shape = (3, 5, 7)
target = (2, 3, 4)
value = np.random.randint(1000000, size=shape)
value_set = set(tuple(value[i:i + 2, j:j + 3, k:k + 4].ravel())
for i in range(2) for j in range(3) for k in range(4))
crop = tf.random_crop(value, size=target)
for _ in range(20):
y = crop.eval()
self.assertAllEqual(y.shape, target)
self.assertTrue(tuple(y.ravel()) in value_set)
def testRandomization(self):
# Run 1x1 crop num_samples times in an image and ensure that one finds each
# pixel 1/size of the time.
num_samples = 1000
shape = [5, 4, 1]
size = np.prod(shape)
single = [1, 1, 1]
value = np.arange(size).reshape(shape)
with self.test_session():
crop = tf.random_crop(value, single, seed=7)
counts = np.zeros(size, dtype=np.int32)
for _ in range(num_samples):
y = crop.eval()
self.assertAllEqual(y.shape, single)
counts[y] += 1
# Calculate the mean and 4 * standard deviation.
mean = np.repeat(num_samples / size, size)
four_stddev = 4.0 * np.sqrt(mean)
# Ensure that each entry is observed in 1/size of the samples
# within 4 standard deviations.
self.assertAllClose(counts, mean, atol=four_stddev)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
evro/CouchPotatoServer | libs/pyutil/test/out_of_shape/test_odict.py | 106 | 16600 | #!/usr/bin/env python
# Copyright (c) 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import random, unittest
from pyutil.humanreadable import hr
from pyutil import memutil
from pyutil import odict
class Bencher:
def __init__(self, klass, MAXREPS=2**8, MAXTIME=5):
print klass
self.klass = klass
self.MAXREPS = MAXREPS
self.MAXTIME = MAXTIME
self.d = {}
self.lrun = None
def _generic_benchmarking_init(self, n):
self.d.clear()
self.lrun = self.klass()
for i in range(n):
self.d[i] = i
self.lrun[n+i] = n+i
def _benchmark_init(self, n):
d2 = self.klass(initialdata=self.d)
assert len(d2) == len(self.d)
return True
def _benchmark_update(self, n):
d2 = self.klass()
assert len(d2) == 0
d2.update(self.d)
assert len(d2) == len(self.d)
return True
def _benchmark_insert(self, n):
d2 = self.klass()
assert len(d2) == 0
for k, v, in self.d.iteritems():
d2[k] = v
assert len(d2) == len(self.d)
return True
def _benchmark_init_and_popitem(self, n):
d2 = self.klass(initialdata=self.d)
assert len(d2) == len(self.d)
for i in range(len(d2), 0, -1):
assert len(d2) == i
d2.popitem()
return True
def _benchmark_init_and_has_key_and_del(self, n):
d2 = self.klass(initialdata=self.d)
assert len(d2) == len(self.d)
for k in self.d.iterkeys():
if d2.has_key(k):
del d2[k]
return True
def _benchmark_init_and_remove(self, n):
d2 = self.klass(initialdata=self.d)
assert len(d2) == len(self.d)
for k in self.d.iterkeys():
d2.remove(k, strictkey=False)
return True
def bench(self, BSIZES=(128, 250, 2048, 5000, 2**13, 2**20,)):
from pyutil import benchutil
funcs = ("_benchmark_insert", "_benchmark_init_and_has_key_and_del", "_benchmark_init_and_remove", "_benchmark_init_and_popitem", "_benchmark_update", "_benchmark_init",)
max = 0
for func in funcs:
if len(func) > max:
max = len(func)
for func in funcs:
print func + " " * (max + 1 - len(func))
for BSIZE in BSIZES:
f = getattr(self, func)
benchutil.rep_bench(f, BSIZE, self._generic_benchmarking_init, MAXREPS=self.MAXREPS, MAXTIME=self.MAXTIME)
def quick_bench():
Bencher(odict.LRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15, 2**16,))
Bencher(odict.LinkedListLRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15,))
Bencher(odict.SmallLRUCache, MAXTIME=2).bench(BSIZES=(2**7, 2**12, 2**14, 2**15,))
def slow_bench():
Bencher(odict.LRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 21)])
Bencher(odict.LinkedListLRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 21)])
Bencher(odict.SmallLRUCache, MAXTIME=5).bench(BSIZES=[2**x for x in range(7, 17)])
MUCHADDINGSIZE=2**4
# The following parameters are for testing for memory leakage.
MIN_SLOPE = 512.0 # If it leaks less than 512.0 bytes per iteration, then it's probably just some kind of noise from the interpreter or something...
SAMPLES = 2**5
# MIN_SLOPE is high because samples is low, which is because taking a statistically useful numbers of samples takes too long.
# For a *good* test, turn samples up as high as you can stand (maybe 2**10) and set MIN_SLOPE to about 1.0.
# For a *really* good test, add a variance measure to memutil.measure_mem_leakage(), and only consider it to be leaking if the slope is > 0.1 *and* is a "pretty good" fit for the data.
# MIN_SLOPE = 1.0
# SAMPLES = 2**10
class Testy(unittest.TestCase):
def _test_empty_lookup(self, d) :
self.failUnless(d.get('spam') is None)
def _test_key_error(self, C) :
d = C()
try:
d['spam']
self.fail(d)
except KeyError :
pass
def _test_insert_and_get_and_items(self, d) :
d.insert("spam", "eggs")
d["spam2"] = "eggs2"
self.failUnless(d.get("spam") == "eggs", str(d))
self.failUnless(d.get("spam2") == "eggs2")
self.failUnless(d["spam"] == "eggs")
self.failUnless(d["spam2"] == "eggs2")
self.failUnlessEqual(d.items(), [("spam", "eggs"), ("spam2", "eggs2")], d)
def _test_move_to_most_recent(self, d) :
d.insert("spam", "eggs")
d["spam2"] = "eggs2"
self.failUnless(d.get("spam") == "eggs", str(d))
self.failUnless(d.get("spam2") == "eggs2")
self.failUnless(d["spam"] == "eggs")
self.failUnless(d["spam2"] == "eggs2")
self.failUnlessEqual(d.items(), [("spam", "eggs"), ("spam2", "eggs2")])
d.move_to_most_recent("spam")
self.failUnlessEqual(d.items(), [("spam2", "eggs2"), ("spam", "eggs")])
def _test_insert_and_remove(self, d):
d.insert('spam', "eggs")
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggs")
self.failUnless(d['spam'] == "eggs")
self.failUnlessEqual(d.items(), [("spam", "eggs")])
x = d.remove('spam')
self.failUnless(x == "eggs", "x: %s" % `x`)
self.failUnless(not d.has_key('spam'))
self.failUnlessEqual(d.items(), [])
d['spam'] = "eggsy"
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggsy")
self.failUnless(d['spam'] == "eggsy")
self.failUnlessEqual(d.items(), [("spam", "eggsy")])
del d['spam']
self.failUnless(not d.has_key('spam'))
self.failUnlessEqual(d.items(), [])
def _test_setdefault(self, d):
d.setdefault('spam', "eggs")
self.failUnless(d.has_key('spam'))
self.failUnless(d.get('spam') == "eggs")
self.failUnless(d['spam'] == "eggs")
self.failUnlessEqual(d.items(), [("spam", "eggs")])
x = d.remove('spam')
self.failUnless(x == "eggs", "x: %s" % `x`)
self.failUnless(not d.has_key('spam'))
self.failUnlessEqual(d.items(), [])
def _test_extracted_bound_method(self, d):
insmeth = d.insert
insmeth('spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_extracted_unbound_method(self, d):
insumeth = d.__class__.insert
insumeth(d, 'spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_unbound_method(self, C, d):
umeth = C.insert
umeth(d, 'spammy', "eggsy")
self.failUnless(d.get('spammy') == "eggsy")
def _test_clear(self, d):
d[11] = 11
d._assert_invariants()
self.failUnless(len(d) == 1)
d.clear()
d._assert_invariants()
self.failUnless(len(d) == 0)
self.failUnlessEqual(d.items(), [])
def _test_update_from_dict(self, d):
self.failUnless(d._assert_invariants())
d['b'] = 99
self.failUnless(d._assert_invariants())
d2={ 'a': 0, 'b': 1, 'c': 2,}
d.update(d2)
self.failUnless(d._assert_invariants())
self.failUnless(d.get('a') == 0, "d.get('a'): %s" % d.get('a'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('b') == 1, "d.get('b'): %s" % d.get('b'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('c') == 2)
self.failUnless(d._assert_invariants())
def _test_update_from_odict(self, d):
self.failUnless(d._assert_invariants())
d['b'] = 99
self.failUnless(d._assert_invariants())
d2 = odict.OrderedDict()
d2['a'] = 0
d2['b'] = 1
d2['c'] = 2
d.update(d2)
self.failUnless(d._assert_invariants())
self.failUnless(d.get('a') == 0, "d.get('a'): %s" % d.get('a'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('b') == 1, "d.get('b'): %s" % d.get('b'))
self.failUnless(d._assert_invariants())
self.failUnless(d.get('c') == 2)
self.failUnless(d._assert_invariants())
self.failUnlessEqual(d.items(), [("b", 1), ("a", 0), ("c", 2)])
def _test_popitem(self, C):
c = C({"a": 1})
res = c.popitem()
self.failUnlessEqual(res, ("a", 1,))
c["a"] = 1
c["b"] = 2
res = c.popitem()
self.failUnlessEqual(res, ("b", 2,))
def _test_pop(self, C):
c = C({"a": 1})
res = c.pop()
self.failUnlessEqual(res, "a")
c["a"] = 1
c["b"] = 2
res = c.pop()
self.failUnlessEqual(res, "b")
def _test_iterate_items(self, C):
c = C({"a": 1})
c["b"] = 2
i = c.iteritems()
x = i.next()
self.failUnlessEqual(x, ("a", 1,))
x = i.next()
self.failUnlessEqual(x, ("b", 2,))
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_iterate_keys(self, C):
c = C({"a": 1})
c["b"] = 2
i = c.iterkeys()
x = i.next()
self.failUnlessEqual(x, "a")
x = i.next()
self.failUnlessEqual(x, "b")
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_iterate_values(self, C):
c = C({"a": 1})
c["b"] = 2
i = c.itervalues()
x = i.next()
self.failUnless(x == 1)
x = i.next()
self.failUnless(x == 2)
try:
i.next()
self.fail() # Should have gotten StopIteration exception
except StopIteration:
pass
def _test_much_adding_some_removing(self, C):
c = C()
for i in range(MUCHADDINGSIZE):
c[i] = i
if (i % 4) == 0:
k = random.choice(c.keys())
del c[k]
for i in range(MUCHADDINGSIZE):
c[i] = i
self.failUnlessEqual(len(c), MUCHADDINGSIZE)
def _test_1(self, C):
c = C()
c[11] = 11
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
c[11] = 1001
c._assert_invariants()
def _test_2(self, C):
c = C()
c[11] = 11
c._assert_invariants()
del c[11]
c._assert_invariants()
c[11] = 11
c._assert_invariants()
c[11] = 11
c._assert_invariants()
def _test_3(self, C):
c = C()
c[11] = 11
c._assert_invariants()
c[11] = 12
c._assert_invariants()
c[11] = 13
c._assert_invariants()
del c[11]
c._assert_invariants()
c[11] = 14
c._assert_invariants()
c[11] = 15
c._assert_invariants()
c[11] = 16
c._assert_invariants()
def _test_has_key(self, C):
c = C()
c._assert_invariants()
for i in xrange(11):
c._assert_invariants()
c[i] = i
c._assert_invariants()
del c[0]
self.failUnless(len(c) == 10)
self.failUnless(10 in c.values())
self.failUnless(0 not in c.values())
c.has_key(1) # this touches `1' but does not make it fresher so that it will get popped next time we pop.
c[1] = 1 # this touches `1' but does not make it fresher so that it will get popped.
c._assert_invariants()
x = c.pop()
self.failUnlessEqual(x, 10)
c[99] = 99
c._assert_invariants()
self.failUnless(len(c) == 10)
self.failUnless(1 in c.values(), "C: %s, c.values(): %s" % (hr(C), hr(c.values(),),))
self.failUnless(2 in c.values(), "C: %s, c.values(): %s" % (hr(C), hr(c.values(),),))
self.failIf(10 in c.values(), "C: %s, c.values(): %s" % (hr(C), hr(c.values(),),))
self.failUnless(99 in c.values())
def _test_em(self):
for klass in (odict.OrderedDict,):
for testfunc in (self._test_empty_lookup, self._test_insert_and_get_and_items, self._test_insert_and_remove, self._test_extracted_bound_method, self._test_extracted_unbound_method, self._test_clear, self._test_update_from_dict, self._test_update_from_odict, self._test_setdefault,):
testfunc(klass())
for testfunc in (self._test_pop, self._test_popitem, self._test_iterate_items, self._test_iterate_keys, self._test_iterate_values, self._test_key_error, ):
testfunc(klass)
self._test_unbound_method(klass, klass())
for klass in (odict.OrderedDict,):
for testfunc in (self._test_1, self._test_2, self._test_3, self._test_has_key,):
testfunc(klass)
def test_em(self):
self._test_em()
def _mem_test_much_adding_some_removing(self):
for klass in (odict.LRUCache, odict.SmallLRUCache,):
return self._test_much_adding_some_removing(klass)
def test_mem_leakage(self):
try:
self._test_mem_leakage()
except memutil.NotSupportedException:
print "Skipping memory leak test since measurement of current mem usage isn't implemented on this platform."
pass
del test_mem_leakage # This test takes too long.
def _test_mem_leakage(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of memory state.
memutil.measure_mem_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
slope = memutil.measure_mem_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks memory at a rate of approximately %s system bytes per invocation" % (self.test_em, "%0.3f" % slope,))
def test_mem_leakage_much_adding_some_removing(self):
try:
self._test_mem_leakage_much_adding_some_removing()
except memutil.NotSupportedException:
print "Skipping memory leak test since measurement of current mem usage isn't implemented on this platform."
pass
del test_mem_leakage_much_adding_some_removing # This test takes too long.
def _test_mem_leakage_much_adding_some_removing(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of memory state.
memutil.measure_mem_leakage(self._mem_test_much_adding_some_removing, SAMPLES, iterspersample=2**0)
slope = memutil.measure_mem_leakage(self._mem_test_much_adding_some_removing, SAMPLES, iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks memory at a rate of approximately %s system bytes per invocation" % (self._mem_test_much_adding_some_removing, "%0.3f" % slope,))
def test_obj_leakage(self):
self._test_obj_leakage()
del test_obj_leakage # This test takes too long.
def _test_obj_leakage(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of objects state.
memutil.measure_obj_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
slope = memutil.measure_obj_leakage(self.test_em, max(2**3, SAMPLES/2**3), iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks objects at a rate of approximately %s system bytes per invocation" % (self.test_em, "%0.3f" % slope,))
def test_obj_leakage_much_adding_some_removing(self):
self._test_obj_leakage_much_adding_some_removing()
del test_obj_leakage_much_adding_some_removing # This test takes too long.
def _test_obj_leakage_much_adding_some_removing(self):
# measure one and throw it away, in order to reach a "steady state" in terms of initialization of objects state.
memutil.measure_obj_leakage(self._mem_test_much_adding_some_removing, SAMPLES, iterspersample=2**0)
slope = memutil.measure_obj_leakage(self._mem_test_much_adding_some_removing, SAMPLES, iterspersample=2**0)
self.failUnless(slope <= MIN_SLOPE, "%s leaks objects at a rate of approximately %s system bytes per invocation" % (self._mem_test_much_adding_some_removing, "%0.3f" % slope,))
| gpl-3.0 |
si618/pi-time | node_modules/grunt-pylint/tasks/lib/astroid/tests/unittest_scoped_nodes.py | 1 | 46381 | # copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:[email protected]
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 2.1 of the License, or (at your
# option) any later version.
#
# astroid is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License
# for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""tests for specific behaviour of astroid scoped nodes (i.e. module, class and
function)
"""
import os
import sys
from functools import partial
import unittest
import warnings
from astroid import YES, builder, nodes, scoped_nodes, \
InferenceError, NotFoundError, NoDefault, ResolveError
from astroid.bases import BUILTINS, Instance, BoundMethod, UnboundMethod
from astroid import __pkginfo__
from astroid import test_utils
from astroid.tests import resources
def _test_dict_interface(self, node, test_attr):
self.assertIs(node[test_attr], node[test_attr])
self.assertIn(test_attr, node)
node.keys()
node.values()
node.items()
iter(node)
class ModuleLoader(resources.SysPathSetup):
def setUp(self):
super(ModuleLoader, self).setUp()
self.module = resources.build_file('data/module.py', 'data.module')
self.module2 = resources.build_file('data/module2.py', 'data.module2')
self.nonregr = resources.build_file('data/nonregr.py', 'data.nonregr')
self.pack = resources.build_file('data/__init__.py', 'data')
class ModuleNodeTest(ModuleLoader, unittest.TestCase):
def test_special_attributes(self):
self.assertEqual(len(self.module.getattr('__name__')), 1)
self.assertIsInstance(self.module.getattr('__name__')[0], nodes.Const)
self.assertEqual(self.module.getattr('__name__')[0].value, 'data.module')
self.assertEqual(len(self.module.getattr('__doc__')), 1)
self.assertIsInstance(self.module.getattr('__doc__')[0], nodes.Const)
self.assertEqual(self.module.getattr('__doc__')[0].value, 'test module for astroid\n')
self.assertEqual(len(self.module.getattr('__file__')), 1)
self.assertIsInstance(self.module.getattr('__file__')[0], nodes.Const)
self.assertEqual(self.module.getattr('__file__')[0].value,
os.path.abspath(resources.find('data/module.py')))
self.assertEqual(len(self.module.getattr('__dict__')), 1)
self.assertIsInstance(self.module.getattr('__dict__')[0], nodes.Dict)
self.assertRaises(NotFoundError, self.module.getattr, '__path__')
self.assertEqual(len(self.pack.getattr('__path__')), 1)
self.assertIsInstance(self.pack.getattr('__path__')[0], nodes.List)
def test_dict_interface(self):
_test_dict_interface(self, self.module, 'YO')
def test_getattr(self):
yo = self.module.getattr('YO')[0]
self.assertIsInstance(yo, nodes.Class)
self.assertEqual(yo.name, 'YO')
red = next(self.module.igetattr('redirect'))
self.assertIsInstance(red, nodes.Function)
self.assertEqual(red.name, 'four_args')
pb = next(self.module.igetattr('pb'))
self.assertIsInstance(pb, nodes.Class)
self.assertEqual(pb.name, 'ProgressBar')
# resolve package redirection
mod = resources.build_file('data/appl/myConnection.py',
'data.appl.myConnection')
ssl = next(mod.igetattr('SSL1'))
cnx = next(ssl.igetattr('Connection'))
self.assertEqual(cnx.__class__, nodes.Class)
self.assertEqual(cnx.name, 'Connection')
self.assertEqual(cnx.root().name, 'data.SSL1.Connection1')
self.assertEqual(len(self.nonregr.getattr('enumerate')), 2)
# raise ResolveError
self.assertRaises(InferenceError, self.nonregr.igetattr, 'YOAA')
def test_wildcard_import_names(self):
m = resources.build_file('data/all.py', 'all')
self.assertEqual(m.wildcard_import_names(), ['Aaa', '_bla', 'name'])
m = resources.build_file('data/notall.py', 'notall')
res = sorted(m.wildcard_import_names())
self.assertEqual(res, ['Aaa', 'func', 'name', 'other'])
m = test_utils.build_module('''
from missing import tzop
trop = "test"
__all__ = (trop, "test1", tzop, 42)
''')
res = sorted(m.wildcard_import_names())
self.assertEqual(res, ["test", "test1"])
m = test_utils.build_module('''
test = tzop = 42
__all__ = ('test', ) + ('tzop', )
''')
res = sorted(m.wildcard_import_names())
self.assertEqual(res, ['test', 'tzop'])
def test_module_getattr(self):
data = '''
appli = application
appli += 2
del appli
'''
astroid = test_utils.build_module(data, __name__)
# test del statement not returned by getattr
self.assertEqual(len(astroid.getattr('appli')), 2,
astroid.getattr('appli'))
def test_relative_to_absolute_name(self):
# package
mod = nodes.Module('very.multi.package', 'doc')
mod.package = True
modname = mod.relative_to_absolute_name('utils', 1)
self.assertEqual(modname, 'very.multi.package.utils')
modname = mod.relative_to_absolute_name('utils', 2)
self.assertEqual(modname, 'very.multi.utils')
modname = mod.relative_to_absolute_name('utils', 0)
self.assertEqual(modname, 'very.multi.package.utils')
modname = mod.relative_to_absolute_name('', 1)
self.assertEqual(modname, 'very.multi.package')
# non package
mod = nodes.Module('very.multi.module', 'doc')
mod.package = False
modname = mod.relative_to_absolute_name('utils', 0)
self.assertEqual(modname, 'very.multi.utils')
modname = mod.relative_to_absolute_name('utils', 1)
self.assertEqual(modname, 'very.multi.utils')
modname = mod.relative_to_absolute_name('utils', 2)
self.assertEqual(modname, 'very.utils')
modname = mod.relative_to_absolute_name('', 1)
self.assertEqual(modname, 'very.multi')
def test_import_1(self):
data = '''from . import subpackage'''
sys.path.insert(0, resources.find('data'))
astroid = test_utils.build_module(data, 'package', 'data/package/__init__.py')
try:
m = astroid.import_module('', level=1)
self.assertEqual(m.name, 'package')
infered = list(astroid.igetattr('subpackage'))
self.assertEqual(len(infered), 1)
self.assertEqual(infered[0].name, 'package.subpackage')
finally:
del sys.path[0]
def test_import_2(self):
data = '''from . import subpackage as pouet'''
astroid = test_utils.build_module(data, 'package', 'data/package/__init__.py')
sys.path.insert(0, resources.find('data'))
try:
m = astroid.import_module('', level=1)
self.assertEqual(m.name, 'package')
infered = list(astroid.igetattr('pouet'))
self.assertEqual(len(infered), 1)
self.assertEqual(infered[0].name, 'package.subpackage')
finally:
del sys.path[0]
def test_file_stream_in_memory(self):
data = '''irrelevant_variable is irrelevant'''
astroid = test_utils.build_module(data, 'in_memory')
with warnings.catch_warnings(record=True):
self.assertEqual(astroid.file_stream.read().decode(), data)
def test_file_stream_physical(self):
path = resources.find('data/all.py')
astroid = builder.AstroidBuilder().file_build(path, 'all')
with open(path, 'rb') as file_io:
with warnings.catch_warnings(record=True):
self.assertEqual(astroid.file_stream.read(), file_io.read())
def test_file_stream_api(self):
path = resources.find('data/all.py')
astroid = builder.AstroidBuilder().file_build(path, 'all')
if __pkginfo__.numversion >= (1, 6):
# file_stream is slated for removal in astroid 1.6.
with self.assertRaises(AttributeError):
astroid.file_stream
else:
# Until astroid 1.6, Module.file_stream will emit
# PendingDeprecationWarning in 1.4, DeprecationWarning
# in 1.5 and finally it will be removed in 1.6, leaving
# only Module.stream as the recommended way to retrieve
# its file stream.
with warnings.catch_warnings(record=True) as cm:
warnings.simplefilter("always")
self.assertIsNot(astroid.file_stream, astroid.file_stream)
self.assertGreater(len(cm), 1)
self.assertEqual(cm[0].category, PendingDeprecationWarning)
def test_stream_api(self):
path = resources.find('data/all.py')
astroid = builder.AstroidBuilder().file_build(path, 'all')
stream = astroid.stream()
self.assertTrue(hasattr(stream, 'close'))
with stream:
with open(path, 'rb') as file_io:
self.assertEqual(stream.read(), file_io.read())
class FunctionNodeTest(ModuleLoader, unittest.TestCase):
def test_special_attributes(self):
func = self.module2['make_class']
self.assertEqual(len(func.getattr('__name__')), 1)
self.assertIsInstance(func.getattr('__name__')[0], nodes.Const)
self.assertEqual(func.getattr('__name__')[0].value, 'make_class')
self.assertEqual(len(func.getattr('__doc__')), 1)
self.assertIsInstance(func.getattr('__doc__')[0], nodes.Const)
self.assertEqual(func.getattr('__doc__')[0].value, 'check base is correctly resolved to Concrete0')
self.assertEqual(len(self.module.getattr('__dict__')), 1)
self.assertIsInstance(self.module.getattr('__dict__')[0], nodes.Dict)
def test_dict_interface(self):
_test_dict_interface(self, self.module['global_access'], 'local')
def test_default_value(self):
func = self.module2['make_class']
self.assertIsInstance(func.args.default_value('base'), nodes.Getattr)
self.assertRaises(NoDefault, func.args.default_value, 'args')
self.assertRaises(NoDefault, func.args.default_value, 'kwargs')
self.assertRaises(NoDefault, func.args.default_value, 'any')
#self.assertIsInstance(func.mularg_class('args'), nodes.Tuple)
#self.assertIsInstance(func.mularg_class('kwargs'), nodes.Dict)
#self.assertIsNone(func.mularg_class('base'))
def test_navigation(self):
function = self.module['global_access']
self.assertEqual(function.statement(), function)
l_sibling = function.previous_sibling()
# check taking parent if child is not a stmt
self.assertIsInstance(l_sibling, nodes.Assign)
child = function.args.args[0]
self.assertIs(l_sibling, child.previous_sibling())
r_sibling = function.next_sibling()
self.assertIsInstance(r_sibling, nodes.Class)
self.assertEqual(r_sibling.name, 'YO')
self.assertIs(r_sibling, child.next_sibling())
last = r_sibling.next_sibling().next_sibling().next_sibling()
self.assertIsInstance(last, nodes.Assign)
self.assertIsNone(last.next_sibling())
first = l_sibling.previous_sibling().previous_sibling().previous_sibling().previous_sibling().previous_sibling()
self.assertIsNone(first.previous_sibling())
def test_nested_args(self):
if sys.version_info >= (3, 0):
self.skipTest("nested args has been removed in py3.x")
code = '''
def nested_args(a, (b, c, d)):
"nested arguments test"
'''
tree = test_utils.build_module(code)
func = tree['nested_args']
self.assertEqual(sorted(func.locals), ['a', 'b', 'c', 'd'])
self.assertEqual(func.args.format_args(), 'a, (b, c, d)')
def test_four_args(self):
func = self.module['four_args']
#self.assertEqual(func.args.args, ['a', ('b', 'c', 'd')])
local = sorted(func.keys())
self.assertEqual(local, ['a', 'b', 'c', 'd'])
self.assertEqual(func.type, 'function')
def test_format_args(self):
func = self.module2['make_class']
self.assertEqual(func.args.format_args(),
'any, base=data.module.YO, *args, **kwargs')
func = self.module['four_args']
self.assertEqual(func.args.format_args(), 'a, b, c, d')
def test_is_generator(self):
self.assertTrue(self.module2['generator'].is_generator())
self.assertFalse(self.module2['not_a_generator'].is_generator())
self.assertFalse(self.module2['make_class'].is_generator())
def test_is_abstract(self):
method = self.module2['AbstractClass']['to_override']
self.assertTrue(method.is_abstract(pass_is_abstract=False))
self.assertEqual(method.qname(), 'data.module2.AbstractClass.to_override')
self.assertEqual(method.pytype(), '%s.instancemethod' % BUILTINS)
method = self.module2['AbstractClass']['return_something']
self.assertFalse(method.is_abstract(pass_is_abstract=False))
# non regression : test raise "string" doesn't cause an exception in is_abstract
func = self.module2['raise_string']
self.assertFalse(func.is_abstract(pass_is_abstract=False))
def test_is_abstract_decorated(self):
methods = test_utils.extract_node("""
import abc
class Klass(object):
@abc.abstractproperty
def prop(self): #@
pass
@abc.abstractmethod
def method1(self): #@
pass
some_other_decorator = lambda x: x
@some_other_decorator
def method2(self): #@
pass
""")
self.assertTrue(methods[0].is_abstract(pass_is_abstract=False))
self.assertTrue(methods[1].is_abstract(pass_is_abstract=False))
self.assertFalse(methods[2].is_abstract(pass_is_abstract=False))
## def test_raises(self):
## method = self.module2['AbstractClass']['to_override']
## self.assertEqual([str(term) for term in method.raises()],
## ["CallFunc(Name('NotImplementedError'), [], None, None)"] )
## def test_returns(self):
## method = self.module2['AbstractClass']['return_something']
## # use string comp since Node doesn't handle __cmp__
## self.assertEqual([str(term) for term in method.returns()],
## ["Const('toto')", "Const(None)"])
def test_lambda_pytype(self):
data = '''
def f():
g = lambda: None
'''
astroid = test_utils.build_module(data)
g = list(astroid['f'].ilookup('g'))[0]
self.assertEqual(g.pytype(), '%s.function' % BUILTINS)
def test_lambda_qname(self):
astroid = test_utils.build_module('lmbd = lambda: None', __name__)
self.assertEqual('%s.<lambda>' % __name__, astroid['lmbd'].parent.value.qname())
def test_is_method(self):
data = '''
class A:
def meth1(self):
return 1
@classmethod
def meth2(cls):
return 2
@staticmethod
def meth3():
return 3
def function():
return 0
@staticmethod
def sfunction():
return -1
'''
astroid = test_utils.build_module(data)
self.assertTrue(astroid['A']['meth1'].is_method())
self.assertTrue(astroid['A']['meth2'].is_method())
self.assertTrue(astroid['A']['meth3'].is_method())
self.assertFalse(astroid['function'].is_method())
self.assertFalse(astroid['sfunction'].is_method())
def test_argnames(self):
if sys.version_info < (3, 0):
code = 'def f(a, (b, c), *args, **kwargs): pass'
else:
code = 'def f(a, b, c, *args, **kwargs): pass'
astroid = test_utils.build_module(code, __name__)
self.assertEqual(astroid['f'].argnames(), ['a', 'b', 'c', 'args', 'kwargs'])
def test_return_nothing(self):
"""test infered value on a function with empty return"""
data = '''
def func():
return
a = func()
'''
astroid = test_utils.build_module(data)
call = astroid.body[1].value
func_vals = call.infered()
self.assertEqual(len(func_vals), 1)
self.assertIsInstance(func_vals[0], nodes.Const)
self.assertIsNone(func_vals[0].value)
def test_func_instance_attr(self):
"""test instance attributes for functions"""
data = """
def test():
print(test.bar)
test.bar = 1
test()
"""
astroid = test_utils.build_module(data, 'mod')
func = astroid.body[2].value.func.infered()[0]
self.assertIsInstance(func, nodes.Function)
self.assertEqual(func.name, 'test')
one = func.getattr('bar')[0].infered()[0]
self.assertIsInstance(one, nodes.Const)
self.assertEqual(one.value, 1)
def test_type_builtin_descriptor_subclasses(self):
astroid = test_utils.build_module("""
class classonlymethod(classmethod):
pass
class staticonlymethod(staticmethod):
pass
class Node:
@classonlymethod
def clsmethod_subclass(cls):
pass
@classmethod
def clsmethod(cls):
pass
@staticonlymethod
def staticmethod_subclass(cls):
pass
@staticmethod
def stcmethod(cls):
pass
""")
node = astroid.locals['Node'][0]
self.assertEqual(node.locals['clsmethod_subclass'][0].type,
'classmethod')
self.assertEqual(node.locals['clsmethod'][0].type,
'classmethod')
self.assertEqual(node.locals['staticmethod_subclass'][0].type,
'staticmethod')
self.assertEqual(node.locals['stcmethod'][0].type,
'staticmethod')
def test_decorator_builtin_descriptors(self):
astroid = test_utils.build_module("""
def static_decorator(platform=None, order=50):
def wrapper(f):
f.cgm_module = True
f.cgm_module_order = order
f.cgm_module_platform = platform
return staticmethod(f)
return wrapper
def long_classmethod_decorator(platform=None, order=50):
def wrapper(f):
def wrapper2(f):
def wrapper3(f):
f.cgm_module = True
f.cgm_module_order = order
f.cgm_module_platform = platform
return classmethod(f)
return wrapper3(f)
return wrapper2(f)
return wrapper
def classmethod_decorator(platform=None):
def wrapper(f):
f.platform = platform
return classmethod(f)
return wrapper
def classmethod_wrapper(fn):
def wrapper(cls, *args, **kwargs):
result = fn(cls, *args, **kwargs)
return result
return classmethod(wrapper)
def staticmethod_wrapper(fn):
def wrapper(*args, **kwargs):
return fn(*args, **kwargs)
return staticmethod(wrapper)
class SomeClass(object):
@static_decorator()
def static(node, cfg):
pass
@classmethod_decorator()
def classmethod(cls):
pass
@static_decorator
def not_so_static(node):
pass
@classmethod_decorator
def not_so_classmethod(node):
pass
@classmethod_wrapper
def classmethod_wrapped(cls):
pass
@staticmethod_wrapper
def staticmethod_wrapped():
pass
@long_classmethod_decorator()
def long_classmethod(cls):
pass
""")
node = astroid.locals['SomeClass'][0]
self.assertEqual(node.locals['static'][0].type,
'staticmethod')
self.assertEqual(node.locals['classmethod'][0].type,
'classmethod')
self.assertEqual(node.locals['not_so_static'][0].type,
'method')
self.assertEqual(node.locals['not_so_classmethod'][0].type,
'method')
self.assertEqual(node.locals['classmethod_wrapped'][0].type,
'classmethod')
self.assertEqual(node.locals['staticmethod_wrapped'][0].type,
'staticmethod')
self.assertEqual(node.locals['long_classmethod'][0].type,
'classmethod')
class ClassNodeTest(ModuleLoader, unittest.TestCase):
def test_dict_interface(self):
_test_dict_interface(self, self.module['YOUPI'], 'method')
def test_cls_special_attributes_1(self):
cls = self.module['YO']
self.assertEqual(len(cls.getattr('__bases__')), 1)
self.assertEqual(len(cls.getattr('__name__')), 1)
self.assertIsInstance(cls.getattr('__name__')[0], nodes.Const)
self.assertEqual(cls.getattr('__name__')[0].value, 'YO')
self.assertEqual(len(cls.getattr('__doc__')), 1)
self.assertIsInstance(cls.getattr('__doc__')[0], nodes.Const)
self.assertEqual(cls.getattr('__doc__')[0].value, 'hehe')
self.assertEqual(len(cls.getattr('__module__')), 1)
self.assertIsInstance(cls.getattr('__module__')[0], nodes.Const)
self.assertEqual(cls.getattr('__module__')[0].value, 'data.module')
self.assertEqual(len(cls.getattr('__dict__')), 1)
if not cls.newstyle:
self.assertRaises(NotFoundError, cls.getattr, '__mro__')
for cls in (nodes.List._proxied, nodes.Const(1)._proxied):
self.assertEqual(len(cls.getattr('__bases__')), 1)
self.assertEqual(len(cls.getattr('__name__')), 1)
self.assertEqual(len(cls.getattr('__doc__')), 1, (cls, cls.getattr('__doc__')))
self.assertEqual(cls.getattr('__doc__')[0].value, cls.doc)
self.assertEqual(len(cls.getattr('__module__')), 1)
self.assertEqual(len(cls.getattr('__dict__')), 1)
self.assertEqual(len(cls.getattr('__mro__')), 1)
def test_cls_special_attributes_2(self):
astroid = test_utils.build_module('''
class A: pass
class B: pass
A.__bases__ += (B,)
''', __name__)
self.assertEqual(len(astroid['A'].getattr('__bases__')), 2)
self.assertIsInstance(astroid['A'].getattr('__bases__')[0], nodes.Tuple)
self.assertIsInstance(astroid['A'].getattr('__bases__')[1], nodes.AssAttr)
def test_instance_special_attributes(self):
for inst in (Instance(self.module['YO']), nodes.List(), nodes.Const(1)):
self.assertRaises(NotFoundError, inst.getattr, '__mro__')
self.assertRaises(NotFoundError, inst.getattr, '__bases__')
self.assertRaises(NotFoundError, inst.getattr, '__name__')
self.assertEqual(len(inst.getattr('__dict__')), 1)
self.assertEqual(len(inst.getattr('__doc__')), 1)
def test_navigation(self):
klass = self.module['YO']
self.assertEqual(klass.statement(), klass)
l_sibling = klass.previous_sibling()
self.assertTrue(isinstance(l_sibling, nodes.Function), l_sibling)
self.assertEqual(l_sibling.name, 'global_access')
r_sibling = klass.next_sibling()
self.assertIsInstance(r_sibling, nodes.Class)
self.assertEqual(r_sibling.name, 'YOUPI')
def test_local_attr_ancestors(self):
klass2 = self.module['YOUPI']
it = klass2.local_attr_ancestors('__init__')
anc_klass = next(it)
self.assertIsInstance(anc_klass, nodes.Class)
self.assertEqual(anc_klass.name, 'YO')
if sys.version_info[0] == 2:
self.assertRaises(StopIteration, partial(next, it))
else:
anc_klass = next(it)
self.assertIsInstance(anc_klass, nodes.Class)
self.assertEqual(anc_klass.name, 'object')
self.assertRaises(StopIteration, partial(next, it))
it = klass2.local_attr_ancestors('method')
self.assertRaises(StopIteration, partial(next, it))
def test_instance_attr_ancestors(self):
klass2 = self.module['YOUPI']
it = klass2.instance_attr_ancestors('yo')
anc_klass = next(it)
self.assertIsInstance(anc_klass, nodes.Class)
self.assertEqual(anc_klass.name, 'YO')
self.assertRaises(StopIteration, partial(next, it))
klass2 = self.module['YOUPI']
it = klass2.instance_attr_ancestors('member')
self.assertRaises(StopIteration, partial(next, it))
def test_methods(self):
expected_methods = {'__init__', 'class_method', 'method', 'static_method'}
klass2 = self.module['YOUPI']
methods = {m.name for m in klass2.methods()}
self.assertTrue(
methods.issuperset(expected_methods))
methods = {m.name for m in klass2.mymethods()}
self.assertSetEqual(expected_methods, methods)
klass2 = self.module2['Specialization']
methods = {m.name for m in klass2.mymethods()}
self.assertSetEqual(set([]), methods)
method_locals = klass2.local_attr('method')
self.assertEqual(len(method_locals), 1)
self.assertEqual(method_locals[0].name, 'method')
self.assertRaises(NotFoundError, klass2.local_attr, 'nonexistent')
methods = {m.name for m in klass2.methods()}
self.assertTrue(methods.issuperset(expected_methods))
#def test_rhs(self):
# my_dict = self.module['MY_DICT']
# self.assertIsInstance(my_dict.rhs(), nodes.Dict)
# a = self.module['YO']['a']
# value = a.rhs()
# self.assertIsInstance(value, nodes.Const)
# self.assertEqual(value.value, 1)
@unittest.skipIf(sys.version_info[0] >= 3, "Python 2 class semantics required.")
def test_ancestors(self):
klass = self.module['YOUPI']
self.assertEqual(['YO'], [a.name for a in klass.ancestors()])
klass = self.module2['Specialization']
self.assertEqual(['YOUPI', 'YO'], [a.name for a in klass.ancestors()])
@unittest.skipIf(sys.version_info[0] < 3, "Python 3 class semantics required.")
def test_ancestors_py3(self):
klass = self.module['YOUPI']
self.assertEqual(['YO', 'object'], [a.name for a in klass.ancestors()])
klass = self.module2['Specialization']
self.assertEqual(['YOUPI', 'YO', 'object'], [a.name for a in klass.ancestors()])
def test_type(self):
klass = self.module['YOUPI']
self.assertEqual(klass.type, 'class')
klass = self.module2['Metaclass']
self.assertEqual(klass.type, 'metaclass')
klass = self.module2['MyException']
self.assertEqual(klass.type, 'exception')
klass = self.module2['MyIFace']
self.assertEqual(klass.type, 'interface')
klass = self.module2['MyError']
self.assertEqual(klass.type, 'exception')
# the following class used to be detected as a metaclass
# after the fix which used instance._proxied in .ancestors(),
# when in fact it is a normal class
klass = self.module2['NotMetaclass']
self.assertEqual(klass.type, 'class')
def test_interfaces(self):
for klass, interfaces in (('Concrete0', ['MyIFace']),
('Concrete1', ['MyIFace', 'AnotherIFace']),
('Concrete2', ['MyIFace', 'AnotherIFace']),
('Concrete23', ['MyIFace', 'AnotherIFace'])):
klass = self.module2[klass]
self.assertEqual([i.name for i in klass.interfaces()],
interfaces)
def test_concat_interfaces(self):
astroid = test_utils.build_module('''
class IMachin: pass
class Correct2:
"""docstring"""
__implements__ = (IMachin,)
class BadArgument:
"""docstring"""
__implements__ = (IMachin,)
class InterfaceCanNowBeFound:
"""docstring"""
__implements__ = BadArgument.__implements__ + Correct2.__implements__
''')
self.assertEqual([i.name for i in astroid['InterfaceCanNowBeFound'].interfaces()],
['IMachin'])
def test_inner_classes(self):
eee = self.nonregr['Ccc']['Eee']
self.assertEqual([n.name for n in eee.ancestors()], ['Ddd', 'Aaa', 'object'])
def test_classmethod_attributes(self):
data = '''
class WebAppObject(object):
def registered(cls, application):
cls.appli = application
cls.schema = application.schema
cls.config = application.config
return cls
registered = classmethod(registered)
'''
astroid = test_utils.build_module(data, __name__)
cls = astroid['WebAppObject']
self.assertEqual(sorted(cls.locals.keys()),
['appli', 'config', 'registered', 'schema'])
def test_class_getattr(self):
data = '''
class WebAppObject(object):
appli = application
appli += 2
del self.appli
'''
astroid = test_utils.build_module(data, __name__)
cls = astroid['WebAppObject']
# test del statement not returned by getattr
self.assertEqual(len(cls.getattr('appli')), 2)
def test_instance_getattr(self):
data = '''
class WebAppObject(object):
def __init__(self, application):
self.appli = application
self.appli += 2
del self.appli
'''
astroid = test_utils.build_module(data)
inst = Instance(astroid['WebAppObject'])
# test del statement not returned by getattr
self.assertEqual(len(inst.getattr('appli')), 2)
def test_instance_getattr_with_class_attr(self):
data = '''
class Parent:
aa = 1
cc = 1
class Klass(Parent):
aa = 0
bb = 0
def incr(self, val):
self.cc = self.aa
if val > self.aa:
val = self.aa
if val < self.bb:
val = self.bb
self.aa += val
'''
astroid = test_utils.build_module(data)
inst = Instance(astroid['Klass'])
self.assertEqual(len(inst.getattr('aa')), 3, inst.getattr('aa'))
self.assertEqual(len(inst.getattr('bb')), 1, inst.getattr('bb'))
self.assertEqual(len(inst.getattr('cc')), 2, inst.getattr('cc'))
def test_getattr_method_transform(self):
data = '''
class Clazz(object):
def m1(self, value):
self.value = value
m2 = m1
def func(arg1, arg2):
"function that will be used as a method"
return arg1.value + arg2
Clazz.m3 = func
inst = Clazz()
inst.m4 = func
'''
astroid = test_utils.build_module(data)
cls = astroid['Clazz']
# test del statement not returned by getattr
for method in ('m1', 'm2', 'm3'):
inferred = list(cls.igetattr(method))
self.assertEqual(len(inferred), 1)
self.assertIsInstance(inferred[0], UnboundMethod)
inferred = list(Instance(cls).igetattr(method))
self.assertEqual(len(inferred), 1)
self.assertIsInstance(inferred[0], BoundMethod)
inferred = list(Instance(cls).igetattr('m4'))
self.assertEqual(len(inferred), 1)
self.assertIsInstance(inferred[0], nodes.Function)
def test_getattr_from_grandpa(self):
data = '''
class Future:
attr = 1
class Present(Future):
pass
class Past(Present):
pass
'''
astroid = test_utils.build_module(data)
past = astroid['Past']
attr = past.getattr('attr')
self.assertEqual(len(attr), 1)
attr1 = attr[0]
self.assertIsInstance(attr1, nodes.AssName)
self.assertEqual(attr1.name, 'attr')
def test_function_with_decorator_lineno(self):
data = '''
@f(a=2,
b=3)
def g1(x):
print(x)
@f(a=2,
b=3)
def g2():
pass
'''
astroid = test_utils.build_module(data)
self.assertEqual(astroid['g1'].fromlineno, 4)
self.assertEqual(astroid['g1'].tolineno, 5)
self.assertEqual(astroid['g2'].fromlineno, 9)
self.assertEqual(astroid['g2'].tolineno, 10)
@test_utils.require_version(maxver='3.0')
def test_simple_metaclass(self):
astroid = test_utils.build_module("""
class Test(object):
__metaclass__ = type
""")
klass = astroid['Test']
metaclass = klass.metaclass()
self.assertIsInstance(metaclass, scoped_nodes.Class)
self.assertEqual(metaclass.name, 'type')
def test_metaclass_error(self):
astroid = test_utils.build_module("""
class Test(object):
__metaclass__ = typ
""")
klass = astroid['Test']
self.assertFalse(klass.metaclass())
@test_utils.require_version(maxver='3.0')
def test_metaclass_imported(self):
astroid = test_utils.build_module("""
from abc import ABCMeta
class Test(object):
__metaclass__ = ABCMeta
""")
klass = astroid['Test']
metaclass = klass.metaclass()
self.assertIsInstance(metaclass, scoped_nodes.Class)
self.assertEqual(metaclass.name, 'ABCMeta')
def test_metaclass_yes_leak(self):
astroid = test_utils.build_module("""
# notice `ab` instead of `abc`
from ab import ABCMeta
class Meta(object):
__metaclass__ = ABCMeta
""")
klass = astroid['Meta']
self.assertIsNone(klass.metaclass())
@test_utils.require_version(maxver='3.0')
def test_newstyle_and_metaclass_good(self):
astroid = test_utils.build_module("""
from abc import ABCMeta
class Test:
__metaclass__ = ABCMeta
""")
klass = astroid['Test']
self.assertTrue(klass.newstyle)
self.assertEqual(klass.metaclass().name, 'ABCMeta')
astroid = test_utils.build_module("""
from abc import ABCMeta
__metaclass__ = ABCMeta
class Test:
pass
""")
klass = astroid['Test']
self.assertTrue(klass.newstyle)
self.assertEqual(klass.metaclass().name, 'ABCMeta')
@test_utils.require_version(maxver='3.0')
def test_nested_metaclass(self):
astroid = test_utils.build_module("""
from abc import ABCMeta
class A(object):
__metaclass__ = ABCMeta
class B: pass
__metaclass__ = ABCMeta
class C:
__metaclass__ = type
class D: pass
""")
a = astroid['A']
b = a.locals['B'][0]
c = astroid['C']
d = c.locals['D'][0]
self.assertEqual(a.metaclass().name, 'ABCMeta')
self.assertFalse(b.newstyle)
self.assertIsNone(b.metaclass())
self.assertEqual(c.metaclass().name, 'type')
self.assertEqual(d.metaclass().name, 'ABCMeta')
@test_utils.require_version(maxver='3.0')
def test_parent_metaclass(self):
astroid = test_utils.build_module("""
from abc import ABCMeta
class Test:
__metaclass__ = ABCMeta
class SubTest(Test): pass
""")
klass = astroid['SubTest']
self.assertTrue(klass.newstyle)
metaclass = klass.metaclass()
self.assertIsInstance(metaclass, scoped_nodes.Class)
self.assertEqual(metaclass.name, 'ABCMeta')
@test_utils.require_version(maxver='3.0')
def test_metaclass_ancestors(self):
astroid = test_utils.build_module("""
from abc import ABCMeta
class FirstMeta(object):
__metaclass__ = ABCMeta
class SecondMeta(object):
__metaclass__ = type
class Simple(object):
pass
class FirstImpl(FirstMeta): pass
class SecondImpl(FirstImpl): pass
class ThirdImpl(Simple, SecondMeta):
pass
""")
classes = {
'ABCMeta': ('FirstImpl', 'SecondImpl'),
'type': ('ThirdImpl', )
}
for metaclass, names in classes.items():
for name in names:
impl = astroid[name]
meta = impl.metaclass()
self.assertIsInstance(meta, nodes.Class)
self.assertEqual(meta.name, metaclass)
def test_metaclass_type(self):
klass = test_utils.extract_node("""
def with_metaclass(meta, base=object):
return meta("NewBase", (base, ), {})
class ClassWithMeta(with_metaclass(type)): #@
pass
""")
self.assertEqual(
['NewBase', 'object'],
[base.name for base in klass.ancestors()])
def test_metaclass_generator_hack(self):
klass = test_utils.extract_node("""
import six
class WithMeta(six.with_metaclass(type, object)): #@
pass
""")
self.assertEqual(
['object'],
[base.name for base in klass.ancestors()])
self.assertEqual(
'type', klass.metaclass().name)
def test_nonregr_infer_callresult(self):
astroid = test_utils.build_module("""
class Delegate(object):
def __get__(self, obj, cls):
return getattr(obj._subject, self.attribute)
class CompositeBuilder(object):
__call__ = Delegate()
builder = CompositeBuilder(result, composite)
tgts = builder()
""")
instance = astroid['tgts']
# used to raise "'_Yes' object is not iterable", see
# https://bitbucket.org/logilab/astroid/issue/17
self.assertEqual(list(instance.infer()), [YES])
def test_slots(self):
astroid = test_utils.build_module("""
from collections import deque
from textwrap import dedent
class First(object):
__slots__ = ("a", "b", 1)
class Second(object):
__slots__ = "a"
class Third(object):
__slots__ = deque(["a", "b", "c"])
class Fourth(object):
__slots__ = {"a": "a", "b": "b"}
class Fifth(object):
__slots__ = list
class Sixth(object):
__slots__ = ""
class Seventh(object):
__slots__ = dedent.__name__
class Eight(object):
__slots__ = ("parens")
class Ninth(object):
pass
class Ten(object):
__slots__ = dict({"a": "b", "c": "d"})
""")
first = astroid['First']
first_slots = first.slots()
self.assertEqual(len(first_slots), 2)
self.assertIsInstance(first_slots[0], nodes.Const)
self.assertIsInstance(first_slots[1], nodes.Const)
self.assertEqual(first_slots[0].value, "a")
self.assertEqual(first_slots[1].value, "b")
second_slots = astroid['Second'].slots()
self.assertEqual(len(second_slots), 1)
self.assertIsInstance(second_slots[0], nodes.Const)
self.assertEqual(second_slots[0].value, "a")
third_slots = astroid['Third'].slots()
self.assertIsNone(third_slots)
fourth_slots = astroid['Fourth'].slots()
self.assertEqual(len(fourth_slots), 2)
self.assertIsInstance(fourth_slots[0], nodes.Const)
self.assertIsInstance(fourth_slots[1], nodes.Const)
self.assertEqual(fourth_slots[0].value, "a")
self.assertEqual(fourth_slots[1].value, "b")
fifth_slots = astroid['Fifth'].slots()
self.assertIsNone(fifth_slots)
sixth_slots = astroid['Sixth'].slots()
self.assertIsNone(sixth_slots)
seventh_slots = astroid['Seventh'].slots()
self.assertIsNone(seventh_slots)
eight_slots = astroid['Eight'].slots()
self.assertEqual(len(eight_slots), 1)
self.assertIsInstance(eight_slots[0], nodes.Const)
self.assertEqual(eight_slots[0].value, "parens")
self.assertIsNone(astroid['Ninth'].slots())
tenth_slots = astroid['Ten'].slots()
self.assertEqual(len(tenth_slots), 2)
self.assertEqual(
[slot.value for slot in tenth_slots],
["a", "c"])
@test_utils.require_version(maxver='3.0')
def test_slots_py2(self):
module = test_utils.build_module("""
class UnicodeSlots(object):
__slots__ = (u"a", u"b", "c")
""")
slots = module['UnicodeSlots'].slots()
self.assertEqual(len(slots), 3)
self.assertEqual(slots[0].value, "a")
self.assertEqual(slots[1].value, "b")
self.assertEqual(slots[2].value, "c")
def assertEqualMro(self, klass, expected_mro):
self.assertEqual(
[member.name for member in klass.mro()],
expected_mro)
@test_utils.require_version(maxver='3.0')
def test_no_mro_for_old_style(self):
node = test_utils.extract_node("""
class Old: pass""")
with self.assertRaises(NotImplementedError) as cm:
node.mro()
self.assertEqual(str(cm.exception), "Could not obtain mro for "
"old-style classes.")
def test_mro(self):
astroid = test_utils.build_module("""
class C(object): pass
class D(dict, C): pass
class A1(object): pass
class B1(A1): pass
class C1(A1): pass
class D1(B1, C1): pass
class E1(C1, B1): pass
class F1(D1, E1): pass
class G1(E1, D1): pass
class Boat(object): pass
class DayBoat(Boat): pass
class WheelBoat(Boat): pass
class EngineLess(DayBoat): pass
class SmallMultihull(DayBoat): pass
class PedalWheelBoat(EngineLess, WheelBoat): pass
class SmallCatamaran(SmallMultihull): pass
class Pedalo(PedalWheelBoat, SmallCatamaran): pass
class OuterA(object):
class Inner(object):
pass
class OuterB(OuterA):
class Inner(OuterA.Inner):
pass
class OuterC(OuterA):
class Inner(OuterA.Inner):
pass
class OuterD(OuterC):
class Inner(OuterC.Inner, OuterB.Inner):
pass
""")
self.assertEqualMro(astroid['D'], ['D', 'dict', 'C', 'object'])
self.assertEqualMro(astroid['D1'], ['D1', 'B1', 'C1', 'A1', 'object'])
self.assertEqualMro(astroid['E1'], ['E1', 'C1', 'B1', 'A1', 'object'])
with self.assertRaises(ResolveError) as cm:
astroid['F1'].mro()
self.assertEqual(str(cm.exception),
"Cannot create a consistent method resolution order "
"for bases (B1, C1, A1, object), "
"(C1, B1, A1, object)")
with self.assertRaises(ResolveError) as cm:
astroid['G1'].mro()
self.assertEqual(str(cm.exception),
"Cannot create a consistent method resolution order "
"for bases (C1, B1, A1, object), "
"(B1, C1, A1, object)")
self.assertEqualMro(
astroid['PedalWheelBoat'],
["PedalWheelBoat", "EngineLess",
"DayBoat", "WheelBoat", "Boat", "object"])
self.assertEqualMro(
astroid["SmallCatamaran"],
["SmallCatamaran", "SmallMultihull", "DayBoat", "Boat", "object"])
self.assertEqualMro(
astroid["Pedalo"],
["Pedalo", "PedalWheelBoat", "EngineLess", "SmallCatamaran",
"SmallMultihull", "DayBoat", "WheelBoat", "Boat", "object"])
self.assertEqualMro(
astroid['OuterD']['Inner'],
['Inner', 'Inner', 'Inner', 'Inner', 'object'])
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
rec/echomesh | code/python/external/platform/darwin/numpy/oldnumeric/compat.py | 13 | 3182 | # Compatibility module containing deprecated names
__all__ = ['NewAxis',
'UFuncType', 'UfuncType', 'ArrayType', 'arraytype',
'LittleEndian', 'arrayrange', 'matrixmultiply',
'array_constructor', 'pickle_array',
'DumpArray', 'LoadArray', 'multiarray',
# from cPickle
'dump', 'dumps', 'load', 'loads',
'Unpickler', 'Pickler'
]
import numpy.core.multiarray as multiarray
import numpy.core.umath as um
from numpy.core.numeric import array
import functions
import sys
from cPickle import dump, dumps
mu = multiarray
#Use this to add a new axis to an array
#compatibility only
NewAxis = None
#deprecated
UFuncType = type(um.sin)
UfuncType = type(um.sin)
ArrayType = mu.ndarray
arraytype = mu.ndarray
LittleEndian = (sys.byteorder == 'little')
from numpy import deprecate
# backward compatibility
arrayrange = deprecate(functions.arange, 'arrayrange', 'arange')
# deprecated names
matrixmultiply = deprecate(mu.dot, 'matrixmultiply', 'dot')
def DumpArray(m, fp):
m.dump(fp)
def LoadArray(fp):
import cPickle
return cPickle.load(fp)
def array_constructor(shape, typecode, thestr, Endian=LittleEndian):
if typecode == "O":
x = array(thestr, "O")
else:
x = mu.fromstring(thestr, typecode)
x.shape = shape
if LittleEndian != Endian:
return x.byteswap(True)
else:
return x
def pickle_array(a):
if a.dtype.hasobject:
return (array_constructor,
(a.shape, a.dtype.char, a.tolist(), LittleEndian))
else:
return (array_constructor,
(a.shape, a.dtype.char, a.tostring(), LittleEndian))
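# Illustrative sketch (not part of the original module): the (constructor, args)
# pair returned above is meant to be replayed, e.g. by copy_reg/pickle, to
# rebuild an equivalent array. The names below are hypothetical.
#
#   _a = array([1, 2, 3], 'l')
#   _constructor, _args = pickle_array(_a)
#   _b = _constructor(*_args)   # shape/typecode/byte-string round trip
#   assert (_a == _b).all()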
def loads(astr):
import cPickle
arr = cPickle.loads(astr.replace('Numeric', 'numpy.oldnumeric'))
return arr
def load(fp):
return loads(fp.read())
def _LoadArray(fp):
import typeconv
ln = fp.readline().split()
if ln[0][0] == 'A': ln[0] = ln[0][1:]
typecode = ln[0][0]
endian = ln[0][1]
itemsize = int(ln[0][2:])
shape = [int(x) for x in ln[1:]]
sz = itemsize
for val in shape:
sz *= val
dstr = fp.read(sz)
m = mu.fromstring(dstr, typeconv.convtypecode(typecode))
m.shape = shape
if (LittleEndian and endian == 'B') or (not LittleEndian and endian == 'L'):
return m.byteswap(True)
else:
return m
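# Illustrative note (inferred from the parsing above, not from upstream
# documentation): the legacy header consumed by _LoadArray is a single line
# such as "AfL4 3 5" -- an optional 'A' prefix, a one-character typecode
# ('f'), an endianness flag ('L' or 'B'), the itemsize in bytes ('4'), then
# the shape ("3 5"), followed by itemsize * prod(shape) raw data bytes.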
import pickle, copy
if sys.version_info[0] >= 3:
class Unpickler(pickle.Unpickler):
# XXX: should we implement this? It's not completely straightforward
# to do.
def __init__(self, *a, **kw):
raise NotImplementedError(
"numpy.oldnumeric.Unpickler is not supported on Python 3")
else:
class Unpickler(pickle.Unpickler):
def load_array(self):
self.stack.append(_LoadArray(self))
dispatch = copy.copy(pickle.Unpickler.dispatch)
dispatch['A'] = load_array
class Pickler(pickle.Pickler):
def __init__(self, *args, **kwds):
raise NotImplementedError("Don't pickle new arrays with this")
def save_array(self, object):
raise NotImplementedError("Don't pickle new arrays with this")
| mit |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/requests/packages/chardet/chardetect.py | 1786 | 2504 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
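# Illustrative sketch (not part of the upstream script): description_of() only
# needs an iterable of byte strings, so an in-memory buffer works as well as a
# file. The names and the exact confidence shown are hypothetical.
#
#   from io import BytesIO
#   print(description_of(BytesIO(b'hello world\n'), name='example'))
#   # -> something like "example: ascii with confidence 1.0"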
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
| bsd-3-clause |
chenbaihu/grpc | src/python/src/grpc/framework/base/packets/_ends.py | 5 | 17140 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Implementations of Fronts and Backs."""
import collections
import threading
import uuid
# _interfaces and packets are referenced from specification in this module.
from grpc.framework.base import interfaces as base_interfaces
from grpc.framework.base.packets import _cancellation
from grpc.framework.base.packets import _context
from grpc.framework.base.packets import _emission
from grpc.framework.base.packets import _expiration
from grpc.framework.base.packets import _ingestion
from grpc.framework.base.packets import _interfaces # pylint: disable=unused-import
from grpc.framework.base.packets import _reception
from grpc.framework.base.packets import _termination
from grpc.framework.base.packets import _transmission
from grpc.framework.base.packets import interfaces
from grpc.framework.base.packets import packets # pylint: disable=unused-import
from grpc.framework.foundation import callable_util
_IDLE_ACTION_EXCEPTION_LOG_MESSAGE = 'Exception calling idle action!'
_OPERATION_OUTCOMES = (
base_interfaces.Outcome.COMPLETED,
base_interfaces.Outcome.CANCELLED,
base_interfaces.Outcome.EXPIRED,
base_interfaces.Outcome.RECEPTION_FAILURE,
base_interfaces.Outcome.TRANSMISSION_FAILURE,
base_interfaces.Outcome.SERVICER_FAILURE,
base_interfaces.Outcome.SERVICED_FAILURE,
)
class _EasyOperation(base_interfaces.Operation):
"""A trivial implementation of base_interfaces.Operation."""
def __init__(self, emission_manager, context, cancellation_manager):
"""Constructor.
Args:
emission_manager: The _interfaces.EmissionManager for the operation that
will accept values emitted by customer code.
context: The base_interfaces.OperationContext for use by the customer
during the operation.
cancellation_manager: The _interfaces.CancellationManager for the
operation.
"""
self.consumer = emission_manager
self.context = context
self._cancellation_manager = cancellation_manager
def cancel(self):
self._cancellation_manager.cancel()
class _Endlette(object):
"""Utility for stateful behavior common to Fronts and Backs."""
def __init__(self, pool):
"""Constructor.
Args:
pool: A thread pool to use when calling registered idle actions.
"""
self._lock = threading.Lock()
self._pool = pool
# Dictionary from operation IDs to ReceptionManager-or-None. A None value
# indicates an in-progress fire-and-forget operation for which the customer
# has chosen to ignore results.
self._operations = {}
self._stats = {outcome: 0 for outcome in _OPERATION_OUTCOMES}
self._idle_actions = []
def terminal_action(self, operation_id):
"""Constructs the termination action for a single operation.
Args:
operation_id: An operation ID.
Returns:
A callable that takes an operation outcome for an argument to be used as
the termination action for the operation associated with the given
operation ID.
"""
def termination_action(outcome):
with self._lock:
self._stats[outcome] += 1
self._operations.pop(operation_id, None)
if not self._operations:
for action in self._idle_actions:
self._pool.submit(callable_util.with_exceptions_logged(
action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE))
self._idle_actions = []
return termination_action
def __enter__(self):
self._lock.acquire()
def __exit__(self, exc_type, exc_val, exc_tb):
self._lock.release()
def get_operation(self, operation_id):
return self._operations.get(operation_id, None)
def add_operation(self, operation_id, operation_reception_manager):
self._operations[operation_id] = operation_reception_manager
def operation_stats(self):
with self._lock:
return dict(self._stats)
def add_idle_action(self, action):
with self._lock:
if self._operations:
self._idle_actions.append(action)
else:
self._pool.submit(callable_util.with_exceptions_logged(
action, _IDLE_ACTION_EXCEPTION_LOG_MESSAGE))
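# Illustrative sketch (not part of the original module) of how an end uses
# _Endlette: one termination action is created per operation and called exactly
# once with the operation's outcome. The variable names are hypothetical.
#
#   action = endlette.terminal_action(operation_id)
#   # ... operation runs ...
#   action(base_interfaces.Outcome.COMPLETED)
#   # -> increments that outcome's stat, forgets the operation, and fires any
#   #    registered idle actions once no operations remain in flight.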
class _FrontManagement(
collections.namedtuple(
'_FrontManagement',
('reception', 'emission', 'operation', 'cancellation'))):
"""Just a trivial helper class to bundle four fellow-traveling objects."""
def _front_operate(
callback, work_pool, transmission_pool, utility_pool,
termination_action, operation_id, name, payload, complete, timeout,
subscription, trace_id):
"""Constructs objects necessary for front-side operation management.
Args:
callback: A callable that accepts packets.FrontToBackPackets and delivers
them to the other side of the operation. Execution of this callable may
take any arbitrary length of time.
work_pool: A thread pool in which to execute customer code.
transmission_pool: A thread pool to use for transmitting to the other side
of the operation.
utility_pool: A thread pool for utility tasks.
termination_action: A no-arg behavior to be called upon operation
completion.
operation_id: An object identifying the operation.
name: The name of the method being called during the operation.
payload: The first customer-significant value to be transmitted to the other
side. May be None if there is no such value or if the customer chose not
to pass it at operation invocation.
complete: A boolean indicating whether or not additional payloads will be
supplied by the customer.
timeout: A length of time in seconds to allow for the operation.
subscription: A base_interfaces.ServicedSubscription describing the
customer's interest in the results of the operation.
trace_id: A uuid.UUID identifying a set of related operations to which this
operation belongs. May be None.
Returns:
A _FrontManagement object bundling together the
_interfaces.ReceptionManager, _interfaces.EmissionManager,
_context.OperationContext, and _interfaces.CancellationManager for the
operation.
"""
lock = threading.Lock()
with lock:
termination_manager = _termination.front_termination_manager(
work_pool, utility_pool, termination_action, subscription.kind)
transmission_manager = _transmission.front_transmission_manager(
lock, transmission_pool, callback, operation_id, name,
subscription.kind, trace_id, timeout, termination_manager)
operation_context = _context.OperationContext(
lock, operation_id, packets.Kind.SERVICED_FAILURE,
termination_manager, transmission_manager)
emission_manager = _emission.front_emission_manager(
lock, termination_manager, transmission_manager)
ingestion_manager = _ingestion.front_ingestion_manager(
lock, work_pool, subscription, termination_manager,
transmission_manager, operation_context)
expiration_manager = _expiration.front_expiration_manager(
lock, termination_manager, transmission_manager, ingestion_manager,
timeout)
reception_manager = _reception.front_reception_manager(
lock, termination_manager, transmission_manager, ingestion_manager,
expiration_manager)
cancellation_manager = _cancellation.CancellationManager(
lock, termination_manager, transmission_manager, ingestion_manager,
expiration_manager)
termination_manager.set_expiration_manager(expiration_manager)
transmission_manager.set_ingestion_and_expiration_managers(
ingestion_manager, expiration_manager)
operation_context.set_ingestion_and_expiration_managers(
ingestion_manager, expiration_manager)
emission_manager.set_ingestion_manager_and_expiration_manager(
ingestion_manager, expiration_manager)
ingestion_manager.set_expiration_manager(expiration_manager)
transmission_manager.inmit(payload, complete)
if subscription.kind is base_interfaces.ServicedSubscription.Kind.NONE:
returned_reception_manager = None
else:
returned_reception_manager = reception_manager
return _FrontManagement(
returned_reception_manager, emission_manager, operation_context,
cancellation_manager)
class Front(interfaces.Front):
"""An implementation of interfaces.Front."""
def __init__(self, work_pool, transmission_pool, utility_pool):
"""Constructor.
Args:
work_pool: A thread pool to be used for executing customer code.
transmission_pool: A thread pool to be used for transmitting values to
the other side of the operation.
utility_pool: A thread pool to be used for utility tasks.
"""
self._endlette = _Endlette(utility_pool)
self._work_pool = work_pool
self._transmission_pool = transmission_pool
self._utility_pool = utility_pool
self._callback = None
self._operations = {}
def join_rear_link(self, rear_link):
"""See interfaces.ForeLink.join_rear_link for specification."""
with self._endlette:
self._callback = rear_link.accept_front_to_back_ticket
def operation_stats(self):
"""See base_interfaces.End.operation_stats for specification."""
return self._endlette.operation_stats()
def add_idle_action(self, action):
"""See base_interfaces.End.add_idle_action for specification."""
self._endlette.add_idle_action(action)
def operate(
self, name, payload, complete, timeout, subscription, trace_id):
"""See base_interfaces.Front.operate for specification."""
operation_id = uuid.uuid4()
with self._endlette:
management = _front_operate(
self._callback, self._work_pool, self._transmission_pool,
self._utility_pool, self._endlette.terminal_action(operation_id),
operation_id, name, payload, complete, timeout, subscription,
trace_id)
self._endlette.add_operation(operation_id, management.reception)
return _EasyOperation(
management.emission, management.operation, management.cancellation)
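# Illustrative customer-side sketch (not part of the original module; names and
# argument values are hypothetical): the returned _EasyOperation exposes the
# emission manager as .consumer (for any further payloads the customer emits),
# the operation context as .context, and cancellation via .cancel().
#
#   operation = front.operate('service/method', first_payload, False, 30,
#                             subscription, None)
#   # ...
#   operation.cancel()   # abandon the operation if the customer loses interest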
def accept_back_to_front_ticket(self, ticket):
"""See interfaces.End.act for specification."""
with self._endlette:
reception_manager = self._endlette.get_operation(ticket.operation_id)
if reception_manager:
reception_manager.receive_packet(ticket)
def _back_operate(
servicer, callback, work_pool, transmission_pool, utility_pool,
termination_action, ticket, default_timeout, maximum_timeout):
"""Constructs objects necessary for back-side operation management.
Also begins back-side operation by feeding the first received ticket into the
constructed _interfaces.ReceptionManager.
Args:
servicer: An interfaces.Servicer for servicing operations.
callback: A callable that accepts packets.BackToFrontPackets and delivers
them to the other side of the operation. Execution of this callable may
take any arbitrary length of time.
work_pool: A thread pool in which to execute customer code.
transmission_pool: A thread pool to use for transmitting to the other side
of the operation.
utility_pool: A thread pool for utility tasks.
termination_action: A no-arg behavior to be called upon operation
completion.
ticket: The first packets.FrontToBackPacket received for the operation.
default_timeout: A length of time in seconds to be used as the default
time allotted for a single operation.
maximum_timeout: A length of time in seconds to be used as the maximum
time allotted for a single operation.
Returns:
The _interfaces.ReceptionManager to be used for the operation.
"""
lock = threading.Lock()
with lock:
termination_manager = _termination.back_termination_manager(
work_pool, utility_pool, termination_action, ticket.subscription)
transmission_manager = _transmission.back_transmission_manager(
lock, transmission_pool, callback, ticket.operation_id,
termination_manager, ticket.subscription)
operation_context = _context.OperationContext(
lock, ticket.operation_id, packets.Kind.SERVICER_FAILURE,
termination_manager, transmission_manager)
emission_manager = _emission.back_emission_manager(
lock, termination_manager, transmission_manager)
ingestion_manager = _ingestion.back_ingestion_manager(
lock, work_pool, servicer, termination_manager,
transmission_manager, operation_context, emission_manager)
expiration_manager = _expiration.back_expiration_manager(
lock, termination_manager, transmission_manager, ingestion_manager,
ticket.timeout, default_timeout, maximum_timeout)
reception_manager = _reception.back_reception_manager(
lock, termination_manager, transmission_manager, ingestion_manager,
expiration_manager)
termination_manager.set_expiration_manager(expiration_manager)
transmission_manager.set_ingestion_and_expiration_managers(
ingestion_manager, expiration_manager)
operation_context.set_ingestion_and_expiration_managers(
ingestion_manager, expiration_manager)
emission_manager.set_ingestion_manager_and_expiration_manager(
ingestion_manager, expiration_manager)
ingestion_manager.set_expiration_manager(expiration_manager)
reception_manager.receive_packet(ticket)
return reception_manager
class Back(interfaces.Back):
"""An implementation of interfaces.Back."""
def __init__(
self, servicer, work_pool, transmission_pool, utility_pool,
default_timeout, maximum_timeout):
"""Constructor.
Args:
servicer: An interfaces.Servicer for servicing operations.
work_pool: A thread pool in which to execute customer code.
transmission_pool: A thread pool to use for transmitting to the other side
of the operation.
utility_pool: A thread pool for utility tasks.
default_timeout: A length of time in seconds to be used as the default
time allotted for a single operation.
maximum_timeout: A length of time in seconds to be used as the maximum
time allotted for a single operation.
"""
self._endlette = _Endlette(utility_pool)
self._servicer = servicer
self._work_pool = work_pool
self._transmission_pool = transmission_pool
self._utility_pool = utility_pool
self._default_timeout = default_timeout
self._maximum_timeout = maximum_timeout
self._callback = None
def join_fore_link(self, fore_link):
"""See interfaces.RearLink.join_fore_link for specification."""
with self._endlette:
self._callback = fore_link.accept_back_to_front_ticket
def accept_front_to_back_ticket(self, ticket):
"""See interfaces.RearLink.accept_front_to_back_ticket for specification."""
with self._endlette:
reception_manager = self._endlette.get_operation(ticket.operation_id)
if reception_manager is None:
reception_manager = _back_operate(
self._servicer, self._callback, self._work_pool,
self._transmission_pool, self._utility_pool,
self._endlette.terminal_action(ticket.operation_id), ticket,
self._default_timeout, self._maximum_timeout)
self._endlette.add_operation(ticket.operation_id, reception_manager)
else:
reception_manager.receive_packet(ticket)
def operation_stats(self):
"""See base_interfaces.End.operation_stats for specification."""
return self._endlette.operation_stats()
def add_idle_action(self, action):
"""See base_interfaces.End.add_idle_action for specification."""
self._endlette.add_idle_action(action)
| bsd-3-clause |
mihaip/NewsBlur | apps/rss_feeds/migrations/0014_count_story_histories.py | 18 | 8822 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
def forwards(self, orm):
"Write your forwards methods here."
from apps.rss_feeds.models import Feed
for feed in Feed.objects.all():
feed.story_count_history = None
feed.save()
try:
feed.count_stories(verbose=True)
except Exception, e:
print ' ---> Exception: %s' % e
def backwards(self, orm):
"Write your backwards methods here."
models = {
'rss_feeds.feed': {
'Meta': {'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fetched_once': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'story_count_history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedfetchhistory': {
'Meta': {'object_name': 'FeedFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedpage': {
'Meta': {'object_name': 'FeedPage'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_page'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_data': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
},
'rss_feeds.feedxml': {
'Meta': {'object_name': 'FeedXML'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_xml'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_xml': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.pagefetchhistory': {
'Meta': {'object_name': 'PageFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'page_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.story': {
'Meta': {'unique_together': "(('story_feed', 'story_guid_hash'),)", 'object_name': 'Story', 'db_table': "'stories'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
'story_author_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'story_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'story_date': ('django.db.models.fields.DateTimeField', [], {}),
'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'story_original_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rss_feeds.Tag']", 'symmetrical': 'False'})
},
'rss_feeds.storyauthor': {
'Meta': {'object_name': 'StoryAuthor'},
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.tag': {
'Meta': {'object_name': 'Tag'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
}
complete_apps = ['rss_feeds']
| mit |
timothycrosley/thedom | thedom/dom.py | 1 | 31933 | '''
DOM.py
Contains all elements defined in the most recent version of the HTML specification
(currently version 5)
Copyright (C) 2015 Timothy Edmund Crosley
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
'''
from . import DictUtils, Factory
from .Base import Node, Settings
from .MethodUtils import CallBack
from .MultiplePythonSupport import *
Factory = Factory.Factory("DOM")
class A(Node):
"""
Defines a link that when clicked changes the currently viewed page
"""
__slots__ = ()
tagName = "a"
properties = Node.properties.copy()
properties['href'] = {'action':'attribute'}
properties['media'] = {'action':'attribute'}
properties['rel'] = {'action':'attribute'}
properties['target'] = {'action':'attribute'}
properties['type'] = {'action':'attribute'}
Factory.addProduct(A)
class Abr(Node):
"""
Defines an abbreviation or an acronym
"""
__slots__ = ()
tagName = "abr"
Factory.addProduct(Abr)
class Address(Node):
"""
Defines contact info for the author of a document or article
"""
__slots__ = ()
tagName = "address"
Factory.addProduct(Address)
class Area(Node):
"""
Defines an area inside an image map
"""
__slots__ = ()
tagName = "area"
properties = Node.properties.copy()
properties['alt'] = {'action':'attribute'}
properties['coords'] = {'action':'attribute'}
properties['href'] = {'action':'attribute'}
properties['hreflang'] = {'action':'attribute'}
properties['media'] = {'action':'attribute'}
properties['rel'] = {'action':'attribute'}
properties['shape'] = {'action':'attribute'}
properties['target'] = {'action':'attribute'}
properties['type'] = {'action':'attribute'}
Factory.addProduct(Area)
class Article(Node):
"""
Defines an independent, self-contained content
"""
__slots__ = ()
tagName = "article"
Factory.addProduct(Article)
class Aside(Node):
"""
Defines content as being aside from the content it is placed in
"""
__slots__ = ()
tagName = "aside"
Factory.addProduct(Aside)
class Audio(Node):
"""
Defines sound, such as music or other audio streams
"""
__slots__ = ()
tagName = "audio"
properties = Node.properties.copy()
properties['autoplay'] = {'action':'attribute', 'type':'bool'}
properties['controls'] = {'action':'attribute', 'type':'bool'}
properties['loop'] = {'action':'attribute', 'type':'bool'}
properties['src'] = {'action':'attribute'}
Factory.addProduct(Audio)
class B(Node):
"""
Defines bold text
NOTE: avoid using this element, when possible use elements that describe the content
instead of the appearance
"""
__slots__ = ()
tagName = "b"
Factory.addProduct(B)
class Base(Node):
"""
Defines the base URL for all relative URLs in a document
"""
__slots__ = ()
tagName = "base"
properties = Node.properties.copy()
properties['href'] = {'action':'attribute'}
properties['target'] = {'action':'attribute'}
Factory.addProduct(Base)
class BDI(Node):
"""
Defines a part of text that should be formatted in a different direction
from the other text outside it
"""
__slots__ = ()
tagName = "bdi"
Factory.addProduct(BDI)
class BDO(Node):
"""
Defines an override of the current text-direction
"""
__slots__ = ()
tagName = "bdo"
properties = Node.properties.copy()
properties['dir'] = {'action':'attribute'}
Factory.addProduct(BDO)
class BlockQuote(Node):
"""
Defines a section that is quoted from another source
"""
__slots__ = ()
tagName = "blockquote"
properties = Node.properties.copy()
properties['cite'] = {'action':'attribute'}
Factory.addProduct(BlockQuote)
class Body(Node):
"""
Defines the document's body - which contains all the visible parts of an HTML document
"""
__slots__ = ()
tagName = "body"
Factory.addProduct(Body)
class Br(Node):
"""
Defines a single line break
"""
__slots__ = ()
tagName = "br"
tagSelfCloses = True
allowsChildren = False
Factory.addProduct(Br)
class Button(Node):
"""
Defines a click-able button
"""
__slots__ = ()
tagName = "button"
properties = Node.properties.copy()
properties['autofocus'] = {'action':'attribute', 'type':'bool'}
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['form'] = {'action':'attribute'}
properties['formaction'] = {'action':'attribute'}
properties['formenctype'] = {'action':'attribute'}
properties['formnovalidate'] = {'action':'attribute', 'type':'bool'}
properties['formtarget'] = {'action':'attribute'}
properties['type'] = {'action':'attribute'}
properties['value'] = {'action':'attribute'}
Factory.addProduct(Button)
class Canvas(Node):
"""
    Defines an area of the screen for drawing graphics on the fly
"""
__slots__ = ()
tagName = "canvas"
allowsChildren = False
properties = Node.properties.copy()
properties['height'] = {'action':'attribute', 'type':'int'}
properties['width'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Canvas)
class Caption(Node):
"""
Defines a table caption
"""
__slots__ = ()
tagName = "caption"
Factory.addProduct(Caption)
class Cite(Node):
"""
Defines the title of a work
"""
__slots__ = ()
tagName = "cite"
Factory.addProduct(Cite)
class Code(Node):
"""
Defines a piece of programming code
"""
__slots__ = ()
tagName = "code"
Factory.addProduct(Code)
class Col(Node):
"""
Defines a table column
"""
__slots__ = ()
tagName = "col"
properties = Node.properties.copy()
properties['span'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Col)
class ColGroup(Node):
"""
Defines a group of one or more columns in a table
"""
__slots__ = ()
tagName = "colgroup"
properties = Node.properties.copy()
properties['span'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(ColGroup)
class Command(Node):
"""
Defines a click-able command button
"""
__slots__ = ()
tagName = "command"
properties = Node.properties.copy()
properties['checked'] = {'action':'attribute', 'type':'bool'}
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['icon'] = {'action':'attribute'}
properties['label'] = {'action':'attribute'}
properties['radiogroup'] = {'action':'attribute'}
properties['type'] = {'action':'attribute'}
Factory.addProduct(Command)
class DataList(Node):
"""
Defines a list of pre-defined options for input controls
"""
__slots__ = ()
tagName = "datalist"
Factory.addProduct(DataList)
class DD(Node):
"""
Defines a description of an item in a definition list
"""
__slots__ = ()
tagName = "dd"
Factory.addProduct(DD)
class Del(Node):
"""
Defines text that has been deleted from a document
"""
__slots__ = ()
tagName = "del"
properties = Node.properties.copy()
properties['cite'] = {'action':'attribute'}
properties['datetime'] = {'action':'attribute'}
Factory.addProduct(Del)
class Details(Node):
"""
Defines collapse-able details
"""
__slots__ = ()
tagName = "details"
properties = Node.properties.copy()
properties['open'] = {'action':'attribute'}
Factory.addProduct(Details)
class Dfn(Node):
"""
Defines a definition term
"""
__slots__ = ()
tagName = "dfn"
Factory.addProduct(Dfn)
class Div(Node):
"""
Defines a section of a document
"""
__slots__ = ()
tagName = "div"
Factory.addProduct(Div)
class DL(Node):
"""
Defines a definition list
"""
__slots__ = ()
tagName = "dl"
Factory.addProduct(DL)
class DT(Node):
"""
Defines a term (an item) in a definition list
"""
__slots__ = ()
tagName = "dt"
Factory.addProduct(DT)
class Em(Node):
"""
Defines emphasized text
"""
__slots__ = ()
tagName = "em"
Factory.addProduct(Em)
class Embed(Node):
"""
Defines a container for an external (non-HTML) application
"""
__slots__ = ()
tagName = "embed"
properties = Node.properties.copy()
properties['height'] = {'action':'attribute'}
properties['src'] = {'action':'attribute'}
properties['types'] = {'action':'attribute'}
properties['width'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Embed)
class FieldSet(Node):
"""
Defines a group of related elements in a form
"""
__slots__ = ()
tagName = "fieldset"
properties = Node.properties.copy()
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['form'] = {'action':'attribute'}
Factory.addProduct(FieldSet)
class FigCaption(Node):
"""
Defines a caption for a figure element
"""
__slots__ = ()
tagName = "figcaption"
Factory.addProduct(FigCaption)
class Figure(Node):
"""
Defines self-contained figure content
"""
__slots__ = ()
tagName = "figure"
Factory.addProduct(Figure)
class Footer(Node):
"""
Defines a footer for a document or section
"""
__slots__ = ()
tagName = "footer"
Factory.addProduct(Footer)
class Form(Node):
"""
Defines a form for user input
"""
__slots__ = ()
tagName = "form"
properties = Node.properties.copy()
properties['accept'] = {'action':'attribute'}
properties['accept-charset'] = {'action':'attribute'}
properties['action'] = {'action':'attribute'}
properties['autocomplete'] = {'action':'attribute', 'type':'bool'}
properties['enctype'] = {'action':'attribute'}
properties['method'] = {'action':'attribute'}
properties['name'] = {'action':'attribute'}
properties['novalidate'] = {'action':'attribute'}
properties['target'] = {'action':'attribute'}
Factory.addProduct(Form)
class H(Node):
"""
    Defines the abstract concept of an HTML heading
"""
__slots__ = ()
class H1(H):
"""
Defines the most important heading
"""
__slots__ = ()
tagName = "h1"
Factory.addProduct(H1)
class H2(H):
"""
Defines the 2nd most important heading
"""
__slots__ = ()
tagName = "h2"
Factory.addProduct(H2)
class H3(H):
"""
Defines the 3rd most important heading
"""
__slots__ = ()
tagName = "h3"
Factory.addProduct(H3)
class H4(H):
"""
Defines the 4th most important heading
"""
__slots__ = ()
tagName = "h4"
Factory.addProduct(H4)
class H5(H):
"""
Defines the 5th most important heading
"""
__slots__ = ()
tagName = "h5"
Factory.addProduct(H5)
class H6(H):
"""
Defines the least important heading
"""
__slots__ = ()
tagName = "h6"
Factory.addProduct(H6)
class Head(Node):
"""
Defines information about the document
"""
__slots__ = ()
tagName = "head"
Factory.addProduct(Head)
class Header(Node):
"""
Defines a header for a document or section
"""
__slots__ = ()
tagName = "header"
Factory.addProduct(Header)
class HGroup(Node):
"""
Defines a grouping of multiple header elements
"""
__slots__ = ()
tagName = "hgroup"
Factory.addProduct(HGroup)
class HR(Node):
"""
Defines a thematic change in the content horizontally
"""
__slots__ = ()
tagName = "hr"
tagSelfCloses = True
allowsChildren = False
Factory.addProduct(HR)
class HTML(Node):
"""
Defines the root of an HTML document
"""
__slots__ = ()
tagName = "html"
properties = Node.properties.copy()
properties['manifest'] = {'action':'attribute'}
Factory.addProduct(HTML)
class I(Node):
"""
Defines text that is in an alternate voice or mood
NOTE: avoid using this element, when possible use elements that describe the content
instead of the appearance
"""
__slots__ = ()
tagName = "i"
Factory.addProduct(I)
class IFrame(Node):
"""
Defines an inline frame
"""
__slots__ = ()
tagName = "iframe"
properties = Node.properties.copy()
properties['sandbox'] = {'action':'attribute'}
properties['seamless'] = {'action':'attribute', 'type':'bool'}
properties['src'] = {'action':'attribute'}
properties['srcdoc'] = {'action':'attribute'}
properties['width'] = {'action':'attribute', 'type':'int'}
properties['frameborder'] = {'action':'attribute'}
Factory.addProduct(IFrame)
class Img(Node):
"""
Defines an image
"""
__slots__ = ()
tagName = "img"
tagSelfCloses = True
allowsChildren = False
properties = Node.properties.copy()
properties['src'] = {'action':'setImage'}
properties['alt'] = {'action':'attribute'}
properties['crossorigin'] = {'action':'attribute'}
properties['ismap'] = {'action':'attribute', 'type':'bool'}
properties['width'] = {'action':'attribute', 'type':'int'}
properties['height'] = {'action':'attribute', 'type':'int'}
def setImage(self, image):
self.attributes['src'] = Settings.STATIC_URL + image
def image(self):
return self.attributes['src'].replace(Settings.STATIC_URL, "")
Factory.addProduct(Img)
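# A small, hypothetical sketch of the src round-trip implemented by setImage/image
# above, assuming Settings.STATIC_URL == "/static/" and that an Img node can be
# constructed directly (construction details live in Base.Node / the Factory):
#
#   img = Img()
#   img.setImage("images/logo.png")  # attributes['src'] -> "/static/images/logo.png"
#   img.image()                      # -> "images/logo.png"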
class Input(Node):
"""
Defines an input control
"""
__slots__ = ()
tagName = "input"
tagSelfCloses = True
allowsChildren = False
properties = Node.properties.copy()
properties['accept'] = {'action':'attribute'}
properties['alt'] = {'action':'attribute'}
properties['autocomplete'] = {'action':'attribute', 'type':'bool'}
properties['autofocus'] = {'action':'attribute', 'type':'bool'}
properties['checked'] = {'action':'attribute', 'type':'bool'}
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['form'] = {'action':'attribute'}
properties['formaction'] = {'action':'attribute'}
properties['formenctype'] = {'action':'attribute'}
properties['formmethod'] = {'action':'attribute'}
properties['formnovalidate'] = {'action':'attribute'}
properties['formtarget'] = {'action':'attribute'}
properties['height'] = {'action':'attribute', 'type':'int'}
properties['list'] = {'action':'attribute'}
properties['max'] = {'action':'attribute'}
properties['maxlength'] = {'action':'attribute', 'type':'int'}
properties['min'] = {'action':'attribute'}
properties['multiple'] = {'action':'attribute', 'type':'bool'}
properties['pattern'] = {'action':'attribute'}
properties['placeholder'] = {'action':'attribute'}
properties['readonly'] = {'action':'attribute', 'type':'bool'}
properties['required'] = {'action':'attribute', 'type':'bool'}
properties['size'] = {'action':'attribute', 'type':'int'}
properties['src'] = {'action':'attribute'}
properties['step'] = {'action':'attribute', 'type':'int'}
properties['type'] = {'action':'attribute'}
properties['value'] = {'action':'attribute'}
properties['width'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Input)
class Ins(Node):
"""
Defines text that has been inserted into a document
"""
__slots__ = ()
tagName = "ins"
properties = Node.properties.copy()
properties['cite'] = {'action':'attribute'}
properties['datetime'] = {'action':'attribute'}
Factory.addProduct(Ins)
class Kbd(Node):
"""
Defines keyboard input
"""
__slots__ = ()
tagName = "kbd"
Factory.addProduct(Kbd)
class KeyGen(Node):
"""
Defines a key-pair generator field
"""
__slots__ = ()
tagName = "keygen"
properties = Node.properties.copy()
properties['autofocus'] = {'action':'attribute', 'type':'bool'}
properties['challenge'] = {'action':'attribute', 'type':'bool'}
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['form'] = {'action':'attribute'}
properties['keytype'] = {'action':'attribute'}
properties['name'] = {'action':'attribute'}
Factory.addProduct(KeyGen)
class Label(Node):
"""
Defines a label for an input element
"""
__slots__ = ()
tagName = "label"
properties = Node.properties.copy()
properties['for'] = {'action':'attribute'}
properties['form'] = {'action':'attribute'}
Factory.addProduct(Label)
class Legend(Node):
"""
Defines a caption for a fieldset, figure or details element
"""
__slots__ = ()
tagName = "legend"
Factory.addProduct(Legend)
class LI(Node):
"""
Defines a list item
"""
__slots__ = ()
tagName = "li"
properties = Node.properties.copy()
properties['value'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(LI)
class Link(Node):
"""
    Defines the relationship between a document and an external resource
"""
__slots__ = ()
tagName = "link"
tagSelfCloses = True
allowsChildren = False
properties = Node.properties.copy()
properties['charset'] = {'action':'attribute'}
properties['src'] = {'action':'setSource'}
properties['href'] = {'action':'setHref'}
properties['hreflang'] = {'action':'attribute'}
properties['media'] = {'action':'attribute'}
properties['rel'] = {'action':'attribute'}
properties['type'] = {'action':'attribute'}
properties['sizes'] = {'action':'attribute'}
def setHref(self, href):
self.attributes['href'] = Settings.STATIC_URL + href
def href(self):
return self.attributes['href'].replace(Settings.STATIC_URL, "")
def setSource(self, source):
self.attributes['src'] = Settings.STATIC_URL + source
    def source(self):
return self.attributes['src'].replace(Settings.STATIC_URL, "")
Factory.addProduct(Link)
class Map(Node):
"""
Defines a client side image map
"""
__slots__ = ()
tagName = "map"
Factory.addProduct(Map)
class Mark(Node):
"""
Defines marked / highlighted text
"""
__slots__ = ()
tagName = "mark"
Factory.addProduct(Mark)
class Meta(Node):
"""
Defines metadata about an HTML document
"""
__slots__ = ()
tagName = "meta"
tagSelfCloses = True
allowsChildren = False
properties = Node.properties.copy()
properties['charset'] = {'action':'attribute'}
properties['content'] = {'action':'attribute'}
properties['http-equiv'] = {'action':'attribute'}
Factory.addProduct(Meta)
class Meter(Node):
"""
Defines a scalar measurement within a known range
"""
__slots__ = ()
tagName = "meter"
properties = Node.properties.copy()
properties['form'] = {'action':'attribute'}
properties['high'] = {'action':'attribute', 'type':'int'}
properties['low'] = {'action':'attribute', 'type':'int'}
properties['max'] = {'action':'attribute', 'type':'int'}
properties['min'] = {'action':'attribute', 'type':'int'}
properties['optimum'] = {'action':'attribute', 'type':'int'}
properties['value'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Meter)
class Nav(Node):
"""
Defines navigation links
"""
__slots__ = ()
tagName = "nav"
Factory.addProduct(Nav)
class NoScript(Node):
"""
    Defines alternate content for browsers that do not support client-side scripts
"""
__slots__ = ()
tagName = "noscript"
Factory.addProduct(NoScript)
class Object(Node):
"""
Defines an embedded object
"""
__slots__ = ()
tagName = "object"
properties = Node.properties.copy()
properties['form'] = {'action':'attribute'}
properties['height'] = {'action':'attribute', 'type':'int'}
properties['type'] = {'action':'attribute'}
properties['usemap'] = {'action':'attribute'}
properties['width'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Object)
class OL(Node):
"""
Defines an ordered list
"""
__slots__ = ()
tagName = "ol"
properties = Node.properties.copy()
properties['reversed'] = {'action':'attribute', 'type':'bool'}
properties['start'] = {'action':'attribute', 'type':'int'}
properties['type'] = {'action':'attribute'}
Factory.addProduct(OL)
class OptGroup(Node):
"""
Defines a group of related options in a drop-down list
"""
__slots__ = ()
tagName = "optgroup"
properties = Node.properties.copy()
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['label'] = {'action':'attribute'}
Factory.addProduct(OptGroup)
class Option(Node):
"""
Defines an option in a drop-down list
"""
__slots__ = ()
tagName = "option"
properties = Node.properties.copy()
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['label'] = {'action':'attribute'}
properties['selected'] = {'action':'attribute', 'type':'bool'}
properties['value'] = {'action':'attribute'}
Factory.addProduct(Option)
class Output(Node):
"""
Defines the result of a calculation
"""
__slots__ = ()
tagName = "output"
properties = Node.properties.copy()
properties['for'] = {'action':'attribute'}
properties['form'] = {'action':'attribute'}
Factory.addProduct(Output)
class P(Node):
"""
Defines a paragraph
"""
__slots__ = ()
tagName = "p"
Factory.addProduct(P)
class Param(Node):
"""
Defines a parameter for an object
"""
__slots__ = ()
tagName = "param"
tagSelfCloses = True
allowsChildren = False
properties = Node.properties.copy()
properties['value'] = {'action':'attribute'}
Factory.addProduct(Param)
class Pre(Node):
"""
    Defines preformatted text
"""
__slots__ = ()
tagName = "pre"
Factory.addProduct(Pre)
class Progress(Node):
"""
Defines the progress of a task
"""
__slots__ = ()
tagName = "progress"
properties = Node.properties.copy()
properties['max'] = {'action':'attribute', 'type':'int'}
properties['value'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Progress)
class Q(Node):
"""
Defines a short quotation
"""
__slots__ = ()
tagName = "q"
properties = Node.properties.copy()
properties['cite'] = {'action':'attribute'}
Factory.addProduct(Q)
class RP(Node):
"""
Defines what to show in browsers that do not support ruby annotations
"""
__slots__ = ()
tagName = "rp"
Factory.addProduct(RP)
class RT(Node):
"""
Defines an explanation / pronunciation of characters (for East Asian typography)
"""
__slots__ = ()
tagName = "rt"
Factory.addProduct(RT)
class Ruby(Node):
"""
Defines ruby annotations (for East Asian typography)
"""
__slots__ = ()
tagName = "ruby"
Factory.addProduct(Ruby)
class S(Node):
"""
Defines text that is no longer correct
"""
__slots__ = ()
tagName = "s"
Factory.addProduct(S)
class Samp(Node):
"""
Defines sample output from a computer program
"""
__slots__ = ()
tagName = "samp"
Factory.addProduct(Samp)
class Script(Node):
"""
Defines a client-side script
"""
__slots__ = ()
tagName = "script"
properties = Node.properties.copy()
properties['async'] = {'action':'attribute', 'type':'bool'}
properties['defer'] = {'action':'attribute', 'type':'bool'}
properties['type'] = {'action':'attribute'}
properties['charset'] = {'action':'attribute'}
properties['src'] = {'action':'setScriptFile'}
def setScriptFile(self, scriptFile):
self.attributes['src'] = Settings.STATIC_URL + scriptFile
def scriptFile(self):
return self.attributes['src'].replace(Settings.STATIC_URL, "")
Factory.addProduct(Script)
class Section(Node):
"""
Defines a section of the document
"""
__slots__ = ()
tagName = "section"
Factory.addProduct(Section)
class Select(Node):
"""
Defines a drop-down list
"""
__slots__ = ()
tagName = "select"
properties = Node.properties.copy()
properties['autofocus'] = {'action':'attribute', 'type':'bool'}
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['form'] = {'action':'attribute'}
properties['multiple'] = {'action':'attribute', 'type':'bool'}
properties['size'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Select)
class Small(Node):
"""
Defines smaller text
NOTE: avoid using this element, when possible use elements that describe the content
instead of the appearance
"""
__slots__ = ()
tagName = "small"
Factory.addProduct(Small)
class Source(Node):
"""
Defines multiple media resources for media elements
"""
__slots__ = ()
tagName = "source"
properties = Node.properties.copy()
properties['media'] = {'action':'attribute'}
properties['src'] = {'action':'attribute'}
properties['type'] = {'action':'attribute'}
Factory.addProduct(Source)
class Span(Node):
"""
Defines a section in a document
"""
__slots__ = ()
tagName = "span"
Factory.addProduct(Span)
class Strong(Node):
"""
Defines important text
"""
__slots__ = ()
tagName = "strong"
Factory.addProduct(Strong)
class Style(Node):
"""
Defines style information for a document
"""
__slots__ = ()
tagName = "style"
properties = Node.properties.copy()
properties['media'] = {'action':'attribute'}
properties['scoped'] = {'action':'attribute', 'type':'bool'}
properties['type'] = {'action':'attribute'}
Factory.addProduct(Style)
class Sub(Node):
"""
Defines sub-scripted text
"""
__slots__ = ()
tagName = "sub"
Factory.addProduct(Sub)
class Summary(Node):
"""
Defines a visible heading for a details element
"""
__slots__ = ()
tagName = "summary"
Factory.addProduct(Summary)
class Sup(Node):
"""
Defines super-scripted text
"""
__slots__ = ()
tagName = "sup"
Factory.addProduct(Sup)
class Table(Node):
"""
Defines a table - should be used for tables of data only (not for layout)
"""
__slots__ = ()
tagName = "table"
properties = Node.properties.copy()
properties['border'] = {'action':'attribute', 'type':'bool'}
Factory.addProduct(Table)
class TBody(Node):
"""
Defines a group of content within a table
"""
__slots__ = ()
tagName = "tbody"
Factory.addProduct(TBody)
class TD(Node):
"""
Defines a table cell
"""
__slots__ = ()
tagName = "td"
properties = Node.properties.copy()
properties['colspan'] = {'action':'attribute', 'type':'number'}
properties['headers'] = {'action':'attribute'}
properties['rowspan'] = {'action':'attribute', 'type':'number'}
Factory.addProduct(TD)
class TextArea(Node):
"""
Defines multi-line text input
"""
__slots__ = ()
tagName = "textarea"
properties = Node.properties.copy()
properties['autofocus'] = {'action':'attribute', 'type':'bool'}
properties['cols'] = {'action':'attribute', 'type':'int'}
properties['disabled'] = {'action':'attribute', 'type':'bool'}
properties['form'] = {'action':'attribute'}
properties['maxlength'] = {'action':'attribute', 'type':'int'}
properties['placeholder'] = {'action':'attribute'}
properties['readonly'] = {'action':'attribute', 'type':'bool'}
properties['required'] = {'action':'attribute', 'type':'bool'}
properties['rows'] = {'action':'attribute', 'type':'int'}
properties['wrap'] = {'action':'attribute'}
Factory.addProduct(TextArea)
class TFoot(Node):
"""
Defines the footer of a table
"""
__slots__ = ()
tagName = "tfoot"
Factory.addProduct(TFoot)
class TH(Node):
"""
Defines the header cell within a table
"""
__slots__ = ()
tagName = "th"
properties = Node.properties.copy()
properties['colspan'] = {'action':'attribute', 'type':'int'}
properties['headers'] = {'action':'attribute'}
properties['rowspan'] = {'action':'attribute', 'type':'int'}
properties['scope'] = {'action':'attribute'}
Factory.addProduct(TH)
class THead(Node):
"""
Defines header content within a table
"""
__slots__ = ()
tagName = "thead"
Factory.addProduct(THead)
class Time(Node):
"""
Defines a date / time
"""
__slots__ = ()
tagName = "time"
properties = Node.properties.copy()
properties['datetime'] = {'action':'attribute'}
properties['pubdate'] = {'action':'attribute'}
Factory.addProduct(Time)
class Title(Node):
"""
Defines the title of a document
"""
__slots__ = ()
tagName = "title"
Factory.addProduct(Title)
class TR(Node):
"""
Defines a table row
"""
__slots__ = ()
tagName = "tr"
Factory.addProduct(TR)
class Track(Node):
"""
Defines text tracks for media elements
"""
__slots__ = ()
tagName = "track"
properties = Node.properties.copy()
properties['default'] = {'action':'attribute', 'type':'bool'}
properties['kind'] = {'action':'attribute'}
properties['label'] = {'action':'attribute'}
properties['src'] = {'action':'attribute'}
properties['srclang'] = {'action':'attribute'}
Factory.addProduct(Track)
class U(Node):
"""
Defines text that should be stylistically different from normal text
NOTE: avoid using this element, when possible use elements that describe the content
instead of the appearance
"""
__slots__ = ()
tagName = "u"
Factory.addProduct(U)
class UL(Node):
"""
Defines an unordered list
"""
__slots__ = ()
tagName = "ul"
Factory.addProduct(UL)
class Var(Node):
"""
Defines a variable
"""
__slots__ = ()
tagName = "var"
Factory.addProduct(Var)
class Video(Node):
"""
Defines a video or movie
"""
__slots__ = ()
tagName = "video"
properties = Node.properties.copy()
properties['autoplay'] = {'action':'attribute', 'type':'bool'}
properties['controls'] = {'action':'attribute', 'type':'bool'}
properties['height'] = {'action':'attribute', 'type':'int'}
properties['loop'] = {'action':'attribute', 'type':'bool'}
properties['muted'] = {'action':'attribute', 'type':'bool'}
properties['poster'] = {'action':'attribute'}
properties['preload'] = {'action':'attribute'}
properties['src'] = {'action':'attribute'}
properties['width'] = {'action':'attribute', 'type':'int'}
Factory.addProduct(Video)
class Wbr(Node):
"""
Defines a possible line-break
"""
__slots__ = ()
tagName = "wbr"
Factory.addProduct(Wbr)
| gpl-2.0 |
wdzhou/mantid | Framework/PythonInterface/plugins/algorithms/CalibrateRectangularDetectors.py | 1 | 26300 | #pylint: disable=no-init,invalid-name
from __future__ import (absolute_import, division, print_function)
from mantid.api import *
from mantid.kernel import *
from mantid.simpleapi import *
import os
from time import strftime
from mantid.kernel import Direction
COMPRESS_TOL_TOF = .01
EXTENSIONS_NXS = ["_event.nxs", ".nxs.h5"]
def getBasename(filename):
name = os.path.split(filename)[-1]
for extension in EXTENSIONS_NXS:
name = name.replace(extension, '')
return name
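# For example (hypothetical path): getBasename("/SNS/PG3_1234_event.nxs") drops the
# directory and the "_event.nxs" extension, returning "PG3_1234".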
#pylint: disable=too-many-instance-attributes
class CalibrateRectangularDetectors(PythonAlgorithm):
_filterBadPulses = None
_xpixelbin = None
_ypixelbin = None
_grouping = None
_smoothoffsets = None
_smoothGroups = None
_peakpos = None
_peakpos1 = None
_peakmin = None
_peakmax = None
_peakpos2 = None
_peakmin2 = None
_peakmax2 = None
_peakpos3 = None
_peakmin3 = None
_peakmax3 = None
_lastpixel = None
_lastpixel2 = None
_lastpixel3 = None
_ccnumber = None
_maxoffset = None
_diffractionfocus = None
_outDir = None
_outTypes = None
_binning = None
def category(self):
return "Diffraction\\Calibration"
def name(self):
return "CalibrateRectangularDetectors"
def summary(self):
return "Calibrate the detector pixels and write a calibration file"
def PyInit(self):
self.declareProperty(MultipleFileProperty(name="RunNumber",
extensions=EXTENSIONS_NXS),
"Event file")
validator = IntArrayBoundedValidator()
validator.setLower(0)
self.declareProperty(IntArrayProperty("Background", values=[0], direction=Direction.Input,
validator=validator))
self.declareProperty("XPixelSum", 1,
"Sum detector pixels in X direction. Must be a factor of X total pixels. Default is 1.")
self.declareProperty("YPixelSum", 1,
"Sum detector pixels in Y direction. Must be a factor of Y total pixels. Default is 1.")
self.declareProperty("SmoothSummedOffsets", False,
"If the data was summed for calibration, smooth the resulting offsets workspace.")
self.declareProperty("SmoothGroups", "",
"Comma delimited number of points for smoothing pixels in each group. Default is no Smoothing.")
self.declareProperty("UnwrapRef", 0.,
"Reference total flight path for frame unwrapping. Zero skips the correction")
self.declareProperty("LowResRef", 0.,
"Reference DIFC for resolution removal. Zero skips the correction")
self.declareProperty("MaxOffset", 1.0,
"Maximum absolute value of offsets; default is 1")
self.declareProperty("CrossCorrelation", True,
"CrossCorrelation if True; minimize using many peaks if False.")
validator = FloatArrayBoundedValidator()
validator.setLower(0.)
self.declareProperty(FloatArrayProperty("PeakPositions", []),
"Comma delimited d-space positions of reference peaks. Use 1-3 for Cross Correlation. "+
"Unlimited for many peaks option.")
self.declareProperty("PeakWindowMax", 0.,
"Maximum window around a peak to search for it. Optional.")
self.declareProperty(ITableWorkspaceProperty("FitwindowTableWorkspace", "", Direction.Input, PropertyMode.Optional),
"Name of input table workspace containing the fit window information for each spectrum. ")
self.declareProperty("MinimumPeakHeight", 2., "Minimum value allowed for peak height")
self.declareProperty("MinimumPeakHeightObs", 0.,
"Minimum value of a peak's maximum observed Y value for this peak to be used to calculate offset.")
self.declareProperty(MatrixWorkspaceProperty("DetectorResolutionWorkspace", "", Direction.Input, PropertyMode.Optional),
"Name of optional input matrix workspace for each detector's resolution (D(d)/d).")
self.declareProperty(FloatArrayProperty("AllowedResRange", [0.25, 4.0], direction=Direction.Input),
"Range of allowed individual peak's resolution factor to input detector's resolution.")
self.declareProperty("PeakFunction", "Gaussian", StringListValidator(["BackToBackExponential", "Gaussian", "Lorentzian"]),
"Type of peak to fit. Used only with CrossCorrelation=False")
self.declareProperty("BackgroundType", "Flat", StringListValidator(['Flat', 'Linear', 'Quadratic']),
"Used only with CrossCorrelation=False")
self.declareProperty(IntArrayProperty("DetectorsPeaks", []),
"Comma delimited numbers of detector banks for each peak if using 2-3 peaks for Cross Correlation. "+
"Default is all.")
self.declareProperty("PeakHalfWidth", 0.05,
"Half width of d-space around peaks for Cross Correlation. Default is 0.05")
self.declareProperty("CrossCorrelationPoints", 100,
"Number of points to find peak from Cross Correlation. Default is 100")
self.declareProperty(FloatArrayProperty("Binning", [0.,0.,0.]),
"Min, Step, and Max of d-space bins. Logarithmic binning is used if Step is negative.")
self.declareProperty("DiffractionFocusWorkspace", False, "Diffraction focus by detectors. Default is False")
grouping = ["All", "Group", "Column", "bank"]
self.declareProperty("GroupDetectorsBy", "All", StringListValidator(grouping),
"Detector groups to use for future focussing: All detectors as one group, "+
"Groups (East,West for SNAP), Columns for SNAP, detector banks")
self.declareProperty("FilterBadPulses", True, "Filter out events measured while proton charge is more than 5% below average")
self.declareProperty("FilterByTimeMin", 0.,
"Relative time to start filtering by in seconds. Applies only to sample.")
self.declareProperty("FilterByTimeMax", 0.,
"Relative time to stop filtering by in seconds. Applies only to sample.")
outfiletypes = ['dspacemap', 'calibration', 'dspacemap and calibration']
self.declareProperty("SaveAs", "calibration", StringListValidator(outfiletypes))
self.declareProperty(FileProperty("OutputDirectory", "", FileAction.Directory))
self.declareProperty("OutputFilename", "", Direction.Output)
return
def validateInputs(self):
"""
Validate inputs
:return:
"""
messages = {}
detectors = self.getProperty("DetectorsPeaks").value
if self.getProperty("CrossCorrelation").value:
positions = self.getProperty("PeakPositions").value
if len(detectors) <= 1:
if len(positions) != 1:
messages["PeakPositions"] = "Can only have one cross correlation peak without " \
"specifying 'DetectorsPeaks'"
else:
if len(detectors) != len(positions):
messages["PeakPositions"] = "Must be the same length as 'DetectorsPeaks' (%d != %d)" \
% (len(positions), len(detectors))
messages["DetectorsPeaks"] = "Must be the same length as 'PeakPositions' or empty"
elif len(detectors) > 3:
messages["DetectorsPeaks"] = "Up to 3 peaks are supported"
elif bool(detectors):
messages["DetectorsPeaks"] = "Only allowed for CrossCorrelation=True"
return messages
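    # Illustrative (hypothetical) property combinations for the rules above:
    #   CrossCorrelation=True,  PeakPositions=[1.26]                              -> valid
    #   CrossCorrelation=True,  PeakPositions=[1.26, 2.18], DetectorsPeaks=[2, 1] -> valid
    #   CrossCorrelation=True,  PeakPositions=[1.26, 2.18]                        -> rejected (needs DetectorsPeaks)
    #   CrossCorrelation=False, DetectorsPeaks=[2]                                -> rejected (only for CrossCorrelation)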
def _loadData(self, filename, filterWall=None):
if filename is None or len(filename) <= 0:
return None
kwargs = {"Precount":False}
if filterWall is not None:
if filterWall[0] > 0.:
kwargs["FilterByTimeStart"] = filterWall[0]
if filterWall[1] > 0.:
kwargs["FilterByTimeStop"] = filterWall[1]
wkspName = getBasename(filename)
LoadEventNexus(Filename=filename, OutputWorkspace=wkspName, **kwargs)
FilterBadPulses(InputWorkspace=wkspName, OutputWorkspace=wkspName)
CompressEvents(InputWorkspace=wkspName, OutputWorkspace=wkspName,
Tolerance=COMPRESS_TOL_TOF) # 100ns
return wkspName
def _saveCalibration(self, wkspName, calibFilePrefix):
outfilename = None
if "dspacemap" in self._outTypes:
outfilename = calibFilePrefix.replace('_d', '_dspacemap_d') + '.dat'
if os.path.exists(outfilename):
os.unlink(outfilename)
#write Dspacemap file
SaveDspacemap(InputWorkspace=wkspName+"offset",
DspacemapFile=outfilename)
if "calibration" in self._outTypes:
# for the sake of legacy
SaveCalFile(OffsetsWorkspace=wkspName+"offset",
GroupingWorkspace=wkspName+"group",
MaskWorkspace=wkspName+"mask",Filename=calibFilePrefix + '.cal')
# the real version
outfilename = calibFilePrefix + '.h5'
if os.path.exists(outfilename):
os.unlink(outfilename)
ConvertDiffCal(OffsetsWorkspace=wkspName+"offset",
OutputWorkspace=wkspName+"cal")
SaveDiffCal(CalibrationWorkspace=wkspName+"cal",
GroupingWorkspace=wkspName+"group",
MaskWorkspace=wkspName+"mask",
Filename=outfilename)
if outfilename is not None:
self.setProperty("OutputFilename", outfilename)
def _createGrouping(self, wkspName):
(_, numGroupedSpectra, numGroups) = CreateGroupingWorkspace(InputWorkspace=wkspName,
GroupDetectorsBy=self._grouping,
OutputWorkspace=wkspName+"group")
if (numGroupedSpectra==0) or (numGroups==0):
raise RuntimeError("%d spectra will be in %d groups" % (numGroupedSpectra, numGroups))
#pylint: disable=too-many-branches
def _cccalibrate(self, wksp):
if wksp is None:
return None
# Bin events in d-Spacing
Rebin(InputWorkspace=wksp, OutputWorkspace=wksp,
Params=str(self._peakmin)+","+str(abs(self._binning[1]))+","+str(self._peakmax))
#Find good peak for reference
ymax = 0
for s in range(0,mtd[wksp].getNumberHistograms()):
y_s = mtd[wksp].readY(s)
midBin = int(mtd[wksp].blocksize()/2)
if y_s[midBin] > ymax:
refpixel = s
ymax = y_s[midBin]
self.log().information("Reference spectra=%s" % refpixel)
# Cross correlate spectra using interval around peak at peakpos (d-Spacing)
if self._lastpixel == 0:
self._lastpixel = mtd[wksp].getNumberHistograms()-1
else:
self._lastpixel = int(mtd[wksp].getNumberHistograms()*self._lastpixel/self._lastpixel3) - 1
self.log().information("Last pixel=%s" % self._lastpixel)
CrossCorrelate(InputWorkspace=wksp, OutputWorkspace=wksp+"cc",
ReferenceSpectra=refpixel, WorkspaceIndexMin=0,
WorkspaceIndexMax=self._lastpixel,
XMin=self._peakmin, XMax=self._peakmax)
# Get offsets for pixels using interval around cross correlations center and peak at peakpos (d-Spacing)
GetDetectorOffsets(InputWorkspace=wksp+"cc", OutputWorkspace=wksp+"offset",
Step=abs(self._binning[1]), DReference=self._peakpos1,
XMin=-self._ccnumber, XMax=self._ccnumber,
MaxOffset=self._maxoffset, MaskWorkspace=wksp+"mask")
if AnalysisDataService.doesExist(wksp+"cc"):
AnalysisDataService.remove(wksp+"cc")
if self._peakpos2 > 0.0:
Rebin(InputWorkspace=wksp, OutputWorkspace=wksp,
Params=str(self._peakmin2)+","+str(abs(self._binning[1]))+","+str(self._peakmax2))
#Find good peak for reference
ymax = 0
for s in range(0,mtd[wksp].getNumberHistograms()):
y_s = mtd[wksp].readY(s)
midBin = int(mtd[wksp].blocksize()/2)
if y_s[midBin] > ymax:
refpixel = s
ymax = y_s[midBin]
msg = "Reference spectra = %s, lastpixel_3 = %s" % (refpixel, self._lastpixel3)
self.log().information(msg)
self._lastpixel2 = int(mtd[wksp].getNumberHistograms()*self._lastpixel2/self._lastpixel3) - 1
CrossCorrelate(InputWorkspace=wksp, OutputWorkspace=wksp+"cc2",
ReferenceSpectra=refpixel, WorkspaceIndexMin=self._lastpixel+1,
WorkspaceIndexMax=self._lastpixel2,
XMin=self._peakmin2, XMax=self._peakmax2)
# Get offsets for pixels using interval around cross correlations center and peak at peakpos (d-Spacing)
GetDetectorOffsets(InputWorkspace=wksp+"cc2", OutputWorkspace=wksp+"offset2",
Step=abs(self._binning[1]), DReference=self._peakpos2,
XMin=-self._ccnumber, XMax=self._ccnumber,
MaxOffset=self._maxoffset, MaskWorkspace=wksp+"mask2")
Plus(LHSWorkspace=wksp+"offset", RHSWorkspace=wksp+"offset2",
OutputWorkspace=wksp+"offset")
Plus(LHSWorkspace=wksp+"mask", RHSWorkspace=wksp+"mask2",
OutputWorkspace=wksp+"mask")
for ws in [wksp+"cc2", wksp+"offset2", wksp+"mask2"]:
if AnalysisDataService.doesExist(ws):
AnalysisDataService.remove(ws)
if self._peakpos3 > 0.0:
Rebin(InputWorkspace=wksp, OutputWorkspace=wksp,
Params=str(self._peakmin3)+","+str(abs(self._binning[1]))+","+str(self._peakmax3))
#Find good peak for reference
ymax = 0
for s in range(0,mtd[wksp].getNumberHistograms()):
y_s = mtd[wksp].readY(s)
                midBin = int(mtd[wksp].blocksize()/2)
if y_s[midBin] > ymax:
refpixel = s
ymax = y_s[midBin]
self.log().information("Reference spectra=%s" % refpixel)
CrossCorrelate(InputWorkspace=wksp, OutputWorkspace=wksp+"cc3",
ReferenceSpectra=refpixel,
WorkspaceIndexMin=self._lastpixel2+1,
WorkspaceIndexMax=mtd[wksp].getNumberHistograms()-1,
XMin=self._peakmin3, XMax=self._peakmax3)
# Get offsets for pixels using interval around cross correlations center and peak at peakpos (d-Spacing)
GetDetectorOffsets(InputWorkspace=wksp+"cc3", OutputWorkspace=wksp+"offset3",
Step=abs(self._binning[1]), DReference=self._peakpos3,
XMin=-self._ccnumber, XMax=self._ccnumber,
MaxOffset=self._maxoffset, MaskWorkspace=wksp+"mask3")
Plus(LHSWorkspace=wksp+"offset", RHSWorkspace=wksp+"offset3",
OutputWorkspace=str(wksp)+"offset")
Plus(LHSWorkspace=wksp+"mask", RHSWorkspace=wksp+"mask3",
OutputWorkspace=wksp+"mask")
for ws in [wksp+"cc3", wksp+"offset3", wksp+"mask3"]:
if AnalysisDataService.doesExist(ws):
AnalysisDataService.remove(ws)
return str(wksp)
#pylint: disable=too-many-branches
def _multicalibrate(self, wksp):
if wksp is None:
return None
# Bin events in d-Spacing
Rebin(InputWorkspace=wksp, OutputWorkspace=wksp,
Params=str(self._binning[0])+","+str((self._binning[1]))+","+str(self._binning[2]))
if len(self._smoothGroups) > 0:
SmoothData(InputWorkspace=wksp, OutputWorkspace=wksp,
NPoints=self._smoothGroups, GroupingWorkspace=wksp+"group")
# Get the fit window input workspace
fitwinws = self.getProperty("FitwindowTableWorkspace").value
# Set up resolution workspace
resws = self.getProperty("DetectorResolutionWorkspace").value
if resws is not None:
resrange = self.getProperty("AllowedResRange").value
if len(resrange) < 2:
raise NotImplementedError("With input of 'DetectorResolutionWorkspace', "+
"number of allowed resolution range must be equal to 2.")
reslowf = resrange[0]
resupf = resrange[1]
if reslowf >= resupf:
raise NotImplementedError("Allowed resolution range factor, lower boundary "+
"(%f) must be smaller than upper boundary (%f)."
% (reslowf, resupf))
else:
reslowf = 0.0
resupf = 0.0
# Get offsets for pixels using interval around cross correlations center and peak at peakpos (d-Spacing)
GetDetOffsetsMultiPeaks(InputWorkspace=wksp, OutputWorkspace=wksp+"offset",
DReference=self._peakpos,
FitWindowMaxWidth=self.getProperty("PeakWindowMax").value,
MinimumPeakHeight=self.getProperty("MinimumPeakHeight").value,
MinimumPeakHeightObs=self.getProperty("MinimumPeakHeightObs").value,
BackgroundType=self.getProperty("BackgroundType").value,
MaxOffset=self._maxoffset, NumberPeaksWorkspace=wksp+"peaks",
MaskWorkspace=wksp+"mask",
FitwindowTableWorkspace = fitwinws,
InputResolutionWorkspace=resws,
MinimumResolutionFactor = reslowf,
MaximumResolutionFactor = resupf)
#Fixed SmoothNeighbours for non-rectangular and rectangular
if self._smoothoffsets and self._xpixelbin*self._ypixelbin>1: # Smooth data if it was summed
SmoothNeighbours(InputWorkspace=wksp+"offset", OutputWorkspace=wksp+"offset",
WeightedSum="Flat",
AdjX=self._xpixelbin, AdjY=self._ypixelbin)
Rebin(InputWorkspace=wksp, OutputWorkspace=wksp,
Params=str(self._binning[0])+","+str((self._binning[1]))+","+str(self._binning[2]))
return str(wksp)
def _focus(self, wksp):
if wksp is None:
return None
MaskDetectors(Workspace=wksp, MaskedWorkspace=str(wksp)+"mask")
wksp = AlignDetectors(InputWorkspace=wksp, OutputWorkspace=wksp,
CalibrationWorkspace=str(wksp)+"cal")
# Diffraction focusing using new calibration file with offsets
if self._diffractionfocus:
wksp = DiffractionFocussing(InputWorkspace=wksp, OutputWorkspace=wksp,
GroupingWorkspace=str(wksp)+"group")
wksp = Rebin(InputWorkspace=wksp, OutputWorkspace=wksp, Params=self._binning)
return wksp
def _initCCpars(self):
self._peakpos1 = self._peakpos[0]
self._peakpos2 = 0
self._peakpos3 = 0
self._lastpixel = 0
self._lastpixel2 = 0
self._lastpixel3 = 0
peakhalfwidth = self.getProperty("PeakHalfWidth").value
self._peakmin = self._peakpos1-peakhalfwidth
self._peakmax = self._peakpos1+peakhalfwidth
if len(self._peakpos) >= 2:
self._peakpos2 = self._peakpos[1]
self._peakmin2 = self._peakpos2-peakhalfwidth
self._peakmax2 = self._peakpos2+peakhalfwidth
if len(self._peakpos) >= 3:
self._peakpos3 = self._peakpos[2]
self._peakmin3 = self._peakpos3-peakhalfwidth
self._peakmax3 = self._peakpos3+peakhalfwidth
detectors = self.getProperty("DetectorsPeaks").value
if len(detectors) == 0:
detectors = [0]
if detectors[0]:
self._lastpixel = int(detectors[0])
self._lastpixel3 = self._lastpixel
if len(detectors) >= 2:
self._lastpixel2 = self._lastpixel+int(detectors[1])
self._lastpixel3 = self._lastpixel2
if len(detectors) >= 3:
self._lastpixel3 = self._lastpixel2+int(detectors[2])
self._ccnumber = self.getProperty("CrossCorrelationPoints").value
#pylint: disable=too-many-branches
def PyExec(self):
# get generic information
self._binning = self.getProperty("Binning").value
if len(self._binning) != 1 and len(self._binning) != 3:
raise RuntimeError("Can only specify (width) or (start,width,stop) for binning. Found %d values." % len(self._binning))
if len(self._binning) == 3:
if self._binning[0] == 0. and self._binning[1] == 0. and self._binning[2] == 0.:
raise RuntimeError("Failed to specify the binning")
self._grouping = self.getProperty("GroupDetectorsBy").value
self._xpixelbin = self.getProperty("XPixelSum").value
self._ypixelbin = self.getProperty("YPixelSum").value
self._smoothoffsets = self.getProperty("SmoothSummedOffsets").value
self._smoothGroups = self.getProperty("SmoothGroups").value
self._peakpos = self.getProperty("PeakPositions").value
if self.getProperty("CrossCorrelation").value:
self._initCCpars()
self._maxoffset = self.getProperty("MaxOffset").value
self._diffractionfocus = self.getProperty("DiffractionFocusWorkspace").value
self._filterBadPulses = self.getProperty("FilterBadPulses").value
self._outDir = self.getProperty("OutputDirectory").value+"/"
self._outTypes = self.getProperty("SaveAs").value
samRuns = self.getProperty("RunNumber").value
backRuns = self.getProperty("Background").value
if len(samRuns) != len(backRuns):
if (len(backRuns) == 1 and backRuns[0] == 0) or (len(backRuns) <= 0):
backRuns = [0]*len(samRuns)
else:
raise RuntimeError("Number of samples and backgrounds must match (%d!=%d)" % (len(samRuns), len(backRuns)))
filterWall = (self.getProperty("FilterByTimeMin").value, self.getProperty("FilterByTimeMax").value)
stuff = getBasename(samRuns[0])
stuff = stuff.split('_')
(instrument, runNumber) = ('_'.join(stuff[:-1]), stuff[-1])
calib = instrument+"_calibrate_d"+runNumber+strftime("_%Y_%m_%d")
calib = os.path.join(self._outDir, calib)
for (samNum, backNum) in zip(samRuns, backRuns):
# first round of processing the sample
samRun = self._loadData(samNum, filterWall)
samRun = str(samRun)
if backNum > 0:
backRun = self._loadData(instrument+'_'+str(backNum), filterWall)
Minus(LHSWorkspace=samRun, RHSWorkspace=backRun,
OutputWorkspace=samRun)
DeleteWorkspace(backRun)
CompressEvents(samRun, OutputWorkspace=samRun,
Tolerance=COMPRESS_TOL_TOF) # 100ns
self._createGrouping(samRun)
LRef = self.getProperty("UnwrapRef").value
DIFCref = self.getProperty("LowResRef").value
# super special Jason stuff
if LRef > 0:
UnwrapSNS(InputWorkspace=samRun, OutputWorkspace=samRun, LRef=LRef)
if DIFCref > 0:
RemoveLowResTOF(InputWorkspace=samRun, OutputWorkspace=samRun,
ReferenceDIFC=DIFCref)
ConvertUnits(InputWorkspace=samRun, OutputWorkspace=samRun, Target="dSpacing")
# Sum pixelbin X pixelbin blocks of pixels
if self._xpixelbin*self._ypixelbin>1:
SumNeighbours(InputWorkspace=samRun, OutputWorkspace=samRun,
SumX=self._xpixelbin, SumY=self._ypixelbin)
if self.getProperty("CrossCorrelation").value:
samRun = self._cccalibrate(samRun)
else:
samRun = self._multicalibrate(samRun)
self._saveCalibration(samRun, calib)
if self._xpixelbin*self._ypixelbin>1 or len(self._smoothGroups) > 0:
if AnalysisDataService.doesExist(samRun):
AnalysisDataService.remove(samRun)
samRun = self._loadData(samNum, filterWall)
LRef = self.getProperty("UnwrapRef").value
DIFCref = self.getProperty("LowResRef").value
# super special Jason stuff
if LRef > 0:
samRun = UnwrapSNS(InputWorkspace=samRun, OutputWorkspace=samRun,
LRef=LRef)
if DIFCref > 0:
samRun = RemoveLowResTOF(InputWorkspace=samRun, OutputWorkspace=samRun,
ReferenceDIFC=DIFCref)
else:
samRun = ConvertUnits(InputWorkspace=samRun, OutputWorkspace=samRun,
Target="TOF")
samRun = self._focus(samRun)
RenameWorkspace(InputWorkspace=samRun, OutputWorkspace=str(samRun)+"_calibrated")
AlgorithmFactory.subscribe(CalibrateRectangularDetectors)
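# Once subscribed, the algorithm is available through Mantid's Python API; a
# hypothetical invocation (file name, d-spacings and paths are illustrative only)
# might look like:
#
#   CalibrateRectangularDetectors(RunNumber="PG3_1234_event.nxs",
#                                 PeakPositions=[1.2615],
#                                 Binning=[0.1, -0.0004, 2.2],
#                                 SaveAs="calibration",
#                                 OutputDirectory="/tmp")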
| gpl-3.0 |
BaladiDogGames/baladidoggames.github.io | mingw/bin/lib/threading.py | 31 | 32474 | """Thread module emulating a subset of Java's threading model."""
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
import warnings
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
# Note regarding PEP 8 compliant aliases
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. While those names are not in any imminent danger of being
# deprecated, starting with Python 2.6, the module now provides a
# PEP 8 compliant alias for any such method name.
# Using the new PEP 8 compliant names also facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='threading', message='sys.exc_clear')
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self.__verbose = verbose
def _note(self, format, *args):
if self.__verbose:
format = format % args
# Issue #4188: calling current_thread() can incur an infinite
# recursion if it has to create a DummyThread on the fly.
ident = _get_ident()
try:
name = _active[ident].name
except KeyError:
name = "<OS thread %d>" % ident
format = "%s: %s\n" % (name, format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
global _profile_hook
_profile_hook = func
def settrace(func):
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
return _RLock(*args, **kwargs)
class _RLock(_Verbose):
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
owner = self.__owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self.__count)
def acquire(self, blocking=1):
me = _get_ident()
if self.__owner == me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, count_owner):
count, owner = count_owner
self.__block.acquire()
self.__count = count
self.__owner = owner
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner == _get_ident()
def Condition(*args, **kwargs):
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self.__lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self.__waiters = []
def __enter__(self):
return self.__lock.__enter__()
def __exit__(self, *args):
return self.__lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
def _release_save(self):
self.__lock.release() # No state to save
def _acquire_restore(self, x):
self.__lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self.__lock.acquire(0):
self.__lock.release()
return False
else:
return True
def wait(self, timeout=None):
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive. The scheme here sleeps very
# little at first, longer as time goes on, but never longer
# than 20 times per second (or the timeout time remaining).
endtime = _time() + timeout
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = endtime - _time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, .05)
_sleep(delay)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self.__waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notifyAll(self):
self.notify(len(self.__waiters))
notify_all = notifyAll
def Semaphore(*args, **kwargs):
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__value = value
def acquire(self, blocking=1):
rc = False
self.__cond.acquire()
while self.__value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = self.__value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self.__value)
rc = True
self.__cond.release()
return rc
__enter__ = acquire
def release(self):
self.__cond.acquire()
self.__value = self.__value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self.__value)
self.__cond.notify()
self.__cond.release()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""Semaphore that checks that # releases is <= # acquires"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
if self._Semaphore__value >= self._initial_value:
raise ValueError, "Semaphore released too many times"
return _Semaphore.release(self)
def Event(*args, **kwargs):
return _Event(*args, **kwargs)
class _Event(_Verbose):
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self.__cond.__init__()
def isSet(self):
return self.__flag
is_set = isSet
def set(self):
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notify_all()
finally:
self.__cond.release()
def clear(self):
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
finally:
self.__cond.release()
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
__exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self.__target = target
self.__name = str(name or _newname())
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__ident = None
self.__started = Event()
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self.__stderr = _sys.stderr
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_Thread__block'): # DummyThread deletes self.__block
self.__block.__init__()
self.__started._reset_internal_locks()
@property
def _block(self):
# used by a unittest
return self.__block
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self.__initialized, "Thread.__init__() was not called"
status = "initial"
if self.__started.is_set():
status = "started"
if self.__stopped:
status = "stopped"
if self.__daemonic:
status += " daemon"
if self.__ident is not None:
status += " %s" % self.__ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
if not self.__initialized:
raise RuntimeError("thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self.__bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self.__started.wait()
def run(self):
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
def __bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# __bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# __bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self.__bootstrap_inner()
except:
if self.__daemonic and _sys is None:
return
raise
def _set_ident(self):
self.__ident = _get_ident()
def __bootstrap_inner(self):
try:
self._set_ident()
self.__started.set()
with _active_limbo_lock:
_active[self.__ident] = self
del _limbo[self]
if __debug__:
self._note("%s.__bootstrap(): thread started", self)
if _trace_hook:
self._note("%s.__bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s.__bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s.__bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s.__bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self.__stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self.__exc_info()
try:
print>>self.__stderr, (
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):")
print>>self.__stderr, (
"Traceback (most recent call last):")
while exc_tb:
print>>self.__stderr, (
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name))
exc_tb = exc_tb.tb_next
print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s.__bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
self.__exc_clear()
finally:
with _active_limbo_lock:
self.__stop()
try:
# We don't call self.__delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def __stop(self):
self.__block.acquire()
self.__stopped = True
self.__block.notify_all()
self.__block.release()
def __delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with dummy_thread:
#
# Must take care to not raise an exception if dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). dummy_thread.get_ident() always returns -1 since
# there is only one thread if dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if not self.__started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self.__stopped:
self._note("%s.join(): waiting until thread stops", self)
self.__block.acquire()
try:
if timeout is None:
while not self.__stopped:
self.__block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self.__stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self.__block.release()
@property
def name(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__name
@name.setter
def name(self, name):
assert self.__initialized, "Thread.__init__() not called"
self.__name = str(name)
@property
def ident(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__ident
def isAlive(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__started.is_set() and not self.__stopped
is_alive = isAlive
@property
def daemon(self):
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
@daemon.setter
def daemon(self, daemonic):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self.__daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return False
def _exitfunc(self):
self._Thread__stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._Thread__delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread.__block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._Thread__block
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
try:
return _active[_get_ident()]
except KeyError:
##print "current_thread(): no current thread for", _get_ident()
return _DummyThread()
current_thread = currentThread
def activeCount():
with _active_limbo_lock:
return len(_active) + len(_limbo)
active_count = activeCount
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return _active.values() + _limbo.values()
def enumerate():
with _active_limbo_lock:
return _active.values() + _limbo.values()
from thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _active.itervalues():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if hasattr(thread, '_reset_internal_locks'):
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._Thread__ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._Thread__stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
# Self-test code
def _test():
class BoundedQueue(_Verbose):
def __init__(self, limit):
_Verbose.__init__(self)
self.mon = RLock()
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
self.queue = deque()
def put(self, item):
self.mon.acquire()
while len(self.queue) >= self.limit:
self._note("put(%s): queue full", item)
self.wc.wait()
self.queue.append(item)
self._note("put(%s): appended, length now %d",
item, len(self.queue))
self.rc.notify()
self.mon.release()
def get(self):
self.mon.acquire()
while not self.queue:
self._note("get(): queue empty")
self.rc.wait()
item = self.queue.popleft()
self._note("get(): got %s, %d left", item, len(self.queue))
self.wc.notify()
self.mon.release()
return item
class ProducerThread(Thread):
def __init__(self, queue, quota):
Thread.__init__(self, name="Producer")
self.queue = queue
self.quota = quota
def run(self):
from random import random
counter = 0
while counter < self.quota:
counter = counter + 1
self.queue.put("%s.%d" % (self.name, counter))
_sleep(random() * 0.00001)
class ConsumerThread(Thread):
def __init__(self, queue, count):
Thread.__init__(self, name="Consumer")
self.queue = queue
self.count = count
def run(self):
while self.count > 0:
item = self.queue.get()
print item
self.count = self.count - 1
NP = 3
QL = 4
NI = 5
Q = BoundedQueue(QL)
P = []
for i in range(NP):
t = ProducerThread(Q, NI)
t.name = ("Producer-%d" % (i+1))
P.append(t)
C = ConsumerThread(Q, NI*NP)
for t in P:
t.start()
_sleep(0.000001)
C.start()
for t in P:
t.join()
C.join()
if __name__ == '__main__':
_test()
| mit |
calico/basenji | tests/train_full/make_targets.py | 1 | 3365 | #!/usr/bin/env python
from optparse import OptionParser
import glob
import os
import subprocess
import sys
import pandas as pd
'''
make_targets.py
Make targets table for generating TF Records.
'''
################################################################################
# main
################################################################################
def main():
usage = 'usage: %prog [options] arg'
parser = OptionParser(usage)
#parser.add_option()
(options,args) = parser.parse_args()
species = ['human']
assays = ['DNASE','ATAC','CAGE']
# sources = ['encode', 'fantom', 'geo', 'uw-atlas']
sources = ['encode', 'fantom']
source_clip = {'encode':32, 'fantom':384, 'geo':64, 'uw-atlas':32}
source_scale = {'encode':2, 'fantom':1, 'geo':1, 'uw-atlas':4}
source_sum = {'encode':'mean', 'fantom':'sum', 'geo':'sum', 'uw-atlas':'mean'}
targets_file = 'targets.txt'
targets_out = open(targets_file, 'w')
print('\t'.join(['index', 'genome', 'identifier', 'file', 'clip', 'scale', 'sum_stat', 'description']), file=targets_out)
ti = 0
for si in range(len(species)):
for assay in assays:
for source in sources:
# collect w5 files
w5_files = sorted(glob.glob('%s/datasets/%s/%s/%s/*/summary/*.w5' % (os.environ['TILLAGE'], species[si], assay.lower(), source)))
if len(w5_files) > 0:
print('%s %s %s %d datasets' % (species[si], assay, source, len(w5_files)))
# parse and write each w5 file
for w5_file in w5_files:
w5_dir = os.path.split(w5_file)[0]
meta_file = '%s/metadata.txt' % w5_dir
# source = meta_file.split('/')[-4]
# read meta dict
meta_dict = read_meta(meta_file)
# check retirement
if meta_dict.get('status','active') != 'retired':
              # augment description (use a local name so the enclosing loop
              # variable `assay` is not clobbered mid-iteration)
              assay_label = assay_succinct(meta_dict['assay'])
              if assay_label == 'CHIP':
                desc = '%s:%s:%s' % (assay_label, meta_dict['target'], meta_dict['description'])
              else:
                desc = '%s:%s' % (assay_label, meta_dict['description'])
cols = [str(ti), str(si), meta_dict['identifier'], w5_file, str(source_clip[source]), str(source_scale[source]), source_sum[source], desc]
print('\t'.join(cols), file=targets_out)
ti += 1
targets_out.close()
##################################################
# tests
targets_df = pd.read_table(targets_file, index_col=0)
unique_ids = set(targets_df.identifier)
assert(len(unique_ids) == targets_df.shape[0])
def assay_succinct(assay):
assay = assay.replace('-seq', '')
return assay.upper()
def read_meta(meta_file):
meta_dict = {}
for line in open(meta_file):
a = line.strip().split('\t')
if len(a) > 1:
meta_dict[a[0]] = a[1]
return meta_dict
################################################################################
# __main__
################################################################################
if __name__ == '__main__':
main()
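# Sketch of the resulting targets.txt layout (tab-separated; the values shown
# are illustrative placeholders, not real dataset entries):
#   index  genome  identifier  file                       clip  scale  sum_stat  description
#   0      0       <id>        /.../summary/<sample>.w5   32    2      mean      DNASE:<description>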
| apache-2.0 |
SlimRoms/android_external_chromium_org | tools/perf/measurements/skpicture_printer_unittest.py | 9 | 1311 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import shutil
import tempfile
from measurements import skpicture_printer
from telemetry import test
from telemetry.page import page_measurement_unittest_base
from telemetry.unittest import options_for_unittests
class SkpicturePrinterUnitTest(
page_measurement_unittest_base.PageMeasurementUnitTestBase):
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._options.skp_outdir = tempfile.mkdtemp('_skp_test')
def tearDown(self):
shutil.rmtree(self._options.skp_outdir)
@test.Disabled('android')
def testSkpicturePrinter(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('blank.html')
measurement = skpicture_printer.SkpicturePrinter()
results = self.RunMeasurement(measurement, ps, options=self._options)
# Picture printing is not supported on all platforms.
if results.failures:
assert 'not supported' in results.failures[0][1]
return
saved_picture_count = results.FindAllPageSpecificValuesNamed(
'saved_picture_count')
self.assertEquals(len(saved_picture_count), 1)
self.assertGreater(saved_picture_count[0].GetRepresentativeNumber(), 0)
| bsd-3-clause |
HKuz/Test_Code | exceptions.py | 1 | 1762 | #!/Applications/anaconda/envs/Python3/bin
def main():
'''Examples Using Exceptions in Python'''
# Python exceptions: http://docs.python.org/library/exceptions.html
# Catch exceptions with try
try:
f = open('noFile.txt')
except IOError as e:
print('Oh no, IOError:', e)
except ValueError as e:
print('Oh no, ValueError:', e)
else:
# Can put the else code in the try part, too
# Runs when try body completes with no exceptions
for line in f:
print(line, end='')
finally:
# Always executed after try, except, and else even if exceptions raised
# or hit break/continue/return statement. Good for clean-up
# f.close()
pass
# Exceptions in a while loop
while True:
try:
n = input('Please enter an integer: ')
n = int(n)
break
except ValueError:
print('Input not an integer, please try again: ')
print('Correct input!')
# Raise own exceptions
try:
for line in readDocFile('noFile.txt'):
print(line.strip())
except ValueError as e:
print('Bad filename:', e)
testBool = True
if testBool:
raise CustomException('NOOOOOO!')
# Assert that input is correct
grades = [79, 92, 84]
assert not len(grades) == 0, 'no grades data'
return 0
def readDocFile(filename):
if filename.endswith('.doc'):
f = open(filename)
return f.readlines()
else:
raise ValueError('Filename must end with .doc')
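# A minimal companion sketch (not part of the original examples): the `with`
# statement gives the same cleanup guarantee as the try/finally shown in main().
def readDocFileWith(filename):
    '''Hypothetical variant of readDocFile, shown only for illustration.'''
    if not filename.endswith('.doc'):
        raise ValueError('Filename must end with .doc')
    with open(filename) as f:  # closed automatically, even if an exception occurs
        return f.readlines()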
class CustomException(Exception):
def __init__(self, error):
        super(CustomException, self).__init__(error)
print(error)
if __name__ == '__main__':
main()
| mit |
BizzCloud/PosBox | addons/auth_signup/__openerp__.py | 62 | 1573 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2009-today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
{
'name': 'Signup',
'description': """
Allow users to sign up and reset their password
===============================================
""",
'author': 'OpenERP SA',
'version': '1.0',
'category': 'Authentication',
'website': 'http://www.openerp.com',
'installable': True,
'auto_install': True,
'depends': [
'base_setup',
'email_template',
'web',
],
'data': [
'auth_signup_data.xml',
'res_config.xml',
'res_users_view.xml',
'views/auth_signup_login.xml',
],
'bootstrap': True,
}
| agpl-3.0 |
mitsuhiko/badideas | implicitself.py | 1 | 3083 | # -*- coding: utf-8 -*-
"""
implicitself
~~~~~~~~~~~~
Implements a bytecode hack and metaclass to make the self
implicit in functions.
:copyright: (c) Copyright 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import opcode
from types import FunctionType, CodeType
HAVE_ARGUMENT = opcode.HAVE_ARGUMENT
LOAD_FAST = opcode.opmap['LOAD_FAST']
STORE_FAST = opcode.opmap['STORE_FAST']
LOAD_GLOBAL = opcode.opmap['LOAD_GLOBAL']
STORE_GLOBAL = opcode.opmap['STORE_GLOBAL']
LOAD_ATTR = opcode.opmap['LOAD_ATTR']
STORE_ATTR = opcode.opmap['STORE_ATTR']
LOAD_NAME = opcode.opmap['LOAD_NAME']
STORE_NAME = opcode.opmap['STORE_NAME']
def disassemble(code):
code = map(ord, code)
i = 0
n = len(code)
while i < n:
op = code[i]
i += 1
if op >= HAVE_ARGUMENT:
oparg = code[i] | code[i + 1] << 8
i += 2
else:
oparg = None
yield op, oparg
def implicit_self(function):
code = function.func_code
bytecode, varnames, names = inject_self(code)
function.func_code = CodeType(code.co_argcount + 1, code.co_nlocals + 1,
code.co_stacksize, code.co_flags, bytecode, code.co_consts, names,
varnames, code.co_filename, code.co_name, code.co_firstlineno,
code.co_lnotab, code.co_freevars, code.co_cellvars)
def inject_self(code):
varnames = ('self',) + tuple(n for i, n in enumerate(code.co_varnames))
names = tuple(n for i, n in enumerate(code.co_names))
bytecode = []
for op, arg in disassemble(code.co_code):
if op in (LOAD_FAST, STORE_FAST):
arg = varnames.index(code.co_varnames[arg])
elif op in (LOAD_GLOBAL, STORE_GLOBAL, LOAD_NAME, STORE_NAME):
if code.co_names[arg] == 'self':
op = LOAD_FAST if op in (LOAD_GLOBAL, LOAD_NAME) \
else STORE_FAST
arg = 0
else:
arg = names.index(code.co_names[arg])
elif op in (LOAD_ATTR, STORE_ATTR):
arg = names.index(code.co_names[arg])
bytecode.append(chr(op))
if op >= opcode.HAVE_ARGUMENT:
bytecode.append(chr(arg & 0xff))
bytecode.append(chr(arg >> 8))
return ''.join(bytecode), varnames, names
class ImplicitSelfType(type):
def __new__(cls, name, bases, d):
for key, value in d.iteritems():
if isinstance(value, FunctionType):
implicit_self(value)
return type.__new__(cls, name, bases, d)
class ImplicitSelf(object):
__metaclass__ = ImplicitSelfType
if __name__ == '__main__':
import hashlib
class User(ImplicitSelf):
def __init__(username, password):
self.username = username
self.set_password(password)
def set_password(password):
self.hash = hashlib.sha1(password).hexdigest()
def check_password(password):
return hashlib.sha1(password).hexdigest() == self.hash
u = User('mitsuhiko', 'default')
print u.__dict__
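    # Illustrative continuation of the demo: although the methods above were
    # written without an explicit `self`, they behave like ordinary bound methods.
    print u.check_password('default')   # True
    print u.check_password('wrong')     # False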
| bsd-3-clause |
naucoin/VTKSlicerWidgets | Examples/Rendering/Python/CSpline.py | 9 | 3210 | #!/usr/bin/env python
# This example demonstrates the use of vtkCardinalSpline.
# It creates random points and connects them with a spline
import vtk
from vtk.util.colors import tomato, banana
# This will be used later to get random numbers.
math = vtk.vtkMath()
# Total number of points.
numberOfInputPoints = 10
# One spline for each direction.
aSplineX = vtk.vtkCardinalSpline()
aSplineY = vtk.vtkCardinalSpline()
aSplineZ = vtk.vtkCardinalSpline()
# Generate random (pivot) points and add the corresponding
# coordinates to the splines.
# aSplineX will interpolate the x values of the points
# aSplineY will interpolate the y values of the points
# aSplineZ will interpolate the z values of the points
inputPoints = vtk.vtkPoints()
for i in range(0, numberOfInputPoints):
x = math.Random(0, 1)
y = math.Random(0, 1)
z = math.Random(0, 1)
aSplineX.AddPoint(i, x)
aSplineY.AddPoint(i, y)
aSplineZ.AddPoint(i, z)
inputPoints.InsertPoint(i, x, y, z)
# The following section will create glyphs for the pivot points
# in order to make the effect of the spline more clear.
# Create a polydata to be glyphed.
inputData = vtk.vtkPolyData()
inputData.SetPoints(inputPoints)
# Use sphere as glyph source.
balls = vtk.vtkSphereSource()
balls.SetRadius(.01)
balls.SetPhiResolution(10)
balls.SetThetaResolution(10)
glyphPoints = vtk.vtkGlyph3D()
glyphPoints.SetInput(inputData)
glyphPoints.SetSource(balls.GetOutput())
glyphMapper = vtk.vtkPolyDataMapper()
glyphMapper.SetInputConnection(glyphPoints.GetOutputPort())
glyph = vtk.vtkActor()
glyph.SetMapper(glyphMapper)
glyph.GetProperty().SetDiffuseColor(tomato)
glyph.GetProperty().SetSpecular(.3)
glyph.GetProperty().SetSpecularPower(30)
# Generate the polyline for the spline.
points = vtk.vtkPoints()
profileData = vtk.vtkPolyData()
# Number of points on the spline
numberOfOutputPoints = 400
# Interpolate x, y and z by using the three spline filters and
# create new points
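# (t maps the output index i onto the pivot-point parameter range
# 0 .. numberOfInputPoints-1 used when the splines were built above)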
for i in range(0, numberOfOutputPoints):
t = (numberOfInputPoints-1.0)/(numberOfOutputPoints-1.0)*i
points.InsertPoint(i, aSplineX.Evaluate(t), aSplineY.Evaluate(t),
aSplineZ.Evaluate(t))
# Create the polyline.
lines = vtk.vtkCellArray()
lines.InsertNextCell(numberOfOutputPoints)
for i in range(0, numberOfOutputPoints):
lines.InsertCellPoint(i)
profileData.SetPoints(points)
profileData.SetLines(lines)
# Add thickness to the resulting line.
profileTubes = vtk.vtkTubeFilter()
profileTubes.SetNumberOfSides(8)
profileTubes.SetInput(profileData)
profileTubes.SetRadius(.005)
profileMapper = vtk.vtkPolyDataMapper()
profileMapper.SetInputConnection(profileTubes.GetOutputPort())
profile = vtk.vtkActor()
profile.SetMapper(profileMapper)
profile.GetProperty().SetDiffuseColor(banana)
profile.GetProperty().SetSpecular(.3)
profile.GetProperty().SetSpecularPower(30)
# Now create the RenderWindow, Renderer and Interactor
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors
ren.AddActor(glyph)
ren.AddActor(profile)
renWin.SetSize(500, 500)
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/numpy/f2py/tests/test_return_real.py | 145 | 5403 | from __future__ import division, absolute_import, print_function
from numpy import array
from numpy.compat import long
from numpy.testing import run_module_suite, assert_, assert_raises, dec
import util
class TestReturnReal(util.F2PyTest):
def check_function(self, t):
if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
err = 1e-5
else:
err = 0.0
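        # t0/t4/s0/s4 exercise the single-precision (real*4) routines, so a small
        # rounding tolerance is allowed; the double-precision variants must match exactly.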
assert_(abs(t(234) - 234.0) <= err)
assert_(abs(t(234.6) - 234.6) <= err)
assert_(abs(t(long(234)) - 234.0) <= err)
assert_(abs(t('234') - 234) <= err)
assert_(abs(t('234.6') - 234.6) <= err)
assert_(abs(t(-234) + 234) <= err)
assert_(abs(t([234]) - 234) <= err)
assert_(abs(t((234,)) - 234.) <= err)
assert_(abs(t(array(234)) - 234.) <= err)
assert_(abs(t(array([234])) - 234.) <= err)
assert_(abs(t(array([[234]])) - 234.) <= err)
assert_(abs(t(array([234], 'b')) + 22) <= err)
assert_(abs(t(array([234], 'h')) - 234.) <= err)
assert_(abs(t(array([234], 'i')) - 234.) <= err)
assert_(abs(t(array([234], 'l')) - 234.) <= err)
assert_(abs(t(array([234], 'B')) - 234.) <= err)
assert_(abs(t(array([234], 'f')) - 234.) <= err)
assert_(abs(t(array([234], 'd')) - 234.) <= err)
if t.__doc__.split()[0] in ['t0', 't4', 's0', 's4']:
assert_(t(1e200) == t(1e300)) # inf
#assert_raises(ValueError, t, array([234], 'S1'))
assert_raises(ValueError, t, 'abc')
assert_raises(IndexError, t, [])
assert_raises(IndexError, t, ())
assert_raises(Exception, t, t)
assert_raises(Exception, t, {})
try:
r = t(10 ** 400)
assert_(repr(r) in ['inf', 'Infinity'], repr(r))
except OverflowError:
pass
class TestCReturnReal(TestReturnReal):
suffix = ".pyf"
module_name = "c_ext_return_real"
code = """
python module c_ext_return_real
usercode \'\'\'
float t4(float value) { return value; }
void s4(float *t4, float value) { *t4 = value; }
double t8(double value) { return value; }
void s8(double *t8, double value) { *t8 = value; }
\'\'\'
interface
function t4(value)
real*4 intent(c) :: t4,value
end
function t8(value)
real*8 intent(c) :: t8,value
end
subroutine s4(t4,value)
intent(c) s4
real*4 intent(out) :: t4
real*4 intent(c) :: value
end
subroutine s8(t8,value)
intent(c) s8
real*8 intent(out) :: t8
real*8 intent(c) :: value
end
end interface
end python module c_ext_return_real
"""
@dec.slow
def test_all(self):
for name in "t4,t8,s4,s8".split(","):
self.check_function(getattr(self.module, name))
class TestF77ReturnReal(TestReturnReal):
code = """
function t0(value)
real value
real t0
t0 = value
end
function t4(value)
real*4 value
real*4 t4
t4 = value
end
function t8(value)
real*8 value
real*8 t8
t8 = value
end
function td(value)
double precision value
double precision td
td = value
end
subroutine s0(t0,value)
real value
real t0
cf2py intent(out) t0
t0 = value
end
subroutine s4(t4,value)
real*4 value
real*4 t4
cf2py intent(out) t4
t4 = value
end
subroutine s8(t8,value)
real*8 value
real*8 t8
cf2py intent(out) t8
t8 = value
end
subroutine sd(td,value)
double precision value
double precision td
cf2py intent(out) td
td = value
end
"""
@dec.slow
def test_all(self):
for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","):
self.check_function(getattr(self.module, name))
class TestF90ReturnReal(TestReturnReal):
suffix = ".f90"
code = """
module f90_return_real
contains
function t0(value)
real :: value
real :: t0
t0 = value
end function t0
function t4(value)
real(kind=4) :: value
real(kind=4) :: t4
t4 = value
end function t4
function t8(value)
real(kind=8) :: value
real(kind=8) :: t8
t8 = value
end function t8
function td(value)
double precision :: value
double precision :: td
td = value
end function td
subroutine s0(t0,value)
real :: value
real :: t0
!f2py intent(out) t0
t0 = value
end subroutine s0
subroutine s4(t4,value)
real(kind=4) :: value
real(kind=4) :: t4
!f2py intent(out) t4
t4 = value
end subroutine s4
subroutine s8(t8,value)
real(kind=8) :: value
real(kind=8) :: t8
!f2py intent(out) t8
t8 = value
end subroutine s8
subroutine sd(td,value)
double precision :: value
double precision :: td
!f2py intent(out) td
td = value
end subroutine sd
end module f90_return_real
"""
@dec.slow
def test_all(self):
for name in "t0,t4,t8,td,s0,s4,s8,sd".split(","):
self.check_function(getattr(self.module.f90_return_real, name))
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
Dm47021/Android_kernel_f6mt_aosp_jb-rebase | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by comm/pid and syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
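# Typical workflow (the perf record line is an assumption about how trace data
# is gathered; it is not enforced by this script):
#   perf record -a -e raw_syscalls:sys_enter -- sleep 10
#   perf script -s syscall-counts-by-pid.py [comm|pid]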
| gpl-2.0 |
d3banjan/polyamide | webdev/lib/python2.7/site-packages/django/contrib/humanize/templatetags/humanize.py | 526 | 9442 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import re
from datetime import date, datetime
from decimal import Decimal
from django import template
from django.conf import settings
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.formats import number_format
from django.utils.safestring import mark_safe
from django.utils.timezone import is_aware, utc
from django.utils.translation import pgettext, ugettext as _, ungettext
register = template.Library()
@register.filter(is_safe=True)
def ordinal(value):
"""
Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
3 is '3rd', etc. Works for any integer.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
suffixes = (_('th'), _('st'), _('nd'), _('rd'), _('th'), _('th'), _('th'), _('th'), _('th'), _('th'))
if value % 100 in (11, 12, 13): # special case
return mark_safe("%d%s" % (value, suffixes[0]))
# Mark value safe so i18n does not break with <sup> or <sub> see #19988
return mark_safe("%d%s" % (value, suffixes[value % 10]))
@register.filter(is_safe=True)
def intcomma(value, use_l10n=True):
"""
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
if settings.USE_L10N and use_l10n:
try:
if not isinstance(value, (float, Decimal)):
value = int(value)
except (TypeError, ValueError):
return intcomma(value, False)
else:
return number_format(value, force_grouping=True)
orig = force_text(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new, use_l10n)
# A tuple of standard large number to their converters
intword_converters = (
(6, lambda number: (
ungettext('%(value).1f million', '%(value).1f million', number),
ungettext('%(value)s million', '%(value)s million', number),
)),
(9, lambda number: (
ungettext('%(value).1f billion', '%(value).1f billion', number),
ungettext('%(value)s billion', '%(value)s billion', number),
)),
(12, lambda number: (
ungettext('%(value).1f trillion', '%(value).1f trillion', number),
ungettext('%(value)s trillion', '%(value)s trillion', number),
)),
(15, lambda number: (
ungettext('%(value).1f quadrillion', '%(value).1f quadrillion', number),
ungettext('%(value)s quadrillion', '%(value)s quadrillion', number),
)),
(18, lambda number: (
ungettext('%(value).1f quintillion', '%(value).1f quintillion', number),
ungettext('%(value)s quintillion', '%(value)s quintillion', number),
)),
(21, lambda number: (
ungettext('%(value).1f sextillion', '%(value).1f sextillion', number),
ungettext('%(value)s sextillion', '%(value)s sextillion', number),
)),
(24, lambda number: (
ungettext('%(value).1f septillion', '%(value).1f septillion', number),
ungettext('%(value)s septillion', '%(value)s septillion', number),
)),
(27, lambda number: (
ungettext('%(value).1f octillion', '%(value).1f octillion', number),
ungettext('%(value)s octillion', '%(value)s octillion', number),
)),
(30, lambda number: (
ungettext('%(value).1f nonillion', '%(value).1f nonillion', number),
ungettext('%(value)s nonillion', '%(value)s nonillion', number),
)),
(33, lambda number: (
ungettext('%(value).1f decillion', '%(value).1f decillion', number),
ungettext('%(value)s decillion', '%(value)s decillion', number),
)),
(100, lambda number: (
ungettext('%(value).1f googol', '%(value).1f googol', number),
ungettext('%(value)s googol', '%(value)s googol', number),
)),
)
@register.filter(is_safe=False)
def intword(value):
"""
Converts a large integer to a friendly text representation. Works best
for numbers over 1 million. For example, 1000000 becomes '1.0 million',
1200000 becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1000000:
return value
def _check_for_i18n(value, float_formatted, string_formatted):
"""
Use the i18n enabled defaultfilters.floatformat if possible
"""
if settings.USE_L10N:
value = defaultfilters.floatformat(value, 1)
template = string_formatted
else:
template = float_formatted
return template % {'value': value}
for exponent, converters in intword_converters:
large_number = 10 ** exponent
if value < large_number * 1000:
new_value = value / float(large_number)
return _check_for_i18n(new_value, *converters(new_value))
return value
@register.filter(is_safe=True)
def apnumber(value):
"""
For numbers 1-9, returns the number spelled out. Otherwise, returns the
number. This follows Associated Press style.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if not 0 < value < 10:
return value
return (_('one'), _('two'), _('three'), _('four'), _('five'),
_('six'), _('seven'), _('eight'), _('nine'))[value - 1]
# Perform the comparison in the default time zone when USE_TZ = True
# (unless a specific time zone has been applied with the |timezone filter).
@register.filter(expects_localtime=True)
def naturalday(value, arg=None):
"""
    For date values that are yesterday, today or tomorrow compared to the
    present day, returns the corresponding string ('yesterday', 'today' or
    'tomorrow'). Otherwise, returns a string formatted according to
    settings.DATE_FORMAT.
"""
try:
tzinfo = getattr(value, 'tzinfo', None)
value = date(value.year, value.month, value.day)
except AttributeError:
# Passed value wasn't a date object
return value
except ValueError:
# Date arguments out of range
return value
today = datetime.now(tzinfo).date()
delta = value - today
if delta.days == 0:
return _('today')
elif delta.days == 1:
return _('tomorrow')
elif delta.days == -1:
return _('yesterday')
return defaultfilters.date(value, arg)
# This filter doesn't require expects_localtime=True because it deals properly
# with both naive and aware datetimes. Therefore avoid the cost of conversion.
@register.filter
def naturaltime(value):
"""
    For date and time values, returns a string describing how many seconds,
    minutes or hours ago (or from now) the value is, compared to the current
    timestamp.
"""
if not isinstance(value, date): # datetime is a subclass of date
return value
now = datetime.now(utc if is_aware(value) else None)
if value < now:
delta = now - value
if delta.days != 0:
return pgettext(
'naturaltime', '%(delta)s ago'
) % {'delta': defaultfilters.timesince(value, now)}
elif delta.seconds == 0:
return _('now')
elif delta.seconds < 60:
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a second ago', '%(count)s seconds ago', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a minute ago', '%(count)s minutes ago', count
) % {'count': count}
else:
count = delta.seconds // 60 // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'an hour ago', '%(count)s hours ago', count
) % {'count': count}
else:
delta = value - now
if delta.days != 0:
return pgettext(
'naturaltime', '%(delta)s from now'
) % {'delta': defaultfilters.timeuntil(value, now)}
elif delta.seconds == 0:
return _('now')
elif delta.seconds < 60:
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a second from now', '%(count)s seconds from now', delta.seconds
) % {'count': delta.seconds}
elif delta.seconds // 60 < 60:
count = delta.seconds // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'a minute from now', '%(count)s minutes from now', count
) % {'count': count}
else:
count = delta.seconds // 60 // 60
return ungettext(
# Translators: please keep a non-breaking space (U+00A0)
# between count and time unit.
'an hour from now', '%(count)s hours from now', count
) % {'count': count}
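# Minimal usage sketch (assumes 'django.contrib.humanize' is in INSTALLED_APPS;
# the template snippet is illustrative and not part of this module):
#
#   {% load humanize %}
#   {{ 45000|intcomma }}    {# -> '45,000' #}
#   {{ 3|ordinal }}         {# -> '3rd' #}
#   {{ 1200000|intword }}   {# -> '1.2 million' #}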
| bsd-2-clause |
staticlibs/android-ndk-r9d-arm-linux-androideabi-4.8 | lib/python2.7/test/test_ast.py | 36 | 24760 | import sys, itertools, unittest
from test import test_support
import ast
def to_tuple(t):
if t is None or isinstance(t, (basestring, int, long, complex)):
return t
elif isinstance(t, list):
return [to_tuple(e) for e in t]
result = [t.__class__.__name__]
if hasattr(t, 'lineno') and hasattr(t, 'col_offset'):
result.append((t.lineno, t.col_offset))
if t._fields is None:
return tuple(result)
for f in t._fields:
result.append(to_tuple(getattr(t, f)))
return tuple(result)
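# For instance, each AST node is rendered as its class name, an optional
# (lineno, col_offset) pair, and the converted values of its fields; the
# expected-result tuples compared against in the tests below use this form.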
# These tests are compiled through "exec"
# There should be at least one test per statement
exec_tests = [
# None
"None",
# FunctionDef
"def f(): pass",
# FunctionDef with arg
"def f(a): pass",
# FunctionDef with arg and default value
"def f(a=0): pass",
# FunctionDef with varargs
"def f(*args): pass",
# FunctionDef with kwargs
"def f(**kwargs): pass",
# FunctionDef with all kind of args
"def f(a, b=1, c=None, d=[], e={}, *args, **kwargs): pass",
# ClassDef
"class C:pass",
# ClassDef, new style class
"class C(object): pass",
# Return
"def f():return 1",
# Delete
"del v",
# Assign
"v = 1",
# AugAssign
"v += 1",
# Print
"print >>f, 1, ",
# For
"for v in v:pass",
# While
"while v:pass",
# If
"if v:pass",
# Raise
"raise Exception, 'string'",
# TryExcept
"try:\n pass\nexcept Exception:\n pass",
# TryFinally
"try:\n pass\nfinally:\n pass",
# Assert
"assert v",
# Import
"import sys",
# ImportFrom
"from sys import v",
# Exec
"exec 'v'",
# Global
"global v",
# Expr
"1",
# Pass,
"pass",
# Break
"break",
# Continue
"continue",
# for statements with naked tuples (see http://bugs.python.org/issue6704)
"for a,b in c: pass",
"[(a,b) for a,b in c]",
"((a,b) for a,b in c)",
"((a,b) for (a,b) in c)",
# Multiline generator expression (test for .lineno & .col_offset)
"""(
(
Aa
,
Bb
)
for
Aa
,
Bb in Cc
)""",
# dictcomp
"{a : b for w in x for m in p if g}",
# dictcomp with naked tuple
"{a : b for v,w in x}",
# setcomp
"{r for l in x if g}",
# setcomp with naked tuple
"{r for l,m in x}",
]
# These are compiled through "single"
# because of overlap with "eval", it just tests what
# can't be tested with "eval"
single_tests = [
"1+2"
]
# These are compiled through "eval"
# It should test all expressions
eval_tests = [
# None
"None",
# BoolOp
"a and b",
# BinOp
"a + b",
# UnaryOp
"not v",
# Lambda
"lambda:None",
# Dict
"{ 1:2 }",
# Empty dict
"{}",
# Set
"{None,}",
# Multiline dict (test for .lineno & .col_offset)
"""{
1
:
2
}""",
# ListComp
"[a for b in c if d]",
# GeneratorExp
"(a for b in c if d)",
# Yield - yield expressions can't work outside a function
#
# Compare
"1 < 2 < 3",
# Call
"f(1,2,c=3,*d,**e)",
# Repr
"`v`",
# Num
"10L",
# Str
"'string'",
# Attribute
"a.b",
# Subscript
"a[b:c]",
# Name
"v",
# List
"[1,2,3]",
# Empty list
"[]",
# Tuple
"1,2,3",
# Tuple
"(1,2,3)",
# Empty tuple
"()",
# Combination
"a.b.c.d(a.b[1:2])",
]
# TODO: expr_context, slice, boolop, operator, unaryop, cmpop, comprehension
# excepthandler, arguments, keywords, alias
class AST_Tests(unittest.TestCase):
def _assertTrueorder(self, ast_node, parent_pos):
if not isinstance(ast_node, ast.AST) or ast_node._fields is None:
return
if isinstance(ast_node, (ast.expr, ast.stmt, ast.excepthandler)):
node_pos = (ast_node.lineno, ast_node.col_offset)
self.assertTrue(node_pos >= parent_pos)
parent_pos = (ast_node.lineno, ast_node.col_offset)
for name in ast_node._fields:
value = getattr(ast_node, name)
if isinstance(value, list):
for child in value:
self._assertTrueorder(child, parent_pos)
elif value is not None:
self._assertTrueorder(value, parent_pos)
def test_AST_objects(self):
x = ast.AST()
self.assertEqual(x._fields, ())
with self.assertRaises(AttributeError):
x.vararg
with self.assertRaises(AttributeError):
x.foobar = 21
with self.assertRaises(AttributeError):
ast.AST(lineno=2)
with self.assertRaises(TypeError):
# "_ast.AST constructor takes 0 positional arguments"
ast.AST(2)
def test_snippets(self):
for input, output, kind in ((exec_tests, exec_results, "exec"),
(single_tests, single_results, "single"),
(eval_tests, eval_results, "eval")):
for i, o in itertools.izip(input, output):
ast_tree = compile(i, "?", kind, ast.PyCF_ONLY_AST)
self.assertEqual(to_tuple(ast_tree), o)
self._assertTrueorder(ast_tree, (0, 0))
def test_slice(self):
slc = ast.parse("x[::]").body[0].value.slice
self.assertIsNone(slc.upper)
self.assertIsNone(slc.lower)
self.assertIsInstance(slc.step, ast.Name)
self.assertEqual(slc.step.id, "None")
def test_from_import(self):
im = ast.parse("from . import y").body[0]
self.assertIsNone(im.module)
def test_non_interned_future_from_ast(self):
mod = ast.parse("from __future__ import division")
self.assertIsInstance(mod.body[0], ast.ImportFrom)
mod.body[0].module = " __future__ ".strip()
compile(mod, "<test>", "exec")
def test_base_classes(self):
self.assertTrue(issubclass(ast.For, ast.stmt))
self.assertTrue(issubclass(ast.Name, ast.expr))
self.assertTrue(issubclass(ast.stmt, ast.AST))
self.assertTrue(issubclass(ast.expr, ast.AST))
self.assertTrue(issubclass(ast.comprehension, ast.AST))
self.assertTrue(issubclass(ast.Gt, ast.AST))
def test_field_attr_existence(self):
for name, item in ast.__dict__.iteritems():
if isinstance(item, type) and name != 'AST' and name[0].isupper():
x = item()
if isinstance(x, ast.AST):
self.assertEqual(type(x._fields), tuple)
def test_arguments(self):
x = ast.arguments()
self.assertEqual(x._fields, ('args', 'vararg', 'kwarg', 'defaults'))
with self.assertRaises(AttributeError):
x.vararg
x = ast.arguments(1, 2, 3, 4)
self.assertEqual(x.vararg, 2)
def test_field_attr_writable(self):
x = ast.Num()
# We can assign to _fields
x._fields = 666
self.assertEqual(x._fields, 666)
def test_classattrs(self):
x = ast.Num()
self.assertEqual(x._fields, ('n',))
with self.assertRaises(AttributeError):
x.n
x = ast.Num(42)
self.assertEqual(x.n, 42)
with self.assertRaises(AttributeError):
x.lineno
with self.assertRaises(AttributeError):
x.foobar
x = ast.Num(lineno=2)
self.assertEqual(x.lineno, 2)
x = ast.Num(42, lineno=0)
self.assertEqual(x.lineno, 0)
self.assertEqual(x._fields, ('n',))
self.assertEqual(x.n, 42)
self.assertRaises(TypeError, ast.Num, 1, 2)
self.assertRaises(TypeError, ast.Num, 1, 2, lineno=0)
def test_module(self):
body = [ast.Num(42)]
x = ast.Module(body)
self.assertEqual(x.body, body)
def test_nodeclasses(self):
        # Zero-argument constructor explicitly allowed
x = ast.BinOp()
self.assertEqual(x._fields, ('left', 'op', 'right'))
# Random attribute allowed too
x.foobarbaz = 5
self.assertEqual(x.foobarbaz, 5)
n1 = ast.Num(1)
n3 = ast.Num(3)
addop = ast.Add()
x = ast.BinOp(n1, addop, n3)
self.assertEqual(x.left, n1)
self.assertEqual(x.op, addop)
self.assertEqual(x.right, n3)
x = ast.BinOp(1, 2, 3)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
x = ast.BinOp(1, 2, 3, lineno=0)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
self.assertEqual(x.lineno, 0)
# node raises exception when not given enough arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2)
# node raises exception when given too many arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4)
# node raises exception when not given enough arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, lineno=0)
# node raises exception when given too many arguments
self.assertRaises(TypeError, ast.BinOp, 1, 2, 3, 4, lineno=0)
# can set attributes through kwargs too
x = ast.BinOp(left=1, op=2, right=3, lineno=0)
self.assertEqual(x.left, 1)
self.assertEqual(x.op, 2)
self.assertEqual(x.right, 3)
self.assertEqual(x.lineno, 0)
# Random kwargs also allowed
x = ast.BinOp(1, 2, 3, foobarbaz=42)
self.assertEqual(x.foobarbaz, 42)
def test_no_fields(self):
# this used to fail because Sub._fields was None
x = ast.Sub()
self.assertEqual(x._fields, ())
def test_pickling(self):
import pickle
mods = [pickle]
try:
import cPickle
mods.append(cPickle)
except ImportError:
pass
protocols = [0, 1, 2]
for mod in mods:
for protocol in protocols:
for ast in (compile(i, "?", "exec", 0x400) for i in exec_tests):
ast2 = mod.loads(mod.dumps(ast, protocol))
self.assertEqual(to_tuple(ast2), to_tuple(ast))
    def test_invalid_identifier(self):
m = ast.Module([ast.Expr(ast.Name(u"x", ast.Load()))])
ast.fix_missing_locations(m)
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
self.assertIn("identifier must be of type str", str(cm.exception))
def test_invalid_string(self):
m = ast.Module([ast.Expr(ast.Str(43))])
ast.fix_missing_locations(m)
with self.assertRaises(TypeError) as cm:
compile(m, "<test>", "exec")
self.assertIn("string must be of type str or uni", str(cm.exception))
class ASTHelpers_Test(unittest.TestCase):
def test_parse(self):
a = ast.parse('foo(1 + 1)')
b = compile('foo(1 + 1)', '<unknown>', 'exec', ast.PyCF_ONLY_AST)
self.assertEqual(ast.dump(a), ast.dump(b))
def test_dump(self):
node = ast.parse('spam(eggs, "and cheese")')
self.assertEqual(ast.dump(node),
"Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load()), "
"args=[Name(id='eggs', ctx=Load()), Str(s='and cheese')], "
"keywords=[], starargs=None, kwargs=None))])"
)
self.assertEqual(ast.dump(node, annotate_fields=False),
"Module([Expr(Call(Name('spam', Load()), [Name('eggs', Load()), "
"Str('and cheese')], [], None, None))])"
)
self.assertEqual(ast.dump(node, include_attributes=True),
"Module(body=[Expr(value=Call(func=Name(id='spam', ctx=Load(), "
"lineno=1, col_offset=0), args=[Name(id='eggs', ctx=Load(), "
"lineno=1, col_offset=5), Str(s='and cheese', lineno=1, "
"col_offset=11)], keywords=[], starargs=None, kwargs=None, "
"lineno=1, col_offset=0), lineno=1, col_offset=0)])"
)
def test_copy_location(self):
src = ast.parse('1 + 1', mode='eval')
src.body.right = ast.copy_location(ast.Num(2), src.body.right)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=1, col_offset=0), '
'op=Add(), right=Num(n=2, lineno=1, col_offset=4), lineno=1, '
'col_offset=0))'
)
def test_fix_missing_locations(self):
src = ast.parse('write("spam")')
src.body.append(ast.Expr(ast.Call(ast.Name('spam', ast.Load()),
[ast.Str('eggs')], [], None, None)))
self.assertEqual(src, ast.fix_missing_locations(src))
self.assertEqual(ast.dump(src, include_attributes=True),
"Module(body=[Expr(value=Call(func=Name(id='write', ctx=Load(), "
"lineno=1, col_offset=0), args=[Str(s='spam', lineno=1, "
"col_offset=6)], keywords=[], starargs=None, kwargs=None, "
"lineno=1, col_offset=0), lineno=1, col_offset=0), "
"Expr(value=Call(func=Name(id='spam', ctx=Load(), lineno=1, "
"col_offset=0), args=[Str(s='eggs', lineno=1, col_offset=0)], "
"keywords=[], starargs=None, kwargs=None, lineno=1, "
"col_offset=0), lineno=1, col_offset=0)])"
)
def test_increment_lineno(self):
src = ast.parse('1 + 1', mode='eval')
self.assertEqual(ast.increment_lineno(src, n=3), src)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), '
'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, '
'col_offset=0))'
)
# issue10869: do not increment lineno of root twice
src = ast.parse('1 + 1', mode='eval')
self.assertEqual(ast.increment_lineno(src.body, n=3), src.body)
self.assertEqual(ast.dump(src, include_attributes=True),
'Expression(body=BinOp(left=Num(n=1, lineno=4, col_offset=0), '
'op=Add(), right=Num(n=1, lineno=4, col_offset=4), lineno=4, '
'col_offset=0))'
)
def test_iter_fields(self):
node = ast.parse('foo()', mode='eval')
d = dict(ast.iter_fields(node.body))
self.assertEqual(d.pop('func').id, 'foo')
self.assertEqual(d, {'keywords': [], 'kwargs': None,
'args': [], 'starargs': None})
def test_iter_child_nodes(self):
node = ast.parse("spam(23, 42, eggs='leek')", mode='eval')
self.assertEqual(len(list(ast.iter_child_nodes(node.body))), 4)
iterator = ast.iter_child_nodes(node.body)
self.assertEqual(next(iterator).id, 'spam')
self.assertEqual(next(iterator).n, 23)
self.assertEqual(next(iterator).n, 42)
self.assertEqual(ast.dump(next(iterator)),
"keyword(arg='eggs', value=Str(s='leek'))"
)
def test_get_docstring(self):
node = ast.parse('def foo():\n """line one\n line two"""')
self.assertEqual(ast.get_docstring(node.body[0]),
'line one\nline two')
def test_literal_eval(self):
self.assertEqual(ast.literal_eval('[1, 2, 3]'), [1, 2, 3])
self.assertEqual(ast.literal_eval('{"foo": 42}'), {"foo": 42})
self.assertEqual(ast.literal_eval('(True, False, None)'), (True, False, None))
self.assertRaises(ValueError, ast.literal_eval, 'foo()')
def test_literal_eval_issue4907(self):
self.assertEqual(ast.literal_eval('2j'), 2j)
self.assertEqual(ast.literal_eval('10 + 2j'), 10 + 2j)
self.assertEqual(ast.literal_eval('1.5 - 2j'), 1.5 - 2j)
self.assertRaises(ValueError, ast.literal_eval, '2 + (3 + 4j)')
def test_main():
with test_support.check_py3k_warnings(("backquote not supported",
SyntaxWarning)):
test_support.run_unittest(AST_Tests, ASTHelpers_Test)
def main():
if __name__ != '__main__':
return
if sys.argv[1:] == ['-g']:
for statements, kind in ((exec_tests, "exec"), (single_tests, "single"),
(eval_tests, "eval")):
print kind+"_results = ["
for s in statements:
print repr(to_tuple(compile(s, "?", kind, 0x400)))+","
print "]"
print "main()"
raise SystemExit
test_main()
#### EVERYTHING BELOW IS GENERATED #####
exec_results = [
('Module', [('Expr', (1, 0), ('Name', (1, 0), 'None', ('Load',)))]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Pass', (1, 9))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',))], None, None, []), [('Pass', (1, 10))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',))], None, None, [('Num', (1, 8), 0)]), [('Pass', (1, 12))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], 'args', None, []), [('Pass', (1, 14))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, 'kwargs', []), [('Pass', (1, 17))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [('Name', (1, 6), 'a', ('Param',)), ('Name', (1, 9), 'b', ('Param',)), ('Name', (1, 14), 'c', ('Param',)), ('Name', (1, 22), 'd', ('Param',)), ('Name', (1, 28), 'e', ('Param',))], 'args', 'kwargs', [('Num', (1, 11), 1), ('Name', (1, 16), 'None', ('Load',)), ('List', (1, 24), [], ('Load',)), ('Dict', (1, 30), [], [])]), [('Pass', (1, 52))], [])]),
('Module', [('ClassDef', (1, 0), 'C', [], [('Pass', (1, 8))], [])]),
('Module', [('ClassDef', (1, 0), 'C', [('Name', (1, 8), 'object', ('Load',))], [('Pass', (1, 17))], [])]),
('Module', [('FunctionDef', (1, 0), 'f', ('arguments', [], None, None, []), [('Return', (1, 8), ('Num', (1, 15), 1))], [])]),
('Module', [('Delete', (1, 0), [('Name', (1, 4), 'v', ('Del',))])]),
('Module', [('Assign', (1, 0), [('Name', (1, 0), 'v', ('Store',))], ('Num', (1, 4), 1))]),
('Module', [('AugAssign', (1, 0), ('Name', (1, 0), 'v', ('Store',)), ('Add',), ('Num', (1, 5), 1))]),
('Module', [('Print', (1, 0), ('Name', (1, 8), 'f', ('Load',)), [('Num', (1, 11), 1)], False)]),
('Module', [('For', (1, 0), ('Name', (1, 4), 'v', ('Store',)), ('Name', (1, 9), 'v', ('Load',)), [('Pass', (1, 11))], [])]),
('Module', [('While', (1, 0), ('Name', (1, 6), 'v', ('Load',)), [('Pass', (1, 8))], [])]),
('Module', [('If', (1, 0), ('Name', (1, 3), 'v', ('Load',)), [('Pass', (1, 5))], [])]),
('Module', [('Raise', (1, 0), ('Name', (1, 6), 'Exception', ('Load',)), ('Str', (1, 17), 'string'), None)]),
('Module', [('TryExcept', (1, 0), [('Pass', (2, 2))], [('ExceptHandler', (3, 0), ('Name', (3, 7), 'Exception', ('Load',)), None, [('Pass', (4, 2))])], [])]),
('Module', [('TryFinally', (1, 0), [('Pass', (2, 2))], [('Pass', (4, 2))])]),
('Module', [('Assert', (1, 0), ('Name', (1, 7), 'v', ('Load',)), None)]),
('Module', [('Import', (1, 0), [('alias', 'sys', None)])]),
('Module', [('ImportFrom', (1, 0), 'sys', [('alias', 'v', None)], 0)]),
('Module', [('Exec', (1, 0), ('Str', (1, 5), 'v'), None, None)]),
('Module', [('Global', (1, 0), ['v'])]),
('Module', [('Expr', (1, 0), ('Num', (1, 0), 1))]),
('Module', [('Pass', (1, 0))]),
('Module', [('Break', (1, 0))]),
('Module', [('Continue', (1, 0))]),
('Module', [('For', (1, 0), ('Tuple', (1, 4), [('Name', (1, 4), 'a', ('Store',)), ('Name', (1, 6), 'b', ('Store',))], ('Store',)), ('Name', (1, 11), 'c', ('Load',)), [('Pass', (1, 14))], [])]),
('Module', [('Expr', (1, 0), ('ListComp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'a', ('Store',)), ('Name', (1, 13), 'b', ('Store',))], ('Store',)), ('Name', (1, 18), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (1, 1), ('Tuple', (1, 2), [('Name', (1, 2), 'a', ('Load',)), ('Name', (1, 4), 'b', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (1, 12), [('Name', (1, 12), 'a', ('Store',)), ('Name', (1, 14), 'b', ('Store',))], ('Store',)), ('Name', (1, 20), 'c', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('GeneratorExp', (2, 4), ('Tuple', (3, 4), [('Name', (3, 4), 'Aa', ('Load',)), ('Name', (5, 7), 'Bb', ('Load',))], ('Load',)), [('comprehension', ('Tuple', (8, 4), [('Name', (8, 4), 'Aa', ('Store',)), ('Name', (10, 4), 'Bb', ('Store',))], ('Store',)), ('Name', (10, 10), 'Cc', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Name', (1, 11), 'w', ('Store',)), ('Name', (1, 16), 'x', ('Load',)), []), ('comprehension', ('Name', (1, 22), 'm', ('Store',)), ('Name', (1, 27), 'p', ('Load',)), [('Name', (1, 32), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('DictComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), ('Name', (1, 5), 'b', ('Load',)), [('comprehension', ('Tuple', (1, 11), [('Name', (1, 11), 'v', ('Store',)), ('Name', (1, 13), 'w', ('Store',))], ('Store',)), ('Name', (1, 18), 'x', ('Load',)), [])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 12), 'x', ('Load',)), [('Name', (1, 17), 'g', ('Load',))])]))]),
('Module', [('Expr', (1, 0), ('SetComp', (1, 1), ('Name', (1, 1), 'r', ('Load',)), [('comprehension', ('Tuple', (1, 7), [('Name', (1, 7), 'l', ('Store',)), ('Name', (1, 9), 'm', ('Store',))], ('Store',)), ('Name', (1, 14), 'x', ('Load',)), [])]))]),
]
single_results = [
('Interactive', [('Expr', (1, 0), ('BinOp', (1, 0), ('Num', (1, 0), 1), ('Add',), ('Num', (1, 2), 2)))]),
]
eval_results = [
('Expression', ('Name', (1, 0), 'None', ('Load',))),
('Expression', ('BoolOp', (1, 0), ('And',), [('Name', (1, 0), 'a', ('Load',)), ('Name', (1, 6), 'b', ('Load',))])),
('Expression', ('BinOp', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Add',), ('Name', (1, 4), 'b', ('Load',)))),
('Expression', ('UnaryOp', (1, 0), ('Not',), ('Name', (1, 4), 'v', ('Load',)))),
('Expression', ('Lambda', (1, 0), ('arguments', [], None, None, []), ('Name', (1, 7), 'None', ('Load',)))),
('Expression', ('Dict', (1, 0), [('Num', (1, 2), 1)], [('Num', (1, 4), 2)])),
('Expression', ('Dict', (1, 0), [], [])),
('Expression', ('Set', (1, 0), [('Name', (1, 1), 'None', ('Load',))])),
('Expression', ('Dict', (1, 0), [('Num', (2, 6), 1)], [('Num', (4, 10), 2)])),
('Expression', ('ListComp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])),
('Expression', ('GeneratorExp', (1, 1), ('Name', (1, 1), 'a', ('Load',)), [('comprehension', ('Name', (1, 7), 'b', ('Store',)), ('Name', (1, 12), 'c', ('Load',)), [('Name', (1, 17), 'd', ('Load',))])])),
('Expression', ('Compare', (1, 0), ('Num', (1, 0), 1), [('Lt',), ('Lt',)], [('Num', (1, 4), 2), ('Num', (1, 8), 3)])),
('Expression', ('Call', (1, 0), ('Name', (1, 0), 'f', ('Load',)), [('Num', (1, 2), 1), ('Num', (1, 4), 2)], [('keyword', 'c', ('Num', (1, 8), 3))], ('Name', (1, 11), 'd', ('Load',)), ('Name', (1, 15), 'e', ('Load',)))),
('Expression', ('Repr', (1, 0), ('Name', (1, 1), 'v', ('Load',)))),
('Expression', ('Num', (1, 0), 10L)),
('Expression', ('Str', (1, 0), 'string')),
('Expression', ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',))),
('Expression', ('Subscript', (1, 0), ('Name', (1, 0), 'a', ('Load',)), ('Slice', ('Name', (1, 2), 'b', ('Load',)), ('Name', (1, 4), 'c', ('Load',)), None), ('Load',))),
('Expression', ('Name', (1, 0), 'v', ('Load',))),
('Expression', ('List', (1, 0), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))),
('Expression', ('List', (1, 0), [], ('Load',))),
('Expression', ('Tuple', (1, 0), [('Num', (1, 0), 1), ('Num', (1, 2), 2), ('Num', (1, 4), 3)], ('Load',))),
('Expression', ('Tuple', (1, 1), [('Num', (1, 1), 1), ('Num', (1, 3), 2), ('Num', (1, 5), 3)], ('Load',))),
('Expression', ('Tuple', (1, 0), [], ('Load',))),
('Expression', ('Call', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Attribute', (1, 0), ('Name', (1, 0), 'a', ('Load',)), 'b', ('Load',)), 'c', ('Load',)), 'd', ('Load',)), [('Subscript', (1, 8), ('Attribute', (1, 8), ('Name', (1, 8), 'a', ('Load',)), 'b', ('Load',)), ('Slice', ('Num', (1, 12), 1), ('Num', (1, 14), 2), None), ('Load',))], [], None, None)),
]
main()
| gpl-2.0 |
eSpaceEPFL/marsissharadviewer | test/test_translations.py | 116 | 1741 | # coding=utf-8
"""Safe Translations Test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
from utilities import get_qgis_app
__author__ = '[email protected]'
__date__ = '12/10/2011'
__copyright__ = ('Copyright 2012, Australia Indonesia Facility for '
'Disaster Reduction')
import unittest
import os
from PyQt4.QtCore import QCoreApplication, QTranslator
QGIS_APP = get_qgis_app()
class SafeTranslationsTest(unittest.TestCase):
"""Test translations work."""
def setUp(self):
"""Runs before each test."""
if 'LANG' in os.environ.iterkeys():
os.environ.__delitem__('LANG')
def tearDown(self):
"""Runs after each test."""
if 'LANG' in os.environ.iterkeys():
os.environ.__delitem__('LANG')
def test_qgis_translations(self):
"""Test that translations work."""
parent_path = os.path.join(__file__, os.path.pardir, os.path.pardir)
dir_path = os.path.abspath(parent_path)
file_path = os.path.join(
dir_path, 'i18n', 'af.qm')
translator = QTranslator()
translator.load(file_path)
QCoreApplication.installTranslator(translator)
expected_message = 'Goeie more'
real_message = QCoreApplication.translate("@default", 'Good morning')
self.assertEqual(real_message, expected_message)
if __name__ == "__main__":
suite = unittest.makeSuite(SafeTranslationsTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| gpl-2.0 |
cvmfs-testing/cvmfs | add-ons/tools/legacy/downloadCatalogs.py | 12 | 5491 | #!/usr/bin/python
from urllib2 import urlopen, URLError, HTTPError
import sys
import os
import zlib
import shutil
import tempfile
from optparse import OptionParser
foundSqlite = False
foundSqlite3 = False
# figure out which sqlite module to use
# in Python 2.4 an old version is present
# which does not allow proper read out of
# long int and therefore cannot merge catalogs
try:
import sqlite3 as sqlite
foundSqlite3 = True
except:
pass
if not foundSqlite3:
try:
import sqlite
foundSqlite = True
except ImportError, e:
pass
def doHttpRequest(url):
response = urlopen(url)
return response.read()
def getRootCatalogName(repositoryUrl):
result = ""
try:
result = doHttpRequest(repositoryUrl + "/.cvmfspublished")
except:
printError("cannot load .cvmfspublished")
lines = result.split('\n')
if len(lines) < 1:
printError(".cvmfspublished is malformed")
return lines[0][1:]
def createDirectory(directory):
d = os.path.dirname(directory)
if not os.path.exists(d):
os.makedirs(d)
def getCatalogFilePath(catalogName, catalogDirectory):
return catalogDirectory + "/" + catalogName[0:2] + "/" + catalogName[2:] + "C"
def downloadCatalog(repositoryUrl, catalogName, catalogDirectory, beVerbose):
  # find out some paths and init the zlib decompressor
subdir = catalogName[0:2]
filename = catalogName[2:] + "C"
url = repositoryUrl + "/data/" + subdir + "/" + filename
destDir = catalogDirectory + "/" + subdir + "/"
dest = destDir + filename
#decoder = zlib.decompressobj()
# create target directory if not existing and open output file
createDirectory(destDir)
outputFile = open(dest, 'wb')
# download the catalog
try:
f = urlopen(url)
meta = f.info()
fileSize = int(meta.getheaders("Content-Length")[0])
if beVerbose:
print "retrieving " + catalogName + " - " + str(fileSize) + " bytes"
with open(dest, "wb") as local_file:
local_file.write(f.read())
except HTTPError, e:
    printError("HTTP: " + str(e.code) + " " + url)
except URLError, e:
    printError("URL: " + str(e.reason) + " " + url)
def decompressCatalog(filename, destination):
str_object1 = open(filename, 'rb').read()
str_object2 = zlib.decompress(str_object1)
f = open(destination, 'wb')
f.write(str_object2)
f.close()
def findNestedCatalogs(catalogName, catalogDirectory, getHistory):
catalogFile = getCatalogFilePath(catalogName, catalogDirectory)
tempFile = tempfile.NamedTemporaryFile('wb')
decompressCatalog(catalogFile, tempFile.name)
dbHandle = sqlite.connect(tempFile.name)
cursor = dbHandle.cursor()
catalogs = []
# nested catalog references
cursor.execute("SELECT sha1 FROM nested_catalogs")
result = cursor.fetchall()
for catalog in result:
catalogs.append(catalog[0])
# history references
if getHistory:
cursor.execute("SELECT value FROM properties WHERE key = 'previous_revision' LIMIT 1")
result = cursor.fetchall()
if result:
catalogs.append(result[0][0])
dbHandle.close()
tempFile.close()
return catalogs
def retrieveCatalogsRecursively(repositoryUrl, catalogName, catalogDirectory, beVerbose, getHistory):
catalogs = [catalogName]
downloads = 0
while catalogs:
catalog = catalogs.pop(0)
if os.path.exists(getCatalogFilePath(catalog, catalogDirectory)):
if beVerbose:
print "--> skipping already loaded catalog:" , catalog
continue
downloadCatalog(repositoryUrl, catalog, catalogDirectory, beVerbose)
nestedCatalogs = findNestedCatalogs(catalog, catalogDirectory, getHistory)
downloads += 1
if beVerbose:
print "--> found" , len(nestedCatalogs) , "catalog references |" , len(catalogs) , "in queue"
catalogs.extend(nestedCatalogs)
return downloads
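# Example invocation sketch (the repository URL and target directory below are
# hypothetical, for illustration only):
#
#   python downloadCatalogs.py -d /tmp/catalogs -l http://cvmfs.example.org/cvmfs/repo.example.org
#
# This downloads the root catalog and every nested catalog (plus, with -l, the
# catalog history) into /tmp/catalogs, grouped into two-character
# sub-directories just like on the server.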
def main():
usage = "usage: %prog [options] <repository url>\nThis script walks through all nested catalogs of a repository and\ndownloads these catalogs to the given destination directory\nTake care: the catalogs are saved uncompressed, so do not use cvmfs_zpipe"
parser = OptionParser(usage)
parser.add_option("-d", "--directory", dest="catalogDirectory", default="catalogs", help="the directory to download catalogs to")
parser.add_option("-m", "--merge", metavar="FILE", dest="mergeToFile", help="merge all catalogs into one given file")
parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="don't print status messages to stdout")
parser.add_option("-l", "--history", action="store_true", dest="history", default=False, help="download the catalog history")
(options, args) = parser.parse_args()
if len(args) != 1:
parser.error("Please provide the repository url as argument")
# read command line arguments
repositoryUrl = args[0]
catalogDirectory = options.catalogDirectory
merge = options.mergeToFile
verbose = options.verbose
history = options.history
# check option consistency
if os.path.exists(catalogDirectory) and os.listdir(catalogDirectory) != []:
printError("Directory '" + catalogDirectory + "' exists and is not empty")
if merge and foundSqlite and not foundSqlite3:
printError("unfortunately merging is not possible with your version of the python sqlite module")
# do the job
rootCatalog = getRootCatalogName(repositoryUrl)
numCatalogs = retrieveCatalogsRecursively(repositoryUrl, rootCatalog, catalogDirectory, verbose, history)
print "downloaded" , numCatalogs , "catalogs"
if merge:
mergeCatalogs(rootCatalog, catalogs, catalogDirectory, merge, verbose)
def printError(errorMessage):
print "[ERROR] " + errorMessage
sys.exit(1)
main()
| bsd-3-clause |
phammin1/QaManagement | QaManagement/env/Lib/site-packages/django/contrib/admin/tests.py | 229 | 6464 | import os
from unittest import SkipTest
from django.contrib.staticfiles.testing import StaticLiveServerTestCase
from django.utils.module_loading import import_string
from django.utils.translation import ugettext as _
class AdminSeleniumWebDriverTestCase(StaticLiveServerTestCase):
available_apps = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
]
webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'
@classmethod
def setUpClass(cls):
if not os.environ.get('DJANGO_SELENIUM_TESTS', False):
raise SkipTest('Selenium tests not requested')
try:
cls.selenium = import_string(cls.webdriver_class)()
except Exception as e:
raise SkipTest('Selenium webdriver "%s" not installed or not '
'operational: %s' % (cls.webdriver_class, str(e)))
# This has to be last to ensure that resources are cleaned up properly!
super(AdminSeleniumWebDriverTestCase, cls).setUpClass()
@classmethod
def _tearDownClassInternal(cls):
if hasattr(cls, 'selenium'):
cls.selenium.quit()
super(AdminSeleniumWebDriverTestCase, cls)._tearDownClassInternal()
def wait_until(self, callback, timeout=10):
"""
Helper function that blocks the execution of the tests until the
specified callback returns a value that is not falsy. This function can
be called, for example, after clicking a link or submitting a form.
See the other public methods that call this function for more details.
"""
from selenium.webdriver.support.wait import WebDriverWait
WebDriverWait(self.selenium, timeout).until(callback)
def wait_for_popup(self, num_windows=2, timeout=10):
"""
Block until `num_windows` are present (usually 2, but can be
overridden in the case of pop-ups opening other pop-ups).
"""
self.wait_until(lambda d: len(d.window_handles) == num_windows, timeout)
def wait_loaded_tag(self, tag_name, timeout=10):
"""
Helper function that blocks until the element with the given tag name
is found on the page.
"""
self.wait_for(tag_name, timeout)
def wait_for(self, css_selector, timeout=10):
"""
Helper function that blocks until a CSS selector is found on the page.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.presence_of_element_located((By.CSS_SELECTOR, css_selector)),
timeout
)
def wait_for_text(self, css_selector, text, timeout=10):
"""
Helper function that blocks until the text is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element(
(By.CSS_SELECTOR, css_selector), text),
timeout
)
def wait_for_value(self, css_selector, text, timeout=10):
"""
Helper function that blocks until the value is found in the CSS selector.
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec
self.wait_until(
ec.text_to_be_present_in_element_value(
(By.CSS_SELECTOR, css_selector), text),
timeout
)
def wait_page_loaded(self):
"""
Block until page has started to load.
"""
from selenium.common.exceptions import TimeoutException
try:
# Wait for the next page to be loaded
self.wait_loaded_tag('body')
except TimeoutException:
# IE7 occasionally returns an error "Internet Explorer cannot
# display the webpage" and doesn't load the next page. We just
# ignore it.
pass
def admin_login(self, username, password, login_url='/admin/'):
"""
Helper function to log into the admin.
"""
self.selenium.get('%s%s' % (self.live_server_url, login_url))
username_input = self.selenium.find_element_by_name('username')
username_input.send_keys(username)
password_input = self.selenium.find_element_by_name('password')
password_input.send_keys(password)
login_text = _('Log in')
self.selenium.find_element_by_xpath(
'//input[@value="%s"]' % login_text).click()
self.wait_page_loaded()
def get_css_value(self, selector, attribute):
"""
        Helper function that returns the value for the CSS attribute of a
DOM element specified by the given selector. Uses the jQuery that ships
with Django.
"""
return self.selenium.execute_script(
'return django.jQuery("%s").css("%s")' % (selector, attribute))
def get_select_option(self, selector, value):
"""
Returns the <OPTION> with the value `value` inside the <SELECT> widget
identified by the CSS selector `selector`.
"""
from selenium.common.exceptions import NoSuchElementException
options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
for option in options:
if option.get_attribute('value') == value:
return option
raise NoSuchElementException('Option "%s" not found in "%s"' % (value, selector))
def assertSelectOptions(self, selector, values):
"""
Asserts that the <SELECT> widget identified by `selector` has the
options with the given `values`.
"""
options = self.selenium.find_elements_by_css_selector('%s > option' % selector)
actual_values = []
for option in options:
actual_values.append(option.get_attribute('value'))
self.assertEqual(values, actual_values)
def has_css_class(self, selector, klass):
"""
Returns True if the element identified by `selector` has the CSS class
`klass`.
"""
return (self.selenium.find_element_by_css_selector(selector)
.get_attribute('class').find(klass) != -1)
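# Illustrative sketch of a concrete test case built on the helpers above; the
# class name, credentials and CSS selector are assumptions for illustration
# only, not part of Django's test suite:
#
#   class MyAdminSeleniumTests(AdminSeleniumWebDriverTestCase):
#       def test_login(self):
#           self.admin_login('super', 'secret', login_url='/admin/')
#           self.wait_for_text('#site-name', 'Django administration')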
| mit |
TathagataChakraborti/resource-conflicts | PLANROB-2015/seq-sat-lama/py2.5/lib/python2.5/encodings/ascii.py | 858 | 1248 | """ Python 'ascii' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.ascii_encode
decode = codecs.ascii_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.ascii_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.ascii_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
class StreamConverter(StreamWriter,StreamReader):
encode = codecs.ascii_decode
decode = codecs.ascii_encode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='ascii',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
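# Illustrative note: the encodings package locates this codec through
# getregentry(), so a plain lookup such as
#
#   import codecs
#   codecs.lookup('ascii').name   # -> 'ascii'
#
# ends up using the Codec, IncrementalEncoder/Decoder and Stream classes
# defined above.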
| mit |
AndyLavr/htc_kernel_oxp | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
if len(sys.argv) != 2:
print "Usage: %s FILE" % sys.argv[0]
sys.exit(2)
readelf = os.getenv("READELF", "readelf")
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
m = start_pattern.match(line)
if m:
check_func(func, slots, rlen_sum)
func = m.group(1)
start = long(m.group(2), 16)
end = long(m.group(3), 16)
slots = 3 * (end - start) / 16
rlen_sum = 0L
num_funcs += 1
else:
m = rlen_pattern.match(line)
if m:
rlen_sum += long(m.group(1))
check_func(func, slots, rlen_sum)
if num_errors == 0:
print "No errors detected in %u functions." % num_funcs
else:
if num_errors > 1:
err="errors"
else:
err="error"
print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
sys.exit(1)
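# Example invocation sketch (the object file name is hypothetical, for
# illustration only):
#
#   READELF=readelf python unwcheck.py vmlinux
#
# prints "No errors detected in N functions." on success, or one ERROR line
# per function whose unwind region lengths do not add up to its slot count.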
| gpl-2.0 |
asoc/snakewatch | snakewatch/main.py | 1 | 7734 | """
This file is part of snakewatch.
snakewatch is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
snakewatch is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with snakewatch. If not, see <http://www.gnu.org/licenses/>.
"""
from __future__ import print_function, absolute_import, unicode_literals, division
import argparse
import importlib
import logging
import os
import signal
import stat
import sys
from logging.handlers import RotatingFileHandler
from . import (
NAME, VERSION, DESCRIPTION, USER_PATH, URL, AUTHOR, AUTHOR_EMAIL,
LOG_FILE, LOG_LEVEL, LOG_BACKUP_COUNT, LOG_MAX_BYTES, LOG_FORMAT, LOG_TO_STDOUT
)
from .util import AbortError, get_read_object, config, ui_print
_logger = logging.getLogger()
_logger.setLevel(LOG_LEVEL)
_log_handler = logging.StreamHandler()
_log_handler.setFormatter(logging.Formatter(fmt=LOG_FORMAT))
_logger.addHandler(_log_handler)
parser = None
def get_logger(name):
"""Get a logging instance consistent with the main logger"""
if sys.version_info > (2, 7):
return _logger.getChild(name)
return logging.getLogger('.'.join([_logger.name, name]))
def release_action_resources():
"""Release all resources loaded by all actions"""
if config() is None:
return
for action in config().actions:
try:
action.release_resources()
except:
ui_print().error(
'Unable to release resources for action {}'.format(action.__class__.__name__),
str(action.cfg), sep='\n'
)
def main(initial_args=None, handle_signals=True):
global _log_handler, parser
if initial_args is None:
initial_args = sys.argv[1:]
log_to_file = True
if not os.path.exists(USER_PATH):
try:
os.makedirs(USER_PATH)
except OSError:
log_to_file = False
print('Unable to create snakewatch settings/log directory.',
'Please create the directory {}'.format(USER_PATH),
sep='\n', file=sys.stderr)
if not os.access(USER_PATH, os.W_OK):
try:
mode = stat.S_IWRITE
            if not sys.platform.startswith('win'):
st = os.stat(USER_PATH)
mode = mode | st.mode
os.chmod(USER_PATH, mode)
except OSError:
log_to_file = False
print('Unable to write to snakewatch settings/log directory.',
'Please set write permissions to the directory {}'.format(USER_PATH),
sep='\n', file=sys.stderr)
if log_to_file and not LOG_TO_STDOUT:
_logger.removeHandler(_log_handler)
_log_handler.close()
_log_handler = RotatingFileHandler(
filename=LOG_FILE,
maxBytes=LOG_MAX_BYTES,
backupCount=LOG_BACKUP_COUNT,
)
_log_handler.setFormatter(logging.Formatter(fmt=LOG_FORMAT))
_logger.addHandler(_log_handler)
parser = argparse.ArgumentParser(
prog=NAME,
description=DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter
)
global_args = parser.add_argument_group('Global')
parser.add_argument(
'-v', '--version',
action='version',
version='\n'.join([NAME, VERSION, '', '{} <{}>'.format(AUTHOR, AUTHOR_EMAIL), URL])
)
parser_config = global_args.add_mutually_exclusive_group()
parser_config.add_argument(
'-c', '--config',
help='which configuration file to use'
)
parser_config.add_argument(
'--no-config', action='store_true',
help='don\'t use any configuration file (including the default), print everything'
)
mutp = parser.add_mutually_exclusive_group()
mutp.add_argument(
'-n', '--lines',
default=0, type=int,
help='start LINES from end of the file, use -1 to start at the beginning',
)
mutp.add_argument(
'-b', '--bytes',
default=0, type=int,
help='Seek #-bytes from the start of the file before reading.'
)
watch_loc_group = global_args.add_mutually_exclusive_group()
watch_loc_group.add_argument(
'-w', '--watch',
help='which file to watch'
)
watch_loc_group.add_argument(
'-r', '--read',
action='store_true',
help='read input from stdin'
)
# Only one mode for now, so exclude all this stuff.
# import imp
# import importlib
# from snakewatch import mode as mode_package
# suffixes = tuple([suffix[0] for suffix in imp.get_suffixes()])
# mode_names = set([
# os.path.splitext(module)[0]
# for module in os.listdir(mode_package.__path__[0])
# if module.endswith(suffixes)
# ])
# available_modes = dict()
# for mode_name in mode_names:
# if mode_name == '__init__':
# continue
# try:
# mode_module = importlib.import_module('snakewatch.mode.{}'.format(mode_name))
# except ImportError:
# _logger.exception('Could not load mode module {}'.format(mode_name))
# continue
# else:
# available_modes[mode_name] = mode_module
#
# setup_arguments = getattr(mode_module, 'setup_arguments', None)
# if setup_arguments and callable(setup_arguments):
# try:
# setup_arguments(parser)
# except:
# _logger.exception('{} mode has arguments but setup failed'.format(mode_name))
#
# if not available_modes:
# _logger.critical('No modes are available')
# return 1
# parser.add_argument(
# '-m', '--mode',
# choices=available_modes,
# default='Console',
# help='which mode to use'
# )
try:
args = parser.parse_args(initial_args)
except SystemExit:
return
args.mode = 'Console'
_logger.debug('{}\n'.format('=' * 40))
mode = importlib.import_module('.mode.Console', __package__)
handler = getattr(mode, '{}Mode'.format(args.mode), None)
if not handler or not callable(handler):
_logger.critical('{} mode structure is not valid'.format(args.mode))
sys.exit(1)
handler = handler()
if handle_signals:
if not sys.platform.startswith('win'):
signal.signal(signal.SIGHUP, handler.handle_signal)
signal.signal(signal.SIGQUIT, handler.handle_signal)
signal.signal(signal.SIGINT, handler.handle_signal)
signal.signal(signal.SIGTERM, handler.handle_signal)
signal.signal(signal.SIGABRT, handler.handle_signal)
try:
exit_code = handler.run(start_input=get_read_object(
args.read, args.watch, args.lines, args.bytes
), args=args) or 0
except AbortError as err:
exit_code = err.exit_code
except:
if LOG_LEVEL == logging.DEBUG:
raise
import traceback
exc_type, exc_value = sys.exc_info()[:2]
exc_traceback = traceback.extract_stack()
handler.fatal_error(exc_type, exc_value, exc_traceback)
return 1
finally:
release_action_resources()
_logger.debug('snakewatch exiting\n')
_log_handler.close()
return exit_code
if __name__ == '__main__':
sys.exit(main() or 0)
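# Example invocation sketch (the log file path is hypothetical, for
# illustration only), assuming the package is run through its console entry
# point or an equivalent wrapper:
#
#   snakewatch -w /var/log/app.log -n 20
#
# watches /var/log/app.log starting 20 lines from the end; --no-config prints
# every line without applying any configured actions.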
| bsd-3-clause |
elpaso/QGIS | tests/src/python/test_qgsshortcutsmanager.py | 43 | 17733 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsActionManager.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '28/05/2016'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.core import QgsSettings
from qgis.gui import QgsShortcutsManager, QgsGui
from qgis.PyQt.QtCore import QCoreApplication
from qgis.PyQt.QtWidgets import QWidget, QAction, QShortcut
from qgis.testing import start_app, unittest
class TestQgsShortcutsManager(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Run before all tests"""
QCoreApplication.setOrganizationName("QGIS_Test")
QCoreApplication.setOrganizationDomain("QGIS_TestPyQgsWFSProviderGUI.com")
QCoreApplication.setApplicationName("QGIS_TestPyQgsWFSProviderGUI")
QgsSettings().clear()
start_app()
def testInstance(self):
""" test retrieving global instance """
self.assertTrue(QgsGui.shortcutsManager())
# register an action to the singleton
action = QAction('test', None)
QgsGui.shortcutsManager().registerAction(action)
# check that the same instance is returned
self.assertEqual(QgsGui.shortcutsManager().listActions(), [action])
s2 = QgsShortcutsManager()
self.assertEqual(s2.listActions(), [])
def testConstructor(self):
""" test constructing managers"""
s = QgsShortcutsManager(None, '/my_path/')
self.assertEqual(s.settingsPath(), '/my_path/')
def testSettingsPath(self):
""" test that settings path is respected """
QgsSettings().clear()
s1 = QgsShortcutsManager(None, '/path1/')
s2 = QgsShortcutsManager(None, '/path2/')
action1 = QAction('action', None)
s1.registerAction(action1)
s1.setKeySequence(action1, 'B')
action2 = QAction('action', None)
s2.registerAction(action2)
s2.setKeySequence(action2, 'C')
# test retrieving
r1 = QgsShortcutsManager(None, '/path1/')
r2 = QgsShortcutsManager(None, '/path2/')
raction1 = QAction('action', None)
r1.registerAction(raction1)
raction2 = QAction('action', None)
r2.registerAction(raction2)
self.assertEqual(raction1.shortcut().toString(), 'B')
self.assertEqual(raction2.shortcut().toString(), 'C')
def testRegisterAction(self):
""" test registering actions """
QgsSettings().clear()
s = QgsShortcutsManager(None)
action1 = QAction('action1', None)
action1.setShortcut('x')
self.assertTrue(s.registerAction(action1, 'A'))
action2 = QAction('action2', None)
action2.setShortcut('y')
self.assertTrue(s.registerAction(action2, 'B'))
self.assertCountEqual(s.listActions(), [action1, action2])
# try re-registering an existing action - should fail, but leave action registered
self.assertFalse(s.registerAction(action2, 'B'))
self.assertCountEqual(s.listActions(), [action1, action2])
# actions should have been set to default sequences
self.assertEqual(action1.shortcut().toString(), 'A')
self.assertEqual(action2.shortcut().toString(), 'B')
# test that adding an action should set its shortcut automatically
s.setKeySequence('action1', 'C')
s.setKeySequence('action2', 'D')
s = QgsShortcutsManager(None)
self.assertTrue(s.registerAction(action1, 'A'))
self.assertTrue(s.registerAction(action2, 'B'))
# actions should have been set to previous shortcuts
self.assertEqual(action1.shortcut().toString(), 'C')
self.assertEqual(action2.shortcut().toString(), 'D')
# test registering an action containing '&' in name
s = QgsShortcutsManager(None)
action = QAction('&action1', None)
self.assertTrue(s.registerAction(action))
self.assertEqual(action1.shortcut().toString(), 'C')
def testRegisterShortcut(self):
""" test registering shortcuts """
QgsSettings().clear()
s = QgsShortcutsManager(None)
shortcut1 = QShortcut(None)
shortcut1.setKey('x')
shortcut1.setObjectName('shortcut1')
self.assertTrue(s.registerShortcut(shortcut1, 'A'))
shortcut2 = QShortcut(None)
shortcut2.setKey('y')
shortcut2.setObjectName('shortcut2')
self.assertTrue(s.registerShortcut(shortcut2, 'B'))
# shortcuts should have been set to default sequences
self.assertEqual(shortcut1.key().toString(), 'A')
self.assertEqual(shortcut2.key().toString(), 'B')
# test that adding a shortcut should set its sequence automatically
s.setKeySequence(shortcut1, 'C')
s.setKeySequence(shortcut2, 'D')
s = QgsShortcutsManager(None)
self.assertTrue(s.registerShortcut(shortcut1, 'A'))
self.assertTrue(s.registerShortcut(shortcut2, 'B'))
# shortcuts should have been set to previous sequences
self.assertEqual(shortcut1.key().toString(), 'C')
self.assertEqual(shortcut2.key().toString(), 'D')
def testRegisterAll(self):
""" test registering all children """
w = QWidget()
action1 = QAction('action1', w)
shortcut1 = QShortcut(w)
shortcut1.setObjectName('shortcut1')
w2 = QWidget(w)
action2 = QAction('action2', w2)
shortcut2 = QShortcut(w2)
shortcut2.setObjectName('shortcut2')
# recursive
s = QgsShortcutsManager()
s.registerAllChildActions(w, True)
self.assertEqual(set(s.listActions()), set([action1, action2]))
s.registerAllChildShortcuts(w, True)
self.assertEqual(set(s.listShortcuts()), set([shortcut1, shortcut2]))
# non recursive
s = QgsShortcutsManager()
s.registerAllChildActions(w, False)
self.assertEqual(set(s.listActions()), set([action1]))
s.registerAllChildShortcuts(w, False)
self.assertEqual(set(s.listShortcuts()), set([shortcut1]))
# recursive
s = QgsShortcutsManager()
s.registerAllChildren(w, True)
self.assertEqual(set(s.listActions()), set([action1, action2]))
self.assertEqual(set(s.listShortcuts()), set([shortcut1, shortcut2]))
# non recursive
s = QgsShortcutsManager()
s.registerAllChildren(w, False)
self.assertEqual(set(s.listActions()), set([action1]))
self.assertEqual(set(s.listShortcuts()), set([shortcut1]))
def testUnregister(self):
""" test unregistering from manager """
QgsSettings().clear()
s = QgsShortcutsManager(None)
shortcut1 = QShortcut(None)
shortcut1.setKey('x')
shortcut1.setObjectName('shortcut1')
shortcut2 = QShortcut(None)
shortcut2.setKey('y')
shortcut2.setObjectName('shortcut2')
action1 = QAction('action1', None)
action1.setShortcut('x')
action2 = QAction('action2', None)
action2.setShortcut('y')
# try unregistering objects not registered in manager
self.assertFalse(s.unregisterShortcut(shortcut1))
self.assertFalse(s.unregisterAction(action1))
# try unregistering objects from manager
s.registerShortcut(shortcut1)
s.registerShortcut(shortcut2)
s.registerAction(action1)
s.registerAction(action2)
self.assertEqual(set(s.listActions()), set([action1, action2]))
self.assertEqual(set(s.listShortcuts()), set([shortcut1, shortcut2]))
self.assertTrue(s.unregisterAction(action1))
self.assertTrue(s.unregisterShortcut(shortcut1))
self.assertEqual(set(s.listActions()), set([action2]))
self.assertEqual(set(s.listShortcuts()), set([shortcut2]))
self.assertTrue(s.unregisterAction(action2))
self.assertTrue(s.unregisterShortcut(shortcut2))
def testList(self):
""" test listing registered objects """
QgsSettings().clear()
s = QgsShortcutsManager(None)
self.assertEqual(s.listActions(), [])
self.assertEqual(s.listShortcuts(), [])
self.assertEqual(s.listAll(), [])
shortcut1 = QShortcut(None)
shortcut2 = QShortcut(None)
action1 = QAction('action1', None)
action2 = QAction('action2', None)
s.registerShortcut(shortcut1)
s.registerShortcut(shortcut2)
s.registerAction(action1)
s.registerAction(action2)
self.assertEqual(set(s.listActions()), set([action1, action2]))
self.assertEqual(set(s.listShortcuts()), set([shortcut1, shortcut2]))
self.assertEqual(set(s.listAll()), set([action1, action2, shortcut1, shortcut2]))
def testDefault(self):
""" test retrieving default sequences """
QgsSettings().clear()
s = QgsShortcutsManager(None)
shortcut1 = QShortcut(None)
shortcut2 = QShortcut(None)
action1 = QAction('action1', None)
action2 = QAction('action2', None)
# test while not yet registered
self.assertEqual(s.defaultKeySequence(shortcut1), '')
self.assertEqual(s.defaultKeySequence(action1), '')
self.assertEqual(s.objectDefaultKeySequence(shortcut1), '')
self.assertEqual(s.objectDefaultKeySequence(action1), '')
# now register them
s.registerShortcut(shortcut1, 'A')
s.registerShortcut(shortcut2, 'B')
s.registerAction(action1, 'C')
s.registerAction(action2, 'D')
self.assertEqual(s.defaultKeySequence(shortcut1), 'A')
self.assertEqual(s.defaultKeySequence(shortcut2), 'B')
self.assertEqual(s.defaultKeySequence(action1), 'C')
self.assertEqual(s.defaultKeySequence(action2), 'D')
self.assertEqual(s.objectDefaultKeySequence(shortcut1), 'A')
self.assertEqual(s.objectDefaultKeySequence(shortcut2), 'B')
self.assertEqual(s.objectDefaultKeySequence(action1), 'C')
self.assertEqual(s.objectDefaultKeySequence(action2), 'D')
def testSetSequence(self):
""" test setting key sequences """
QgsSettings().clear()
s = QgsShortcutsManager(None)
shortcut1 = QShortcut(None)
shortcut1.setObjectName('shortcut1')
shortcut2 = QShortcut(None)
shortcut2.setObjectName('shortcut2')
action1 = QAction('action1', None)
action2 = QAction('action2', None)
s.registerShortcut(shortcut1, 'A')
s.registerShortcut(shortcut2, 'B')
s.registerAction(action1, 'C')
s.registerAction(action2, 'D')
# test setting by action/shortcut
self.assertTrue(s.setKeySequence(shortcut1, 'E'))
self.assertTrue(s.setKeySequence(shortcut2, 'F'))
self.assertTrue(s.setKeySequence(action1, 'G'))
self.assertTrue(s.setKeySequence(action2, 'H'))
# test that action/shortcuts have been updated
self.assertEqual(shortcut1.key().toString(), 'E')
self.assertEqual(shortcut2.key().toString(), 'F')
self.assertEqual(action1.shortcut().toString(), 'G')
self.assertEqual(action2.shortcut().toString(), 'H')
# new manager
s = QgsShortcutsManager(None)
# new shortcuts
shortcut1 = QShortcut(None)
shortcut1.setObjectName('shortcut1')
shortcut2 = QShortcut(None)
shortcut2.setObjectName('shortcut2')
action1 = QAction('action1', None)
action2 = QAction('action2', None)
# register them
s.registerShortcut(shortcut1, 'A')
s.registerShortcut(shortcut2, 'B')
s.registerAction(action1, 'C')
s.registerAction(action2, 'D')
# check that previously set sequence has been restored
self.assertEqual(shortcut1.key().toString(), 'E')
self.assertEqual(shortcut2.key().toString(), 'F')
self.assertEqual(action1.shortcut().toString(), 'G')
self.assertEqual(action2.shortcut().toString(), 'H')
# same test, using setObjectKeySequence
QgsSettings().clear()
s = QgsShortcutsManager(None)
shortcut1 = QShortcut(None)
shortcut1.setObjectName('shortcut1')
action1 = QAction('action1', None)
s.registerShortcut(shortcut1, 'A')
s.registerAction(action1, 'C')
self.assertTrue(s.setObjectKeySequence(shortcut1, 'E'))
self.assertTrue(s.setObjectKeySequence(action1, 'G'))
self.assertEqual(shortcut1.key().toString(), 'E')
self.assertEqual(action1.shortcut().toString(), 'G')
s = QgsShortcutsManager(None)
shortcut1 = QShortcut(None)
shortcut1.setObjectName('shortcut1')
action1 = QAction('action1', None)
s.registerShortcut(shortcut1, 'A')
s.registerAction(action1, 'C')
self.assertEqual(shortcut1.key().toString(), 'E')
self.assertEqual(action1.shortcut().toString(), 'G')
# same test, using setKeySequence by name
QgsSettings().clear()
s = QgsShortcutsManager(None)
shortcut1 = QShortcut(None)
shortcut1.setObjectName('shortcut1')
action1 = QAction('action1', None)
s.registerShortcut(shortcut1, 'A')
s.registerAction(action1, 'C')
self.assertFalse(s.setKeySequence('invalid_name', 'E'))
self.assertTrue(s.setKeySequence('shortcut1', 'E'))
self.assertTrue(s.setKeySequence('action1', 'G'))
self.assertEqual(shortcut1.key().toString(), 'E')
self.assertEqual(action1.shortcut().toString(), 'G')
s = QgsShortcutsManager(None)
shortcut1 = QShortcut(None)
shortcut1.setObjectName('shortcut1')
action1 = QAction('action1', None)
s.registerShortcut(shortcut1, 'A')
s.registerAction(action1, 'C')
self.assertEqual(shortcut1.key().toString(), 'E')
self.assertEqual(action1.shortcut().toString(), 'G')
def testBySequence(self):
""" test retrieving by sequence """
QgsSettings().clear()
shortcut1 = QShortcut(None)
shortcut1.setObjectName('shortcut1')
shortcut2 = QShortcut(None)
shortcut2.setObjectName('shortcut2')
action1 = QAction('action1', None)
action2 = QAction('action2', None)
s = QgsShortcutsManager(None)
self.assertFalse(s.actionForSequence('E'))
self.assertFalse(s.objectForSequence('F'))
s.registerShortcut(shortcut1, 'E')
s.registerShortcut(shortcut2, 'A')
s.registerAction(action1, 'F')
s.registerAction(action2, 'B')
# use another way of registering sequences
self.assertTrue(s.setKeySequence(shortcut2, 'G'))
self.assertTrue(s.setKeySequence(action2, 'H'))
self.assertEqual(s.objectForSequence('E'), shortcut1)
self.assertEqual(s.objectForSequence('F'), action1)
self.assertEqual(s.objectForSequence('G'), shortcut2)
self.assertEqual(s.objectForSequence('H'), action2)
self.assertFalse(s.objectForSequence('A'))
self.assertFalse(s.objectForSequence('B'))
self.assertEqual(s.shortcutForSequence('E'), shortcut1)
self.assertFalse(s.shortcutForSequence('F'))
self.assertEqual(s.shortcutForSequence('G'), shortcut2)
self.assertFalse(s.shortcutForSequence('H'))
self.assertFalse(s.actionForSequence('E'))
self.assertEqual(s.actionForSequence('F'), action1)
self.assertFalse(s.actionForSequence('G'))
self.assertEqual(s.actionForSequence('H'), action2)
    def testByName(self):
        """ test retrieving actions and shortcuts by name """
QgsSettings().clear()
shortcut1 = QShortcut(None)
shortcut1.setObjectName('shortcut1')
shortcut2 = QShortcut(None)
shortcut2.setObjectName('shortcut2')
action1 = QAction('action1', None)
action2 = QAction('action2', None)
s = QgsShortcutsManager(None)
self.assertFalse(s.actionByName('action1'))
self.assertFalse(s.shortcutByName('shortcut1'))
s.registerShortcut(shortcut1)
s.registerShortcut(shortcut2)
s.registerAction(action1)
s.registerAction(action2)
self.assertEqual(s.shortcutByName('shortcut1'), shortcut1)
self.assertFalse(s.shortcutByName('action1'))
self.assertEqual(s.shortcutByName('shortcut2'), shortcut2)
self.assertFalse(s.shortcutByName('action2'))
self.assertFalse(s.actionByName('shortcut1'))
self.assertEqual(s.actionByName('action1'), action1)
self.assertFalse(s.actionByName('shortcut2'))
self.assertEqual(s.actionByName('action2'), action2)
    def testTooltip(self):
        """ test action tooltips """
action1 = QAction('action1', None)
action1.setToolTip('my tooltip')
action2 = QAction('action2', None)
action2.setToolTip('my multiline\ntooltip')
action3 = QAction('action3', None)
action3.setToolTip('my tooltip (Ctrl+S)')
s = QgsShortcutsManager(None)
s.registerAction(action1)
s.registerAction(action2)
s.registerAction(action3, 'Ctrl+S')
self.assertEqual(action1.toolTip(), '<b>my tooltip</b>')
self.assertEqual(action2.toolTip(), '<b>my multiline</b><p>tooltip</p>')
self.assertEqual(action3.toolTip(), '<b>my tooltip </b> (Ctrl+S)')
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
tedsunnyday/SE-Server | server/lib/flask_mongoengine/pagination.py | 3 | 5909 | # -*- coding: utf-8 -*-
import math
from flask import abort
from mongoengine.queryset import QuerySet
__all__ = ("Pagination", "ListFieldPagination")
class Pagination(object):
def __init__(self, iterable, page, per_page):
if page < 1:
abort(404)
self.iterable = iterable
self.page = page
self.per_page = per_page
if isinstance(iterable, QuerySet):
self.total = iterable.count()
else:
self.total = len(iterable)
start_index = (page - 1) * per_page
end_index = page * per_page
self.items = iterable[start_index:end_index]
if isinstance(self.items, QuerySet):
self.items = self.items.select_related()
if not self.items and page != 1:
abort(404)
@property
def pages(self):
"""The total number of pages"""
return int(math.ceil(self.total / float(self.per_page)))
def prev(self, error_out=False):
"""Returns a :class:`Pagination` object for the previous page."""
assert self.iterable is not None, ('an object is required '
'for this method to work')
iterable = self.iterable
if isinstance(iterable, QuerySet):
iterable._skip = None
iterable._limit = None
iterable = iterable.clone()
return self.__class__(iterable, self.page - 1, self.per_page)
@property
def prev_num(self):
"""Number of the previous page."""
return self.page - 1
@property
def has_prev(self):
"""True if a previous page exists"""
return self.page > 1
def next(self, error_out=False):
"""Returns a :class:`Pagination` object for the next page."""
assert self.iterable is not None, ('an object is required '
'for this method to work')
iterable = self.iterable
if isinstance(iterable, QuerySet):
iterable._skip = None
iterable._limit = None
iterable = iterable.clone()
return self.__class__(iterable, self.page + 1, self.per_page)
@property
def has_next(self):
"""True if a next page exists."""
return self.page < self.pages
@property
def next_num(self):
"""Number of the next page"""
return self.page + 1
def iter_pages(self, left_edge=2, left_current=2,
right_current=5, right_edge=2):
"""Iterates over the page numbers in the pagination. The four
        parameters control the thresholds for how many numbers should be produced
from the sides. Skipped page numbers are represented as `None`.
This is how you could render such a pagination in the templates:
.. sourcecode:: html+jinja
{% macro render_pagination(pagination, endpoint) %}
<div class=pagination>
{%- for page in pagination.iter_pages() %}
{% if page %}
{% if page != pagination.page %}
<a href="{{ url_for(endpoint, page=page) }}">{{ page }}</a>
{% else %}
<strong>{{ page }}</strong>
{% endif %}
{% else %}
<span class=ellipsis>…</span>
{% endif %}
{%- endfor %}
</div>
{% endmacro %}
"""
last = 0
for num in xrange(1, self.pages + 1):
if num <= left_edge or \
(num > self.page - left_current - 1 and
num < self.page + right_current) or \
num > self.pages - right_edge:
if last + 1 != num:
yield None
yield num
last = num
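# Illustrative usage sketch, assuming a mongoengine document class ``Post``
# exists; the class and the page numbers are assumptions for illustration only:
#
#   paginator = Pagination(Post.objects, page=2, per_page=10)
#   items = paginator.items            # the documents on page 2
#   if paginator.has_next:
#       next_page = paginator.next()   # Pagination object for page 3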
class ListFieldPagination(Pagination):
def __init__(self, queryset, doc_id, field_name, page, per_page,
total=None):
"""Allows an array within a document to be paginated.
Queryset must contain the document which has the array we're
paginating, and doc_id should be it's _id.
Field name is the name of the array we're paginating.
Page and per_page work just like in Pagination.
Total is an argument because it can be computed more efficiently
elsewhere, but we still use array.length as a fallback.
"""
if page < 1:
abort(404)
self.page = page
self.per_page = per_page
self.queryset = queryset
self.doc_id = doc_id
self.field_name = field_name
start_index = (page - 1) * per_page
field_attrs = {field_name: {"$slice": [start_index, per_page]}}
# Clone for mongoengine 0.7
qs = queryset.clone().filter(pk=doc_id)
self.items = getattr(qs.clone().fields(**field_attrs).first(), field_name)
self.total = total or len(getattr(qs.clone().fields(**{field_name: 1}).first(),
field_name))
if not self.items and page != 1:
abort(404)
def prev(self, error_out=False):
"""Returns a :class:`Pagination` object for the previous page."""
assert self.items is not None, ('a query object is required '
'for this method to work')
return self.__class__(self.queryset, self.doc_id, self.field_name,
self.page - 1, self.per_page, self.total)
def next(self, error_out=False):
"""Returns a :class:`Pagination` object for the next page."""
assert self.items is not None, ('a query object is required '
'for this method to work')
return self.__class__(self.queryset, self.doc_id, self.field_name,
self.page + 1, self.per_page, self.total)
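# Illustrative usage sketch for ListFieldPagination, assuming a document class
# ``Post`` with a ListField named ``comments``; ``Post``, ``post_id`` and
# ``comments`` are assumptions for illustration only:
#
#   paginator = ListFieldPagination(Post.objects, post_id, "comments",
#                                   page=1, per_page=20)
#   first_page_of_comments = paginator.items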
| apache-2.0 |
itskewpie/tempest | tempest/api/compute/test_quotas.py | 3 | 2639 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api.compute import base
from tempest.test import attr
class QuotasTestJSON(base.BaseComputeTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(QuotasTestJSON, cls).setUpClass()
cls.client = cls.quotas_client
cls.admin_client = cls._get_identity_admin_client()
resp, tenants = cls.admin_client.list_tenants()
cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
cls.client.tenant_name][0]
cls.default_quota_set = set(('injected_file_content_bytes',
'metadata_items', 'injected_files',
'ram', 'floating_ips',
'fixed_ips', 'key_pairs',
'injected_file_path_bytes',
'instances', 'security_group_rules',
'cores', 'security_groups'))
@attr(type='smoke')
def test_get_quotas(self):
        # User can get the quota set for its tenant
expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
self.assertEqual(sorted(expected_quota_set),
sorted(quota_set.keys()))
self.assertEqual(quota_set['id'], self.tenant_id)
@attr(type='smoke')
def test_get_default_quotas(self):
        # User can get the default quota set for its tenant
expected_quota_set = self.default_quota_set | set(['id'])
resp, quota_set = self.client.get_default_quota_set(self.tenant_id)
self.assertEqual(200, resp.status)
self.assertEqual(sorted(expected_quota_set),
sorted(quota_set.keys()))
self.assertEqual(quota_set['id'], self.tenant_id)
class QuotasTestXML(QuotasTestJSON):
_interface = 'xml'
| apache-2.0 |
ryfeus/lambda-packs | pytorch/source/caffe2/python/regularizer_context.py | 1 | 1179 | # @package regularizer_context
# Module caffe2.python.regularizer_context
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import context
from caffe2.python.modifier_context import (
ModifierContext, UseModifierBase)
@context.define_context(allow_default=True)
class RegularizerContext(ModifierContext):
"""
provide context to allow param_info to have different regularizers
"""
def has_regularizer(self, name):
return self._has_modifier(name)
def get_regularizer(self, name):
assert self.has_regularizer(name), (
"{} regularizer is not provided!".format(name))
return self._get_modifier(name)
class UseRegularizer(UseModifierBase):
'''
context class to allow setting the current context.
    Example usage with layer:
regularizers = {'reg1': reg1, 'reg2': reg2}
with UseRegularizer(regularizers):
reg = RegularizerContext.current().get_regularizer('reg1')
layer(reg=reg)
'''
def _context_class(self):
return RegularizerContext
| mit |
romonzaman/newfies-dialer | newfies/appointment/admin_filters.py | 4 | 1594 | #
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2015 Star2Billing S.L.
#
# The primary maintainer of this project is
# Arezqui Belaid <[email protected]>
#
from django.contrib.admin import SimpleListFilter
from django.utils.translation import ugettext as _
from appointment.function_def import manager_list_of_calendar_user
from user_profile.models import CalendarUserProfile
class ManagerFilter(SimpleListFilter):
title = _('manager')
parameter_name = 'manager'
def lookups(self, request, model_admin):
"""
Returns a list of tuples. The first element in each
tuple is the coded value for the option that will
appear in the URL query. The second element is the
human-readable name for the option that will appear
in the right sidebar.
"""
return manager_list_of_calendar_user()
def queryset(self, request, queryset):
"""
Returns the filtered queryset based on the value
provided in the query string and retrievable via
`self.value()`.
"""
if self.value() is not None:
calendar_user_id_list = CalendarUserProfile.objects\
.values_list('user_id', flat=True).filter(manager_id=self.value())
return queryset.filter(id__in=calendar_user_id_list)
else:
return queryset
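# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch of attaching ManagerFilter to a ModelAdmin; ``Event`` and
# ``EventAdmin`` are hypothetical names used purely for illustration.
#
#     from django.contrib import admin
#
#     class EventAdmin(admin.ModelAdmin):
#         list_filter = (ManagerFilter,)
#
#     admin.site.register(Event, EventAdmin)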
| mpl-2.0 |
denfromufa/PTVS | Python/Product/Pyvot/Pyvot/xl/cache.py | 18 | 15811 | # Copyright (c) Microsoft Corporation.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the LICENSE.txt file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
"""Cache layer
Pyvot must assume that the contents of an Excel workbook can change at any time, due to interactive manipulation
or other external influences. Living out of process, this can be a performance catastrophe. We use a cache when
possible, on the assumption that Excel doesn't change during any single call into to the Pyvot user API.
This module provides the :func:`@cache_result` decorator which adds caching to a function or property,
an :func:`@enable_caching` decorator for enabling the (off by default) cache for the duration of a function call, and the
CacheManager, for non-decorator cache control including invalidation."""
import contextlib
import functools
def enable_caching(f):
"""Decorator which enables caching within the wrapped function. Caching is enabled
until the function exits; i.e. functions called directly or indirectly will also have
caching enabled."""
import functools
@functools.wraps(f)
def _wrapped(*args, **kwargs):
with CacheManager.caching_enabled():
return f(*args, **kwargs)
return _wrapped
class _ResultCachingDescriptor(object):
"""Decorator class for caching the results of method calls / read-only properties. The cache is controlled
by the state of the singleton CacheManager. While caching is enabled, the wrapped function / property
is called only once per unique set of arguments, and the return value is stored to satisfy future
calls with those same arguments. When caching is disabled, all cached values are cleared, and
the wrapped function is always called.
This decorator may only be applied to a method or property within a class. A separate cache is maintained *per instance*;
although argument sets are typically compared for value equality, two equal instances still have separate caches.
Cache statistics are available via the 'stats' attribute (unless wrapping a property):
instance_a.cached_method -> CacheSite instance
instance_a.cached_method.stats.hits -> # of cache hits (similarly for misses)
instance_b.cached_method -> Different CacheSite instance
This information is summarized by CacheManager.cache_info(), in which the per-instance stats are aggregated by class"""
def __init__(self, f, as_property=False):
assert callable(f) or isinstance(f, property)
self._wrapped = f
self._wrapping_property = isinstance(f, property)
# SomeClass.cached_thing returns a _ResultCachingDescriptor instance (see __get__)
# That means that SomeClass.cached_thing.__doc__ should be something useful.
# This call does _not_ update _ResultCachingDescriptor.__doc__
self._update_wrapper(self)
def _update_wrapper(self, o):
"""Updates the given object with __name__, __doc__, etc. from the wrapped thing.
Just like functools.update_wrapper, but handles the case where we are wrapping a property"""
wrapped_func = self._wrapped.fget if self._wrapping_property else self._wrapped
functools.update_wrapper(o, wrapped_func)
def __get__(self, instance, owning_class=None):
# This is called as part of the "descriptor protocol," and handles the "method in a class" case
# http://docs.python.org/reference/datamodel.html#implementing-descriptors
# We'll allow SomeClass.cached_method to return this object, though
# it currently doesn't contain anything useful. This is convenient for introspection
# (why is cached_method behaving strangely? oh, it's a descriptor of some kind)
# and is consistent with the behavior of the @property descriptor
if instance is None: return self
# We store instance-level cache sites on the instances themselves, indexed by the owning _ResultCachingDescriptor
# It produces the following reference structure:
# CacheManager - - (weakref) - -> CacheSite <==> instance ==> class ==> this object
# i.e., the CacheSite and its stored data will be garbage collected along with the instance.
#
        # This is better than storing a map of instances -> sites on this object, because it would prevent such reclamation
# CacheManager - - (weakref) - -> CacheSite <==> instance ==> class ==> this object\
# ^===========================================
# i.e., instances and cache sites would be kept alive by the owning class!
try:
instance_sites = instance.__cache_sites
except AttributeError:
instance_sites = instance.__cache_sites = {}
if not self in instance_sites:
if self._wrapping_property:
# self._wrapped is a property instance, i.e. we are wrapping another descriptor. Here we construct
# a CacheSite-compatible callable that binds a particular instance for the property
def _wrapped_with_instance(): return self._wrapped.__get__(instance, owning_class)
# str(property object) doesn't give the name of what we are wrapping - however, the getter itself is available
site_name = "%s (instance of %s at %x)" % (repr(self._wrapped.fget), str(owning_class), id(instance))
else:
# self._wrapped is a function, not a bound method. Here, we bind 'self'
# The resulting CacheSite calls it as an ordinary function.
def _wrapped_with_instance(*args, **kwargs): return self._wrapped(instance, *args, **kwargs)
site_name = "%s (instance of %s at %x)" % (repr(self._wrapped), str(owning_class), id(instance))
# We use _wrapped rather than _wrapped_with_instance for the key, since the latter is unique per instance
# _wrapped.fget is used if _wrapped is a property, since its __repr__ isn't informative
wrapped_key = self._wrapped if not self._wrapping_property else self._wrapped.fget
            # CacheSite reports the __name__, __doc__, etc. of the function we give it, so update them appropriately
# This is important for instance.cached_thing.__name__ to work right.
self._update_wrapper(_wrapped_with_instance)
instance_sites[self] = CacheManager.create_cache_site(_wrapped_with_instance, site_name,
site_group_key=(wrapped_key, type(instance)))
if self._wrapping_property:
return instance_sites[self]()
else:
return instance_sites[self]
def __call__(self, *args, **kwargs):
raise TypeError("_ResultCachingDescriptor is not callable. Only methods within a class (not normal functions) may be cached")
cache_result = _ResultCachingDescriptor
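# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch of the decorators defined above. ``Workbook`` and
# ``_read_from_excel`` are hypothetical; with caching enabled, a second call
# with the same arguments is served from the per-instance CacheSite instead of
# going back out of process.
#
#     class Workbook(object):
#         @cache_result
#         def used_range(self, sheet_name):
#             return _read_from_excel(sheet_name)   # expensive COM round-trip
#
#     @enable_caching
#     def report(wb):
#         first = wb.used_range("Sheet1")    # miss: calls into Excel
#         second = wb.used_range("Sheet1")   # hit: returned from the cache
#         return first, second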
class CacheSite(object):
"""Represents a single cache of arguments -> results.
Note that there can be multiple cache sites per @cache_result-wrapped method;
each instance with the caching method uses a separate cache site"""
def __init__(self, source, site_name=None):
assert callable(source)
if site_name is None: site_name = repr(self)
self.source = source
self.stats = CacheSiteStats()
self.site_name = site_name
self._cached = {}
# Copy __doc__, etc. to the instance.
# __doc__, etc. on the class itself are preserved
functools.update_wrapper(self, source)
def clear(self):
self._cached.clear()
def _key(self, *args, **kwargs):
# kwargs (a dict) is not hashable, but its item tuples may be
# Tuple conversion needed because lists are not hashable (since mutable)
return (args, tuple(sorted(kwargs.items())))
def get(self, *args, **kwargs):
if not CacheManager.is_caching_enabled:
self.stats.uncached_misses += 1
return self.source(*args, **kwargs)
k = self._key(*args, **kwargs)
if k in self._cached:
self.stats.hits += 1
return self._cached[k]
else:
self.stats.misses += 1
v = self.source(*args, **kwargs)
self._cached[k] = v
return v
__call__ = get
class CacheSiteStats(object):
"""Container for :attr:`hits`, :attr:`misses`, and :attr:`uncached_misses`
    (misses that occurred with caching disabled). Accessed as :attr:`CacheSite.stats`"""
def __init__(self):
self.hits = self.misses = self.uncached_misses = 0
class CacheManager_class(object):
"""Singleton manager for the program's CacheSites (created through use of @:func:`cache_result`)
Cache state is dynamically scoped on the stack by use of a context manager::
with CacheManager.caching_enabled():
do_stuff()
Within that context, all @cache_result decorators are enabled and may store / return cached values
Cached values are deleted when the context is exited.
The context may be safely nested."""
def __init__(self):
self._cache_level = 0
self._site_weakrefs = set()
self._site_stats = {}
self._iterating_site_weakrefs = False
@contextlib.contextmanager
def caching_enabled(self):
"""Returns an object implementing the context-manager protocol. Within the context,
caching is enabled (this is a context-manager version of the `@enable_caching` decorator).
Cache activation may be nested; there is no harm in enabling caching before calling a function
which does the same::
with xl.CacheManager.caching_enabled():
with xl.CacheManager.caching_enabled():
assert xl.CacheManager.is_caching_enabled()
assert xl.CacheManager.is_caching_enabled()
assert not xl.CacheManager.is_caching_enabled()"""
self._increment_cache_level()
try:
yield
finally:
self._decrement_cache_level()
@contextlib.contextmanager
def caching_disabled(self):
"""Returns an object implementing the context-manager protocol. Within the context, caching is
disabled. When exiting the context, the cache-enable state (incl. nesting level) is restored to its
previous value. Entering the context immediately invalidates all cache sites
::
with xl.CacheManager.caching_enabled():
with xl.CacheManager.caching_disabled():
assert not xl.CacheManager.is_caching_enabled()
assert xl.CacheManager.is_caching_enabled()"""
old_level = self._cache_level
if old_level > 0:
self._cache_level = 0
self.invalidate_all_caches()
try:
yield
finally:
self._cache_level = old_level
@property
def is_caching_enabled(self):
return self._cache_level > 0
def _increment_cache_level(self):
self._cache_level += 1
def _decrement_cache_level(self):
assert self._cache_level > 0
self._cache_level -= 1
if self._cache_level == 0: self.invalidate_all_caches()
def create_cache_site(self, source, site_name, site_group_key):
"""Creates a CacheSite instanced, managed by this CacheManager.
The manager keeps a weak reference to the site ; the lifetime of the
cache is controlled by the caller
The site_group_key specifies the key on which to aggregate hit / miss stats in cache_info()
Note that a reference to site_group_key will continue to be held by the CacheManager, so take
care to select keys that are small in size, or wouldn't be garbage collected anyway (i.e. a module-level class)"""
import weakref
cs = CacheSite(source=source, site_name=site_name)
# Both this CacheManager and the cache site will reference the stats object;
# however, our referencing the stats object will not keep the CacheSite alive
# This allows us to calculate aggregate cache stats in cache_info() without keeping
# cache sites and their owning objects alive.
stats = cs.stats
cs_weak = weakref.ref(cs, self._on_site_unreferenced)
self._site_weakrefs.add(cs_weak)
self._site_stats.setdefault(site_group_key, []).append(stats)
return cs
def cache_info(self):
"""Returns a tuple (site group key, group size, hits, misses, uncached misses) per cache site group.
        (uncached misses refers to misses that occurred without caching enabled; see CacheManager.is_caching_enabled)
A cache site group is an aggregation of cache sites that are considered meaningfully related,
with regards to performance counters.
For example, though a method on a class has a cache site per _instance_, all instance sites
of a method are joined to the same site group."""
for site_group_key, group_stats in self._site_stats.iteritems():
yield (site_group_key, len(group_stats),
sum([stat.hits for stat in group_stats]),
sum([stat.misses for stat in group_stats]),
sum([stat.uncached_misses for stat in group_stats]))
def invalidate_all_caches(self):
"""Invalidates cache sites program-wide. This method should be called whenever the Excel COM API is used to
modify a workbook (for example, it is called by :meth:`xl.range.Range.set`).
Alternatively, one can use :meth:`caching_disabled`, since it invalidates caches on context entry."""
for site in self._iter_site_refs():
site.clear()
def _iter_site_refs(self):
# Iterating on _site_weakrefs is tricky, because the _on_site_unreferenced
# callback modifies it when a site is ready to be GC'd
# Since iterating site refs (ex. to clear caches) may remove strong
# site references, we must prevent modification during iteration (flag shared with
# the callback), and clean the set (to prevent accumulation of dead weakrefs)
old_iter_state = self._iterating_site_weakrefs
self._iterating_site_weakrefs = True
try:
for site_weakref in self._site_weakrefs:
site = site_weakref()
if not site is None: yield site
to_discard = set()
for site_weakref in self._site_weakrefs:
if site_weakref() is None: to_discard.add(site_weakref)
self._site_weakrefs -= to_discard
finally:
self._iterating_site_weakrefs = False
def _on_site_unreferenced(self, site_weakref):
if not self._iterating_site_weakrefs:
self._site_weakrefs.discard( site_weakref )
CacheManager = CacheManager_class()
"""Singleton CacheManager used by all of Pyvot""" | apache-2.0 |
Parisson/cartridge | cartridge/project_template/project_name/settings.py | 4 | 14096 |
from __future__ import absolute_import, unicode_literals
import os
from django.utils.translation import ugettext_lazy as _
######################
# CARTRIDGE SETTINGS #
######################
# The following settings are already defined in cartridge.shop.defaults
# with default values, but are common enough to be put here, commented
# out, for conveniently overriding. Please consult the settings
# documentation for a full list of settings Cartridge implements:
# http://cartridge.jupo.org/configuration.html#default-settings
# Sequence of available credit card types for payment.
# SHOP_CARD_TYPES = ("Mastercard", "Visa", "Diners", "Amex")
# Setting to turn on featured images for shop categories. Defaults to False.
# SHOP_CATEGORY_USE_FEATURED_IMAGE = True
# Set an alternative OrderForm class for the checkout process.
# SHOP_CHECKOUT_FORM_CLASS = 'cartridge.shop.forms.OrderForm'
# If True, the checkout process is split into separate
# billing/shipping and payment steps.
# SHOP_CHECKOUT_STEPS_SPLIT = True
# If True, the checkout process has a final confirmation step before
# completion.
# SHOP_CHECKOUT_STEPS_CONFIRMATION = True
# Controls the formatting of monetary values accord to the locale
# module in the python standard library. If an empty string is
# used, will fall back to the system's locale.
# SHOP_CURRENCY_LOCALE = ""
# Dotted package path and name of the function that
# is called on submit of the billing/shipping checkout step. This
# is where shipping calculation can be performed and set using the
# function ``cartridge.shop.utils.set_shipping``.
# SHOP_HANDLER_BILLING_SHIPPING = \
# "cartridge.shop.checkout.default_billship_handler"
# Dotted package path and name of the function that
# is called once an order is successful and all of the order
# object's data has been created. This is where any custom order
# processing should be implemented.
# SHOP_HANDLER_ORDER = "cartridge.shop.checkout.default_order_handler"
# Dotted package path and name of the function that
# is called on submit of the payment checkout step. This is where
# integration with a payment gateway should be implemented.
# SHOP_HANDLER_PAYMENT = "cartridge.shop.checkout.default_payment_handler"
# Sequence of value/name pairs for order statuses.
# SHOP_ORDER_STATUS_CHOICES = (
# (1, "Unprocessed"),
# (2, "Processed"),
# )
# Sequence of value/name pairs for types of product options,
# eg Size, Colour. NOTE: Increasing the number of these will
# require database migrations!
# SHOP_OPTION_TYPE_CHOICES = (
# (1, "Size"),
# (2, "Colour"),
# )
# Sequence of indexes from the SHOP_OPTION_TYPE_CHOICES setting that
# control how the options should be ordered in the admin,
# eg for "Colour" then "Size" given the above:
# SHOP_OPTION_ADMIN_ORDER = (2, 1)
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for conveniently
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", (_("Media Library"), "fb_browse"),)),
# (_("Shop"), ("shop.Product", "shop.ProductOption", "shop.DiscountCode",
# "shop.Sale", "shop.Order")),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, _("Top navigation bar"), "pages/menus/dropdown.html"),
# (2, _("Left-hand tree"), "pages/menus/tree.html"),
# (3, _("Footer"), "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# (_("Image"),),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# (_("Another name"),),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the django-modeltranslation will be added to the
# INSTALLED_APPS setting.
USE_MODELTRANSLATION = False
########################
# MAIN DJANGO SETTINGS #
########################
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/dev/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'UTC'
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = False
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.",
# DB name or path to database file if using sqlite3.
"NAME": "",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
}
}
#########
# PATHS #
#########
# Full filesystem path to the project.
PROJECT_APP_PATH = os.path.dirname(os.path.abspath(__file__))
PROJECT_APP = os.path.basename(PROJECT_APP_PATH)
PROJECT_ROOT = BASE_DIR = os.path.dirname(PROJECT_APP_PATH)
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_APP
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_APP
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"cartridge.shop",
"mezzanine.blog",
"mezzanine.forms",
"mezzanine.galleries",
"mezzanine.twitter",
# "mezzanine.accounts",
# "mezzanine.mobile",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
"mezzanine.pages.context_processors.page",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
'django.contrib.sessions.middleware.SessionMiddleware',
# Uncomment if using internationalisation or localisation
# 'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
"cartridge.shop.middleware.ShopMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
from .local_settings import *
except ImportError as e:
if "local_settings" not in str(e):
raise e
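# As an illustration only (editor's note; the names and values below are
# assumptions, not part of the generated project), a minimal local_settings.py
# might override settings such as:
#
# DEBUG = True
# SECRET_KEY = "a-local-development-secret-key"
# DATABASES = {
#     "default": {
#         "ENGINE": "django.db.backends.sqlite3",
#         "NAME": "dev.db",
#     }
# }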
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
| bsd-2-clause |
ekevoo/hfbr | example_settings.py | 1 | 1360 | # -*- coding: utf-8 -*-
from datetime import timedelta
from sys import stdout
TARGETS = [
{
'target_path': 'db.sqlite',
'backup_dir': '/var/backup/sqlite-db',
'retention_plan': 'default',
'pin': ('20150717-1155.sq3.bz2',),
'prune': False,
},
]
PLANS = {
'default': (
# Snapshots are pinned in the order these rules are declared.
('year', None), # permanent yearly snapshots
('month', 9), # 9 monthly snapshots
(timedelta(weeks=1), 6), # 6 weekly snapshots
(timedelta(days=1), 5), # 5 daily snapshots
(timedelta(hours=1), 18), # 18 hourly snapshots
(None, 10), # 10 latest snapshots
),
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {'fmt': {'datefmt': '%Y-%m-%d %H:%M:%S',
'format': '%(asctime)s %(levelname)-8s %(name)-15s %(message)s'}},
'handlers': {
'console': {'class': 'logging.StreamHandler', 'formatter': 'fmt', 'stream': stdout},
'file': {'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'fmt',
'filename': '/tmp/hfbrw.log',
'maxBytes': 4 * 1024 * 1024, # 4 MB
'backupCount': 5},
},
'loggers': {'hfbrw': {'handlers': ['console'], 'level': 'INFO'}},
}
| apache-2.0 |
toastedcornflakes/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 9 | 3127 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <[email protected]>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
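    # The folder above is typically the directory produced by the tutorial's
    # data fetch script, e.g. (editor's note; the path is illustrative only):
    #   python exercise_02_sentiment.py data/movie_reviews/txt_sentoken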
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.results_['params'][i],
grid_search.results_['test_mean_score'][i],
grid_search.results_['test_std_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
QGuLL/samba | selftest/selftest.py | 20 | 16381 | #!/usr/bin/python -u
# Bootstrap Samba and run a number of tests against it.
# Copyright (C) 2005-2012 Jelmer Vernooij <[email protected]>
# Copyright (C) 2007-2009 Stefan Metzmacher <[email protected]>
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import atexit
from cStringIO import StringIO
import os
import sys
import signal
import subprocess
from samba import subunit
import traceback
import warnings
import optparse
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
from selftest import (
socket_wrapper,
subunithelper,
testlist,
)
from selftest.client import write_clientconf
from selftest.run import (
expand_command_list,
expand_command_run,
exported_envvars_str,
now,
run_testsuite_command,
)
from selftest.target import (
EnvironmentManager,
NoneTarget,
UnsupportedEnvironment,
)
includes = []
excludes = []
def read_excludes(fn):
excludes.extend(testlist.read_test_regexes(fn))
def read_includes(fn):
includes.extend(testlist.read_test_regexes(fn))
parser = optparse.OptionParser("TEST-REGEXES")
parser.add_option("--target", type="choice", choices=["samba", "samba3", "none"], default="samba", help="Samba version to target")
parser.add_option("--quick", help="run quick overall test", action="store_true", default=False)
parser.add_option("--list", help="list available tests", action="store_true", default=False)
parser.add_option("--socket-wrapper", help="enable socket wrapper", action="store_true", default=False)
parser.add_option("--socket-wrapper-pcap", help="save traffic to pcap directories", type="str")
parser.add_option("--socket-wrapper-keep-pcap", help="keep all pcap files, not just those for tests that failed", action="store_true", default=False)
parser.add_option("--one", help="abort when the first test fails", action="store_true", default=False)
parser.add_option("--exclude", action="callback", help="Add file to exclude files", callback=read_excludes)
parser.add_option("--include", action="callback", help="Add file to include files", callback=read_includes)
parser.add_option("--testenv", help="run a shell in the requested test environment", action="store_true", default=False)
parser.add_option("--resetup-environment", help="Re-setup environment", action="store_true", default=False)
parser.add_option("--load-list", help="Load list of tests to load from a file", type=str)
parser.add_option("--prefix", help="prefix to run tests in", type=str, default="./st")
parser.add_option("--srcdir", type=str, default=".", help="source directory")
parser.add_option("--bindir", type=str, default="./bin", help="binaries directory")
parser.add_option("--testlist", type=str, action="append", help="file to read available tests from")
parser.add_option("--ldap", help="back samba onto specified ldap server", choices=["openldap", "fedora-ds"], type="choice")
opts, args = parser.parse_args()
subunit_ops = subunithelper.SubunitOps(sys.stdout)
def handle_signal(sig, frame):
sys.stderr.write("Exiting early because of signal %s.\n" % sig)
sys.exit(1)
for sig in (signal.SIGINT, signal.SIGQUIT, signal.SIGTERM, signal.SIGPIPE):
signal.signal(sig, handle_signal)
def skip(name):
return testlist.find_in_list(excludes, name)
def setup_pcap(name):
if (not opts.socket_wrapper_pcap or
not os.environ.get("SOCKET_WRAPPER_PCAP_DIR")):
return
fname = "".join([x for x in name if x.isalnum() or x == '-'])
pcap_file = os.path.join(
os.environ["SOCKET_WRAPPER_PCAP_DIR"], "%s.pcap" % fname)
socket_wrapper.setup_pcap(pcap_file)
return pcap_file
def cleanup_pcap(pcap_file, exit_code):
if not opts.socket_wrapper_pcap:
return
if opts.socket_wrapper_keep_pcap:
return
    if exit_code == 0:
return
if pcap_file is None:
return
os.unlink(pcap_file)
def run_testsuite(name, cmd, subunit_ops, env=None):
"""Run a single testsuite.
:param env: Environment to run in
:param name: Name of the testsuite
:param cmd: Name of the (fully expanded) command to run
:return: exitcode of the command
"""
pcap_file = setup_pcap(name)
exitcode = run_testsuite_command(name, cmd, subunit_ops, env)
if exitcode is None:
sys.exit(1)
cleanup_pcap(pcap_file, exitcode)
if not opts.socket_wrapper_keep_pcap and pcap_file is not None:
sys.stdout.write("PCAP FILE: %s\n" % pcap_file)
if exitcode != 0 and opts.one:
sys.exit(1)
return exitcode
if opts.list and opts.testenv:
sys.stderr.write("--list and --testenv are mutually exclusive\n")
sys.exit(1)
tests = args
# quick hack to disable rpc validation when using valgrind - it is way too slow
if not os.environ.get("VALGRIND"):
os.environ["VALIDATE"] = "validate"
os.environ["MALLOC_CHECK_"] = "3"
# make all our python scripts unbuffered
os.environ["PYTHONUNBUFFERED"] = "1"
bindir_abs = os.path.abspath(opts.bindir)
# Backwards compatibility:
if os.environ.get("TEST_LDAP") == "yes":
if os.environ.get("FEDORA_DS_ROOT"):
ldap = "fedora-ds"
else:
ldap = "openldap"
torture_maxtime = int(os.getenv("TORTURE_MAXTIME", "1200"))
if opts.ldap:
# LDAP is slow
torture_maxtime *= 2
prefix = os.path.normpath(opts.prefix)
# Ensure we have the test prefix around.
#
# We need restrictive permissions on this as some subdirectories in this tree
# will have wider permissions (ie 0777) and this would allow other users on the
# host to subvert the test process.
if not os.path.isdir(prefix):
os.mkdir(prefix, 0700)
else:
os.chmod(prefix, 0700)
prefix_abs = os.path.abspath(prefix)
tmpdir_abs = os.path.abspath(os.path.join(prefix_abs, "tmp"))
if not os.path.isdir(tmpdir_abs):
os.mkdir(tmpdir_abs, 0777)
srcdir_abs = os.path.abspath(opts.srcdir)
if prefix_abs == "/":
raise Exception("using '/' as absolute prefix is a bad idea")
os.environ["PREFIX"] = prefix
os.environ["KRB5CCNAME"] = os.path.join(prefix, "krb5ticket")
os.environ["PREFIX_ABS"] = prefix_abs
os.environ["SRCDIR"] = opts.srcdir
os.environ["SRCDIR_ABS"] = srcdir_abs
os.environ["BINDIR"] = bindir_abs
tls_enabled = not opts.quick
if tls_enabled:
os.environ["TLS_ENABLED"] = "yes"
else:
os.environ["TLS_ENABLED"] = "no"
def prefix_pathvar(name, newpath):
if name in os.environ:
os.environ[name] = "%s:%s" % (newpath, os.environ[name])
else:
os.environ[name] = newpath
prefix_pathvar("PKG_CONFIG_PATH", os.path.join(bindir_abs, "pkgconfig"))
prefix_pathvar("PYTHONPATH", os.path.join(bindir_abs, "python"))
if opts.socket_wrapper_keep_pcap:
# Socket wrapper keep pcap implies socket wrapper pcap
opts.socket_wrapper_pcap = True
if opts.socket_wrapper_pcap:
# Socket wrapper pcap implies socket wrapper
opts.socket_wrapper = True
if opts.socket_wrapper:
socket_wrapper_dir = socket_wrapper.setup_dir(os.path.join(prefix_abs, "w"), opts.socket_wrapper_pcap)
sys.stdout.write("SOCKET_WRAPPER_DIR=%s\n" % socket_wrapper_dir)
elif not opts.list:
if os.getuid() != 0:
warnings.warn("not using socket wrapper, but also not running as root. Will not be able to listen on proper ports")
testenv_default = "none"
# After this many seconds, the server will self-terminate. All tests
# must terminate in this time, and testenv will only stay alive this
# long
if os.environ.get("SMBD_MAXTIME", ""):
server_maxtime = int(os.environ["SMBD_MAXTIME"])
else:
server_maxtime = 7500
def has_socket_wrapper(bindir):
"""Check if Samba has been built with socket wrapper support.
"""
    # subprocess cannot write into a StringIO object; capture stdout directly.
    out = subprocess.check_output([os.path.join(bindir, "smbd"), "-b"])
    for l in out.splitlines():
        if "SOCKET_WRAPPER" in l:
            return True
    return False
if not opts.list:
if opts.target == "samba":
if opts.socket_wrapper and not has_socket_wrapper(opts.bindir):
sys.stderr.write("You must include --enable-socket-wrapper when compiling Samba in order to execute 'make test'. Exiting....\n")
sys.exit(1)
testenv_default = "ad_dc_ntvfs"
from selftest.target.samba import Samba
target = Samba(opts.bindir, ldap, opts.srcdir, server_maxtime)
elif opts.target == "samba3":
if opts.socket_wrapper and not has_socket_wrapper(opts.bindir):
sys.stderr.write("You must include --enable-socket-wrapper when compiling Samba in order to execute 'make test'. Exiting....\n")
sys.exit(1)
testenv_default = "member"
from selftest.target.samba3 import Samba3
target = Samba3(opts.bindir, srcdir_abs, server_maxtime)
elif opts.target == "none":
testenv_default = "none"
target = NoneTarget()
env_manager = EnvironmentManager(target)
atexit.register(env_manager.teardown_all)
interfaces = ",".join([
"127.0.0.11/8",
"127.0.0.12/8",
"127.0.0.13/8",
"127.0.0.14/8",
"127.0.0.15/8",
"127.0.0.16/8"])
clientdir = os.path.join(prefix_abs, "client")
conffile = os.path.join(clientdir, "client.conf")
os.environ["SMB_CONF_PATH"] = conffile
todo = []
if not opts.testlist:
sys.stderr.write("No testlists specified\n")
sys.exit(1)
os.environ["SELFTEST_PREFIX"] = prefix_abs
os.environ["SELFTEST_TMPDIR"] = tmpdir_abs
os.environ["TEST_DATA_PREFIX"] = tmpdir_abs
if opts.socket_wrapper:
os.environ["SELFTEST_INTERFACES"] = interfaces
else:
os.environ["SELFTEST_INTERFACES"] = ""
if opts.quick:
os.environ["SELFTEST_QUICK"] = "1"
else:
os.environ["SELFTEST_QUICK"] = ""
os.environ["SELFTEST_MAXTIME"] = str(torture_maxtime)
available = []
for fn in opts.testlist:
for testsuite in testlist.read_testlist_file(fn):
if not testlist.should_run_test(tests, testsuite):
continue
name = testsuite[0]
if (includes is not None and
testlist.find_in_list(includes, name) is not None):
continue
available.append(testsuite)
if opts.load_list:
restricted_mgr = testlist.RestrictedTestManager.from_path(opts.load_list)
else:
restricted_mgr = None
for testsuite in available:
name = testsuite[0]
skipreason = skip(name)
if restricted_mgr is not None:
match = restricted_mgr.should_run_testsuite(name)
if match == []:
continue
else:
match = None
if skipreason is not None:
if not opts.list:
subunit_ops.skip_testsuite(name, skipreason)
else:
todo.append(testsuite + (match,))
if restricted_mgr is not None:
for name in restricted_mgr.iter_unused():
sys.stdout.write("No test or testsuite found matching %s\n" % name)
if todo == []:
sys.stderr.write("No tests to run\n")
sys.exit(1)
suitestotal = len(todo)
if not opts.list:
subunit_ops.progress(suitestotal, subunit.PROGRESS_SET)
subunit_ops.time(now())
exported_envvars = [
# domain stuff
"DOMAIN",
"REALM",
# domain controller stuff
"DC_SERVER",
"DC_SERVER_IP",
"DC_NETBIOSNAME",
"DC_NETBIOSALIAS",
# domain member
"MEMBER_SERVER",
"MEMBER_SERVER_IP",
"MEMBER_NETBIOSNAME",
"MEMBER_NETBIOSALIAS",
# rpc proxy controller stuff
"RPC_PROXY_SERVER",
"RPC_PROXY_SERVER_IP",
"RPC_PROXY_NETBIOSNAME",
"RPC_PROXY_NETBIOSALIAS",
# domain controller stuff for Vampired DC
"VAMPIRE_DC_SERVER",
"VAMPIRE_DC_SERVER_IP",
"VAMPIRE_DC_NETBIOSNAME",
"VAMPIRE_DC_NETBIOSALIAS",
# domain controller stuff for Vampired DC
"PROMOTED_DC_SERVER",
"PROMOTED_DC_SERVER_IP",
"PROMOTED_DC_NETBIOSNAME",
"PROMOTED_DC_NETBIOSALIAS",
# server stuff
"SERVER",
"SERVER_IP",
"NETBIOSNAME",
"NETBIOSALIAS",
# user stuff
"USERNAME",
"USERID",
"PASSWORD",
"DC_USERNAME",
"DC_PASSWORD",
# misc stuff
"KRB5_CONFIG",
"WINBINDD_SOCKET_DIR",
"WINBINDD_PRIV_PIPE_DIR",
"NMBD_SOCKET_DIR",
"LOCAL_PATH"
]
def switch_env(name, prefix):
if ":" in name:
(envname, option) = name.split(":", 1)
else:
envname = name
option = "client"
env = env_manager.setup_env(envname, prefix)
testenv_vars = env.get_vars()
if option == "local":
socket_wrapper.set_default_iface(testenv_vars["SOCKET_WRAPPER_DEFAULT_IFACE"])
os.environ["SMB_CONF_PATH"] = testenv_vars["SERVERCONFFILE"]
elif option == "client":
socket_wrapper.set_default_iface(11)
write_clientconf(conffile, clientdir, testenv_vars)
os.environ["SMB_CONF_PATH"] = conffile
else:
raise Exception("Unknown option[%s] for envname[%s]" % (option,
envname))
for name in exported_envvars:
if name in testenv_vars:
os.environ[name] = testenv_vars[name]
elif name in os.environ:
del os.environ[name]
return env
# This 'global' file needs to be empty when we start
dns_host_file_path = os.path.join(prefix_abs, "dns_host_file")
if os.path.exists(dns_host_file_path):
os.unlink(dns_host_file_path)
if opts.testenv:
testenv_name = os.environ.get("SELFTEST_TESTENV", testenv_default)
env = switch_env(testenv_name, prefix)
testenv_vars = env.get_vars()
os.environ["PIDDIR"] = testenv_vars["PIDDIR"]
os.environ["ENVNAME"] = testenv_name
envvarstr = exported_envvars_str(testenv_vars, exported_envvars)
term = os.environ.get("TERMINAL", "xterm -e")
cmd = """'echo -e "
Welcome to the Samba4 Test environment '%(testenv_name)'
This matches the client environment used in make test
server is pid `cat \$PIDDIR/samba.pid`
Some useful environment variables:
TORTURE_OPTIONS=\$TORTURE_OPTIONS
SMB_CONF_PATH=\$SMB_CONF_PATH
$envvarstr
\" && LD_LIBRARY_PATH=%(LD_LIBRARY_PATH)s $(SHELL)'""" % {
"testenv_name": testenv_name,
"LD_LIBRARY_PATH": os.environ["LD_LIBRARY_PATH"]}
subprocess.call(term + ' ' + cmd, shell=True)
env_manager.teardown_env(testenv_name)
elif opts.list:
for (name, envname, cmd, supports_loadfile, supports_idlist, subtests) in todo:
cmd = expand_command_list(cmd)
if cmd is None:
warnings.warn("Unable to list tests in %s" % name)
continue
exitcode = subprocess.call(cmd, shell=True)
if exitcode != 0:
sys.stderr.write("%s exited with exit code %s\n" % (cmd, exitcode))
sys.exit(1)
else:
for (name, envname, cmd, supports_loadfile, supports_idlist, subtests) in todo:
try:
env = switch_env(envname, prefix)
except UnsupportedEnvironment:
subunit_ops.start_testsuite(name)
subunit_ops.end_testsuite(name, "skip",
"environment %s is unknown in this test backend - skipping" % envname)
continue
except Exception, e:
subunit_ops.start_testsuite(name)
traceback.print_exc()
subunit_ops.end_testsuite(name, "error",
"unable to set up environment %s: %s" % (envname, e))
continue
cmd, tmpf = expand_command_run(cmd, supports_loadfile, supports_idlist,
subtests)
run_testsuite(name, cmd, subunit_ops, env=env)
if tmpf is not None:
os.remove(tmpf)
if opts.resetup_environment:
env_manager.teardown_env(envname)
env_manager.teardown_all()
sys.stdout.write("\n")
# if there were any valgrind failures, show them
for fn in os.listdir(prefix):
if fn.startswith("valgrind.log"):
sys.stdout.write("VALGRIND FAILURE\n")
f = open(os.path.join(prefix, fn), 'r')
try:
sys.stdout.write(f.read())
finally:
f.close()
sys.exit(0)
| gpl-3.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/tests/regressiontests/templates/nodelist.py | 50 | 1121 | from unittest import TestCase
from django.template.loader import get_template_from_string
from django.template import VariableNode
class NodelistTest(TestCase):
def test_for(self):
source = '{% for i in 1 %}{{ a }}{% endfor %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_if(self):
source = '{% if x %}{{ a }}{% endif %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifequal(self):
source = '{% ifequal x y %}{{ a }}{% endifequal %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
def test_ifchanged(self):
source = '{% ifchanged x %}{{ a }}{% endifchanged %}'
template = get_template_from_string(source)
vars = template.nodelist.get_nodes_by_type(VariableNode)
self.assertEqual(len(vars), 1)
| apache-2.0 |
eysho/BestKnownGame-Coins---Source | contrib/spendfrom/spendfrom.py | 792 | 10053 | #!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
if not 'rpcport' in config:
config['rpcport'] = 19332 if testnet else 9332
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
tx_size = len(txdata_hex)/2
kb = tx_size/1000 # integer division rounds down
        actual_fee = total_in - total_out
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
        while not unlock_wallet(bitcoind):
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
| mit |
mundofer/napalm-sros | test/unit/conftest.py | 1 | 1635 | """Test fixtures."""
from builtins import super
import pytest
from napalm_base.test import conftest as parent_conftest
from napalm_base.test.double import BaseTestDouble
from napalm_sros import sros
@pytest.fixture(scope='class')
def set_device_parameters(request):
"""Set up the class."""
def fin():
request.cls.device.close()
request.addfinalizer(fin)
request.cls.driver = sros.SRosDriver
request.cls.patched_driver = PatchedSRosDriver
request.cls.vendor = 'sros'
parent_conftest.set_device_parameters(request)
def pytest_generate_tests(metafunc):
"""Generate test cases dynamically."""
parent_conftest.pytest_generate_tests(metafunc, __file__)
class PatchedSRosDriver(sros.SRosDriver):
"""Patched SRos Driver."""
def __init__(self, hostname, username, password, timeout=60, optional_args=None):
"""Patched SRos Driver constructor."""
super().__init__(hostname, username, password, timeout, optional_args)
self.patched_attrs = ['device']
self.device = FakeSRosDevice()
class FakeSRosDevice(BaseTestDouble):
"""SRos device test double."""
def run_commands(self, command_list, encoding='json'):
"""Fake run_commands."""
result = list()
for command in command_list:
filename = '{}.{}'.format(self.sanitize_text(command), encoding)
full_path = self.find_file(filename)
if encoding == 'json':
result.append(self.read_json_file(full_path))
else:
result.append({'output': self.read_txt_file(full_path)})
return result
| apache-2.0 |
pcm17/tensorflow | tensorflow/contrib/learn/python/learn/utils/input_fn_utils.py | 1 | 4651 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for creating input_fns.
InputFnOps is renamed to ServingInputReceiver and moved to
tensorflow/python/estimator/export.py.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import parsing_ops
class InputFnOps(collections.namedtuple('InputFnOps',
['features',
'labels',
'default_inputs'])):
"""A return type for an input_fn.
This return type is currently only supported for serving input_fn.
Training and eval input_fn should return a `(features, labels)` tuple.
The expected return values are:
features: A dict of string to `Tensor` or `SparseTensor`, specifying the
features to be passed to the model.
labels: A `Tensor`, `SparseTensor`, or a dict of string to `Tensor` or
`SparseTensor`, specifying labels for training or eval. For serving, set
`labels` to `None`.
default_inputs: a dict of string to `Tensor` or `SparseTensor`, specifying
the input placeholders (if any) that this input_fn expects to be fed.
Typically, this is used by a serving input_fn, which expects to be fed
serialized `tf.Example` protos.
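  For example, a serving input_fn might construct one as follows (a sketch
  assuming ``import tensorflow as tf`` and an existing ``feature_spec`` dict):
    serialized = tf.placeholder(tf.string, shape=[None], name='tf_example')
    features = tf.parse_example(serialized, feature_spec)
    input_ops = InputFnOps(features, None, {'examples': serialized})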
"""
def build_parsing_serving_input_fn(feature_spec, default_batch_size=None):
"""Build an input_fn appropriate for serving, expecting fed tf.Examples.
Creates an input_fn that expects a serialized tf.Example fed into a string
placeholder. The function parses the tf.Example according to the provided
feature_spec, and returns all parsed Tensors as features. This input_fn is
for use at serving time, so the labels return value is always None.
Args:
feature_spec: a dict of string to `VarLenFeature`/`FixedLenFeature`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
An input_fn suitable for use in serving.
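  Example (a sketch; the feature name is illustrative and ``tf`` refers to
  ``import tensorflow as tf``):
    feature_spec = {'age': tf.FixedLenFeature([1], dtype=tf.int64)}
    serving_input_fn = build_parsing_serving_input_fn(feature_spec)
    # Pass `serving_input_fn` to the estimator's export call when exporting a
    # SavedModel for serving.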
"""
def input_fn():
"""An input_fn that expects a serialized tf.Example."""
serialized_tf_example = array_ops.placeholder(dtype=dtypes.string,
shape=[default_batch_size],
name='input_example_tensor')
inputs = {'examples': serialized_tf_example}
features = parsing_ops.parse_example(serialized_tf_example, feature_spec)
labels = None # these are not known in serving!
return InputFnOps(features, labels, inputs)
return input_fn
def build_default_serving_input_fn(features, default_batch_size=None):
"""Build an input_fn appropriate for serving, expecting feature Tensors.
Creates an input_fn that expects all features to be fed directly.
This input_fn is for use at serving time, so the labels return value is always
None.
Args:
features: a dict of string to `Tensor`.
default_batch_size: the number of query examples expected per batch.
Leave unset for variable batch size (recommended).
Returns:
An input_fn suitable for use in serving.
"""
def input_fn():
"""an input_fn that expects all features to be fed directly."""
features_placeholders = {}
for name, t in features.items():
shape_list = t.get_shape().as_list()
shape_list[0] = default_batch_size
shape = tensor_shape.TensorShape(shape_list)
features_placeholders[name] = array_ops.placeholder(dtype=t.dtype,
shape=shape,
name=t.name)
labels = None # these are not known in serving!
return InputFnOps(features_placeholders, labels, features_placeholders)
return input_fn
| apache-2.0 |
ClaudioNahmad/Servicio-Social | Parametros/CosmoMC/prerrequisitos/plc-2.0/build/pyfits-3.2.2/lib/pyfits/hdu/compressed.py | 3 | 84222 | import ctypes
import math
import re
import time
import warnings
import numpy as np
from pyfits.card import Card
from pyfits.column import Column, ColDefs, _FormatP, TDEF_RE
from pyfits.column import KEYWORD_NAMES as TABLE_KEYWORD_NAMES
from pyfits.fitsrec import FITS_rec
from pyfits.hdu.base import DELAYED, ExtensionHDU
from pyfits.hdu.image import _ImageBaseHDU, ImageHDU
from pyfits.hdu.table import BinTableHDU
from pyfits.header import Header
from pyfits.util import (lazyproperty, _is_pseudo_unsigned, _unsigned_zero,
deprecated, _is_int)
try:
from pyfits import compression
COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = True
except ImportError:
COMPRESSION_SUPPORTED = COMPRESSION_ENABLED = False
# Quantization dithering method constants; these are right out of fitsio.h
NO_DITHER = -1
SUBTRACTIVE_DITHER_1 = 1
SUBTRACTIVE_DITHER_2 = 2
QUANTIZE_METHOD_NAMES = {
NO_DITHER: 'NO_DITHER',
SUBTRACTIVE_DITHER_1: 'SUBTRACTIVE_DITHER_1',
SUBTRACTIVE_DITHER_2: 'SUBTRACTIVE_DITHER_2'
}
DITHER_SEED_CLOCK = 0
DITHER_SEED_CHECKSUM = -1
# Default compression parameter values
DEFAULT_COMPRESSION_TYPE = 'RICE_1'
DEFAULT_QUANTIZE_LEVEL = 16.
DEFAULT_QUANTIZE_METHOD = NO_DITHER
DEFAULT_DITHER_SEED = DITHER_SEED_CLOCK
DEFAULT_HCOMP_SCALE = 0
DEFAULT_HCOMP_SMOOTH = 0
DEFAULT_BLOCK_SIZE = 32
DEFAULT_BYTE_PIX = 4
# CFITSIO version-specific features
if COMPRESSION_SUPPORTED:
try:
CFITSIO_SUPPORTS_GZIPDATA = compression.CFITSIO_VERSION >= 3.28
CFITSIO_SUPPORTS_Q_FORMAT = compression.CFITSIO_VERSION >= 3.35
except AttributeError:
# This generally shouldn't happen unless running setup.py in an
# environment where an old build of pyfits exists
CFITSIO_SUPPORTS_GZIPDATA = True
CFITSIO_SUPPORTS_Q_FORMAT = True
COMPRESSION_KEYWORDS = set(['ZIMAGE', 'ZCMPTYPE', 'ZBITPIX', 'ZNAXIS',
'ZMASKCMP', 'ZSIMPLE', 'ZTENSION', 'ZEXTEND'])
class CompImageHeader(Header):
"""
Header object for compressed image HDUs designed to keep the compression
header and the underlying image header properly synchronized.
This essentially wraps the image header, so that all values are read from
and written to the image header. However, updates to the image header will
also update the table header where appropriate.
"""
# TODO: The difficulty of implementing this screams a need to rewrite this
# module
_keyword_remaps = {
'SIMPLE': 'ZSIMPLE', 'XTENSION': 'ZTENSION', 'BITPIX': 'ZBITPIX',
'NAXIS': 'ZNAXIS', 'EXTEND': 'ZEXTEND', 'BLOCKED': 'ZBLOCKED',
'PCOUNT': 'ZPCOUNT', 'GCOUNT': 'ZGCOUNT', 'CHECKSUM': 'ZHECKSUM',
'DATASUM': 'ZDATASUM'
}
_zdef_re = re.compile(r'(?P<label>^[Zz][a-zA-Z]*)(?P<num>[1-9][0-9 ]*$)?')
_compression_keywords = set(_keyword_remaps.values()).union(
['ZIMAGE', 'ZCMPTYPE', 'ZMASKCMP', 'ZQUANTIZ', 'ZDITHER0'])
_indexed_compression_keywords = set(['ZNAXIS', 'ZTILE', 'ZNAME', 'ZVAL'])
    # TODO: Once in place it should be possible to manage some of this through
# the schema system, but it's not quite ready for that yet. Also it still
# makes more sense to change CompImageHDU to subclass ImageHDU :/
def __init__(self, table_header, image_header=None):
if image_header is None:
image_header = Header()
self._cards = image_header._cards
self._keyword_indices = image_header._keyword_indices
self._rvkc_indices = image_header._rvkc_indices
self._modified = image_header._modified
self._table_header = table_header
    # We need to override any Header methods that can modify the header, and
# ensure that they sync with the underlying _table_header
def __setitem__(self, key, value):
# This isn't pretty, but if the `key` is either an int or a tuple we
# need to figure out what keyword name that maps to before doing
# anything else; these checks will be repeated later in the
# super().__setitem__ call but I don't see another way around it
# without some major refactoring
if self._set_slice(key, value, self):
return
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
            # We don't want to specify an index otherwise, because that will
# break the behavior for new keywords and for commentary keywords
keyword, index = key, None
if self._is_reserved_keyword(keyword):
return
super(CompImageHeader, self).__setitem__(key, value)
if index is not None:
remapped_keyword = self._remap_keyword(keyword)
self._table_header[remapped_keyword, index] = value
# Else this will pass through to ._update
def __delitem__(self, key):
if isinstance(key, slice) or self._haswildcard(key):
# If given a slice pass that on to the superclass and bail out
# early; we only want to make updates to _table_header when given
# a key specifying a single keyword
return super(CompImageHeader, self).__delitem__(key)
if isinstance(key, int):
keyword, index = self._keyword_from_index(key)
elif isinstance(key, tuple):
keyword, index = key
else:
keyword, index = key, None
if key not in self:
raise KeyError("Keyword %r not found." % key)
super(CompImageHeader, self).__delitem__(key)
remapped_keyword = self._remap_keyword(keyword)
if remapped_keyword in self._table_header:
if index is not None:
del self._table_header[(remapped_keyword, index)]
else:
del self._table_header[remapped_keyword]
def append(self, card=None, useblanks=True, bottom=False, end=False):
# This logic unfortunately needs to be duplicated from the base class
# in order to determine the keyword
if isinstance(card, basestring):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif card is None:
card = Card()
elif not isinstance(card, Card):
raise ValueError(
'The value appended to a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: %r' % card)
if self._is_reserved_keyword(card.keyword):
return
super(CompImageHeader, self).append(card=card, useblanks=useblanks,
bottom=bottom, end=end)
remapped_keyword = self._remap_keyword(card.keyword)
card = Card(remapped_keyword, card.value, card.comment)
self._table_header.append(card=card, useblanks=useblanks,
bottom=bottom, end=end)
def insert(self, key, card, useblanks=True, after=False):
if isinstance(key, int):
# Determine condition to pass through to append
if after:
if key == -1:
key = len(self._cards)
else:
key += 1
if key >= len(self._cards):
self.append(card, end=True)
return
if isinstance(card, basestring):
card = Card(card)
elif isinstance(card, tuple):
card = Card(*card)
elif not isinstance(card, Card):
raise ValueError(
'The value inserted into a Header must be either a keyword or '
'(keyword, value, [comment]) tuple; got: %r' % card)
if self._is_reserved_keyword(card.keyword):
return
# Now the tricky part is to determine where to insert in the table
# header. If given a numerical index we need to map that to the
# corresponding index in the table header. Although rare, there may be
# cases where there is no mapping in which case we just try the same
# index
# NOTE: It is crucial that remapped_index in particular is figured out
# before the image header is modified
remapped_index = self._remap_index(key)
remapped_keyword = self._remap_keyword(card.keyword)
super(CompImageHeader, self).insert(key, card, useblanks=useblanks,
after=after)
card = Card(remapped_keyword, card.value, card.comment)
self._table_header.insert(remapped_index, card, useblanks=useblanks,
after=after)
def _update(self, card):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
super(CompImageHeader, self)._update(card)
remapped_keyword = self._remap_keyword(keyword)
self._table_header._update((remapped_keyword,) + card[1:])
# Last piece needed (I think) for synchronizing with the real header
# This one is tricky since _relativeinsert calls insert
def _relativeinsert(self, card, before=None, after=None, replace=False):
keyword = card[0]
if self._is_reserved_keyword(keyword):
return
# Now we have to figure out how to remap 'before' and 'after'
if before is None:
if isinstance(after, int):
remapped_after = self._remap_index(after)
else:
remapped_after = self._remap_keyword(after)
remapped_before = None
else:
if isinstance(before, int):
remapped_before = self._remap_index(before)
else:
remapped_before = self._remap_keyword(before)
remapped_after = None
super(CompImageHeader, self)._relativeinsert(card, before=before,
after=after,
replace=replace)
remapped_keyword = self._remap_keyword(keyword)
card = Card(remapped_keyword, card[1], card[2])
self._table_header._relativeinsert(card, before=remapped_before,
after=remapped_after,
replace=replace)
@classmethod
def _is_reserved_keyword(cls, keyword, warn=True):
msg = ('Keyword %r is reserved for use by the FITS Tiled Image '
'Convention and will not be stored in the header for the '
'image being compressed.' % keyword)
if keyword == 'TFIELDS':
if warn:
warnings.warn(msg)
return True
m = TDEF_RE.match(keyword)
if m and m.group('label').upper() in TABLE_KEYWORD_NAMES:
if warn:
warnings.warn(msg)
return True
m = cls._zdef_re.match(keyword)
if m:
label = m.group('label').upper()
num = m.group('num')
if num is not None and label in cls._indexed_compression_keywords:
if warn:
warnings.warn(msg)
return True
elif label in cls._compression_keywords:
if warn:
warnings.warn(msg)
return True
return False
@classmethod
def _remap_keyword(cls, keyword):
# Given a keyword that one might set on an image, remap that keyword to
# the name used for it in the COMPRESSED HDU header
# This is mostly just a lookup in _keyword_remaps, but needs handling
# for NAXISn keywords
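        # For example, 'XTENSION' maps to 'ZTENSION' and 'NAXIS2' maps to
        # 'ZNAXIS2'; keywords with no remapping are returned unchanged.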
is_naxisn = False
if keyword[:5] == 'NAXIS':
try:
index = int(keyword[5:])
is_naxisn = index > 0
except ValueError:
pass
if is_naxisn:
return 'ZNAXIS%d' % index
# If the keyword does not need to be remapped then just return the
# original keyword
return cls._keyword_remaps.get(keyword, keyword)
def _remap_index(self, idx):
# Given an integer index into this header, map that to the index in the
# table header for the same card. If the card doesn't exist in the
# table header (generally should *not* be the case) this will just
# return the same index
# This *does* also accept a keyword or (keyword, repeat) tuple and
# obtains the associated numerical index with self._cardindex
if not isinstance(idx, int):
idx = self._cardindex(idx)
keyword, repeat = self._keyword_from_index(idx)
remapped_insert_keyword = self._remap_keyword(keyword)
try:
idx = self._table_header._cardindex((remapped_insert_keyword,
repeat))
except (IndexError, KeyError):
pass
return idx
class CompImageHDU(BinTableHDU):
"""
Compressed Image HDU class.
"""
# Maps deprecated keyword arguments to __init__ to their new names
DEPRECATED_KWARGS = {
'compressionType': 'compression_type', 'tileSize': 'tile_size',
'hcompScale': 'hcomp_scale', 'hcompSmooth': 'hcomp_smooth',
'quantizeLevel': 'quantize_level'
}
def __init__(self, data=None, header=None, name=None,
compression_type=DEFAULT_COMPRESSION_TYPE,
tile_size=None,
hcomp_scale=DEFAULT_HCOMP_SCALE,
hcomp_smooth=DEFAULT_HCOMP_SMOOTH,
quantize_level=DEFAULT_QUANTIZE_LEVEL,
quantize_method=DEFAULT_QUANTIZE_METHOD,
dither_seed=DEFAULT_DITHER_SEED,
do_not_scale_image_data=False,
uint=False, scale_back=False, **kwargs):
"""
Parameters
----------
data : array, optional
Uncompressed image data
header : Header instance, optional
Header to be associated with the image; when reading the HDU from a
file (data=DELAYED), the header read from the file
name : str, optional
The ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name ``COMPRESSED_IMAGE`` is
used.
compression_type : str, optional
Compression algorithm: one of
``'RICE_1'``, ``'RICE_ONE'``, ``'PLIO_1'``, ``'GZIP_1'``,
``'GZIP_2'``, ``'HCOMPRESS_1'``
tile_size : int, optional
Compression tile sizes. Default treats each row of image as a
tile.
hcomp_scale : float, optional
HCOMPRESS scale parameter
hcomp_smooth : float, optional
HCOMPRESS smooth parameter
quantize_level : float, optional
Floating point quantization level; see note below
quantize_method : int, optional
Floating point quantization dithering method; can be either
``NO_DITHER`` (-1), ``SUBTRACTIVE_DITHER_1`` (1; default), or
``SUBTRACTIVE_DITHER_2`` (2); see note below
dither_seed : int, optional
Random seed to use for dithering; can be either an integer in the
range 1 to 1000 (inclusive), ``DITHER_SEED_CLOCK`` (0; default), or
``DITHER_SEED_CHECKSUM`` (-1); see note below
Notes
-----
The pyfits module supports 2 methods of image compression.
1) The entire FITS file may be externally compressed with the gzip
or pkzip utility programs, producing a ``*.gz`` or ``*.zip``
file, respectively. When reading compressed files of this type,
pyfits first uncompresses the entire file into a temporary file
before performing the requested read operations. The pyfits
module does not support writing to these types of compressed
files. This type of compression is supported in the `_File`
class, not in the `CompImageHDU` class. The file compression
type is recognized by the ``.gz`` or ``.zip`` file name
extension.
2) The `CompImageHDU` class supports the FITS tiled image
compression convention in which the image is subdivided into a
grid of rectangular tiles, and each tile of pixels is
individually compressed. The details of this FITS compression
convention are described at the `FITS Support Office web site
<http://fits.gsfc.nasa.gov/registry/tilecompression.html>`_.
Basically, the compressed image tiles are stored in rows of a
               variable length array column in a FITS binary table. The
pyfits module recognizes that this binary table extension
contains an image and treats it as if it were an image
extension. Under this tile-compression format, FITS header
keywords remain uncompressed. At this time, pyfits does not
support the ability to extract and uncompress sections of the
image without having to uncompress the entire image.
The pyfits module supports 3 general-purpose compression algorithms
plus one other special-purpose compression technique that is designed
for data masks with positive integer pixel values. The 3 general
purpose algorithms are GZIP, Rice, and HCOMPRESS, and the
special-purpose technique is the IRAF pixel list compression technique
(PLIO). The ``compression_type`` parameter defines the compression
algorithm to be used.
The FITS image can be subdivided into any desired rectangular grid of
compression tiles. With the GZIP, Rice, and PLIO algorithms, the
default is to take each row of the image as a tile. The HCOMPRESS
algorithm is inherently 2-dimensional in nature, so the default in this
case is to take 16 rows of the image per tile. In most cases, it makes
little difference what tiling pattern is used, so the default tiles are
usually adequate. In the case of very small images, it could be more
efficient to compress the whole image as a single tile. Note that the
image dimensions are not required to be an integer multiple of the tile
dimensions; if not, then the tiles at the edges of the image will be
smaller than the other tiles. The ``tile_size`` parameter may be
provided as a list of tile sizes, one for each dimension in the image.
For example a ``tile_size`` value of ``[100,100]`` would divide a 300 X
300 image into 9 100 X 100 tiles.
The 4 supported image compression algorithms are all 'lossless' when
applied to integer FITS images; the pixel values are preserved exactly
with no loss of information during the compression and uncompression
process. In addition, the HCOMPRESS algorithm supports a 'lossy'
        compression mode that will produce a larger amount of image compression.
This is achieved by specifying a non-zero value for the ``hcomp_scale``
parameter. Since the amount of compression that is achieved depends
directly on the RMS noise in the image, it is usually more convenient
to specify the ``hcomp_scale`` factor relative to the RMS noise.
Setting ``hcomp_scale = 2.5`` means use a scale factor that is 2.5
times the calculated RMS noise in the image tile. In some cases it may
be desirable to specify the exact scaling to be used, instead of
specifying it relative to the calculated noise value. This may be done
by specifying the negative of the desired scale value (typically in the
range -2 to -100).
Very high compression factors (of 100 or more) can be achieved by using
        large ``hcomp_scale`` values; however, this can produce undesirable
        'blocky' artifacts in the compressed image. A variation of the
HCOMPRESS algorithm (called HSCOMPRESS) can be used in this case to
apply a small amount of smoothing of the image when it is uncompressed
to help cover up these artifacts. This smoothing is purely cosmetic
and does not cause any significant change to the image pixel values.
Setting the ``hcomp_smooth`` parameter to 1 will engage the smoothing
algorithm.
Floating point FITS images (which have ``BITPIX`` = -32 or -64) usually
contain too much 'noise' in the least significant bits of the mantissa
of the pixel values to be effectively compressed with any lossless
algorithm. Consequently, floating point images are first quantized
into scaled integer pixel values (and thus throwing away much of the
noise) before being compressed with the specified algorithm (either
GZIP, RICE, or HCOMPRESS). This technique produces much higher
compression factors than simply using the GZIP utility to externally
compress the whole FITS file, but it also means that the original
floating point value pixel values are not exactly perserved. When done
properly, this integer scaling technique will only discard the
insignificant noise while still preserving all the real imformation in
the image. The amount of precision that is retained in the pixel
values is controlled by the ``quantize_level`` parameter. Larger
values will result in compressed images whose pixels more closely match
the floating point pixel values, but at the same time the amount of
compression that is achieved will be reduced. Users should experiment
with different values for this parameter to determine the optimal value
that preserves all the useful information in the image, without
needlessly preserving all the 'noise' which will hurt the compression
efficiency.
The default value for the ``quantize_level`` scale factor is 16, which
means that scaled integer pixel values will be quantized such that the
difference between adjacent integer values will be 1/16th of the noise
level in the image background. An optimized algorithm is used to
accurately estimate the noise in the image. As an example, if the RMS
noise in the background pixels of an image = 32.0, then the spacing
between adjacent scaled integer pixel values will equal 2.0 by default.
Note that the RMS noise is independently calculated for each tile of
the image, so the resulting integer scaling factor may fluctuate
        slightly for each tile. In some cases, it may be desirable to specify
        the exact quantization level to be used, instead of specifying it
        relative to the calculated noise value. This may be done by specifying
        the negative of the desired quantization level for the value of
        ``quantize_level``. In the previous example, one could specify
        ``quantize_level = -2.0`` so that the quantized integer levels differ
        by 2.0. Larger negative values for ``quantize_level`` mean that the
levels are more coarsely-spaced, and will produce higher compression
factors.
The quantization algorithm can also apply one of two random dithering
methods in order to reduce bias in the measured intensity of background
regions. The default method, specified with the constant
``SUBTRACTIVE_DITHER_1`` adds dithering to the zero-point of the
quantization array itself rather than adding noise to the actual image.
        The random noise is added on a pixel-by-pixel basis, so in order to
restore each pixel from its integer value to its floating point value
it is necessary to replay the same sequence of random numbers for each
pixel (see below). The other method, ``SUBTRACTIVE_DITHER_2``, is
exactly like the first except that before dithering any pixel with a
floating point value of ``0.0`` is replaced with the special integer
value ``-2147483647``. When the image is uncompressed, pixels with
this value are restored back to ``0.0`` exactly. Finally, a value of
``NO_DITHER`` disables dithering entirely.
As mentioned above, when using the subtractive dithering algorithm it
is necessary to be able to generate a (pseudo-)random sequence of noise
for each pixel, and replay that same sequence upon decompressing. To
facilitate this, a random seed between 1 and 10000 (inclusive) is used
to seed a random number generator, and that seed is stored in the
``ZDITHER0`` keyword in the header of the compressed HDU. In order to
use that seed to generate the same sequence of random numbers the same
random number generator must be used at compression and decompression
time; for that reason the tiled image convention provides an
implementation of a very simple pseudo-random number generator. The
seed itself can be provided in one of three ways, controllable by the
``dither_seed`` argument: It may be specified manually, or it may be
generated arbitrarily based on the system's clock
(``DITHER_SEED_CLOCK``) or based on a checksum of the pixels in the
image's first tile (``DITHER_SEED_CHECKSUM``). The clock-based method
is the default, and is sufficient to ensure that the value is
reasonably "arbitrary" and that the same seed is unlikely to be
generated sequentially. The checksum method, on the other hand,
ensures that the same seed is used every time for a specific image.
This is particularly useful for software testing as it ensures that the
same image will always use the same seed.
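        Examples
        --------
        A minimal sketch (the array contents and the output file name are
        arbitrary)::
            import numpy as np
            import pyfits
            data = np.arange(100, dtype=np.float32).reshape(10, 10)
            hdu = pyfits.CompImageHDU(data, compression_type='RICE_1',
                                      quantize_level=16.0)
            pyfits.HDUList([pyfits.PrimaryHDU(), hdu]).writeto('compressed.fits')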
"""
if not COMPRESSION_SUPPORTED:
raise Exception('The pyfits.compression module is not available. '
'Creation of compressed image HDUs is disabled.')
# Handle deprecated keyword arguments
compression_opts = {}
for oldarg, newarg in self.DEPRECATED_KWARGS.items():
if oldarg in kwargs:
warnings.warn('Keyword argument %s to %s is pending '
'deprecation; use %s instead' %
(oldarg, self.__class__.__name__, newarg),
PendingDeprecationWarning)
compression_opts[newarg] = kwargs[oldarg]
del kwargs[oldarg]
else:
compression_opts[newarg] = locals()[newarg]
        # Include newer compression options that don't require backwards
# compatibility with deprecated spellings
compression_opts['quantize_method'] = quantize_method
compression_opts['dither_seed'] = dither_seed
if data is DELAYED:
# Reading the HDU from a file
super(CompImageHDU, self).__init__(data=data, header=header)
else:
# Create at least a skeleton HDU that matches the input
# header and data (if any were input)
super(CompImageHDU, self).__init__(data=None, header=header)
# Store the input image data
self.data = data
# Update the table header (_header) to the compressed
# image format and to match the input data (if any);
# Create the image header (_image_header) from the input
# image header (if any) and ensure it matches the input
# data; Create the initially empty table data array to
# hold the compressed data.
self._update_header_data(header, name, **compression_opts)
        # TODO: A lot of this should be passed on to an internal image HDU or
# something like that, see ticket #88
self._do_not_scale_image_data = do_not_scale_image_data
self._uint = uint
self._scale_back = scale_back
self._axes = [self._header.get('ZNAXIS' + str(axis + 1), 0)
for axis in xrange(self._header.get('ZNAXIS', 0))]
# store any scale factors from the table header
if do_not_scale_image_data:
self._bzero = 0
self._bscale = 1
else:
self._bzero = self._header.get('BZERO', 0)
self._bscale = self._header.get('BSCALE', 1)
self._bitpix = self._header['ZBITPIX']
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
self._orig_bitpix = self._bitpix
@classmethod
def match_header(cls, header):
card = header.cards[0]
if card.keyword != 'XTENSION':
return False
xtension = card.value
if isinstance(xtension, basestring):
xtension = xtension.rstrip()
if xtension not in ('BINTABLE', 'A3DTABLE'):
return False
if 'ZIMAGE' not in header or header['ZIMAGE'] != True:
return False
if COMPRESSION_SUPPORTED and COMPRESSION_ENABLED:
return True
elif not COMPRESSION_SUPPORTED:
warnings.warn('Failure matching header to a compressed image '
'HDU: The compression module is not available.\n'
'The HDU will be treated as a Binary Table HDU.')
return False
else:
# Compression is supported but disabled; just pass silently (#92)
return False
def _update_header_data(self, image_header,
name=None,
compression_type=None,
tile_size=None,
hcomp_scale=None,
hcomp_smooth=None,
quantize_level=None,
quantize_method=None,
dither_seed=None):
"""
Update the table header (`_header`) to the compressed
image format and to match the input data (if any). Create
the image header (`_image_header`) from the input image
header (if any) and ensure it matches the input
data. Create the initially-empty table data array to hold
the compressed data.
This method is mainly called internally, but a user may wish to
call this method after assigning new data to the `CompImageHDU`
object that is of a different type.
Parameters
----------
image_header : Header instance
header to be associated with the image
name : str, optional
the ``EXTNAME`` value; if this value is `None`, then the name from
the input image header will be used; if there is no name in the
input image header then the default name 'COMPRESSED_IMAGE' is used
compression_type : str, optional
compression algorithm 'RICE_1', 'PLIO_1', 'GZIP_1', 'HCOMPRESS_1';
if this value is `None`, use value already in the header; if no
value already in the header, use 'RICE_1'
tile_size : sequence of int, optional
compression tile sizes as a list; if this value is `None`, use
value already in the header; if no value already in the header,
treat each row of image as a tile
hcomp_scale : float, optional
HCOMPRESS scale parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 1
hcomp_smooth : float, optional
HCOMPRESS smooth parameter; if this value is `None`, use the value
already in the header; if no value already in the header, use 0
quantize_level : float, optional
floating point quantization level; if this value is `None`, use the
value already in the header; if no value already in header, use 16
quantize_method : int, optional
floating point quantization dithering method; can be either
NO_DITHER (-1), SUBTRACTIVE_DITHER_1 (1; default), or
SUBTRACTIVE_DITHER_2 (2)
dither_seed : int, optional
random seed to use for dithering; can be either an integer in the
range 1 to 1000 (inclusive), DITHER_SEED_CLOCK (0; default), or
DITHER_SEED_CHECKSUM (-1)
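        As a sketch, a caller that has assigned new data of a different type
        to a hypothetical ``hdu`` might then refresh the headers with::
            hdu._update_header_data(hdu.header, compression_type='GZIP_1')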
"""
image_hdu = ImageHDU(data=self.data, header=self._header)
self._image_header = CompImageHeader(self._header, image_hdu.header)
self._axes = image_hdu._axes
del image_hdu
# Determine based on the size of the input data whether to use the Q
# column format to store compressed data or the P format.
# The Q format is used only if the uncompressed data is larger than
# 4 GB. This is not a perfect heuristic, as one can contrive an input
# array which, when compressed, the entire binary table representing
# the compressed data is larger than 4GB. That said, this is the same
# heuristic used by CFITSIO, so this should give consistent results.
# And the cases where this heuristic is insufficient are extreme and
# almost entirely contrived corner cases, so it will do for now
huge_hdu = self.data.nbytes > 2 ** 32
if huge_hdu and not CFITSIO_SUPPORTS_Q_FORMAT:
raise IOError(
"PyFITS cannot compress images greater than 4 GB in size "
"(%s is %s bytes) without CFITSIO >= 3.35" %
((self.name, self.ver), self.data.nbytes))
# Update the extension name in the table header
if not name and not 'EXTNAME' in self._header:
name = 'COMPRESSED_IMAGE'
if name:
self._header.set('EXTNAME', name,
'name of this binary table extension',
after='TFIELDS')
self.name = name
else:
self.name = self._header['EXTNAME']
# Set the compression type in the table header.
if compression_type:
if compression_type not in ['RICE_1', 'GZIP_1', 'PLIO_1',
'HCOMPRESS_1']:
warnings.warn('Unknown compression type provided. Default '
'(%s) compression used.' %
DEFAULT_COMPRESSION_TYPE)
compression_type = DEFAULT_COMPRESSION_TYPE
self._header.set('ZCMPTYPE', compression_type,
'compression algorithm', after='TFIELDS')
else:
compression_type = self._header.get('ZCMPTYPE',
DEFAULT_COMPRESSION_TYPE)
# If the input image header had BSCALE/BZERO cards, then insert
# them in the table header.
if image_header:
bzero = image_header.get('BZERO', 0.0)
bscale = image_header.get('BSCALE', 1.0)
after_keyword = 'EXTNAME'
if bscale != 1.0:
self._header.set('BSCALE', bscale, after=after_keyword)
after_keyword = 'BSCALE'
if bzero != 0.0:
self._header.set('BZERO', bzero, after=after_keyword)
bitpix_comment = image_header.comments['BITPIX']
naxis_comment = image_header.comments['NAXIS']
else:
bitpix_comment = 'data type of original image'
naxis_comment = 'dimension of original image'
# Set the label for the first column in the table
self._header.set('TTYPE1', 'COMPRESSED_DATA', 'label for field 1',
after='TFIELDS')
# Set the data format for the first column. It is dependent
# on the requested compression type.
if compression_type == 'PLIO_1':
tform1 = '1QI' if huge_hdu else '1PI'
else:
tform1 = '1QB' if huge_hdu else '1PB'
self._header.set('TFORM1', tform1,
'data format of field: variable length array',
after='TTYPE1')
# Create the first column for the table. This column holds the
# compressed data.
col1 = Column(name=self._header['TTYPE1'], format=tform1)
# Create the additional columns required for floating point
# data and calculate the width of the output table.
zbitpix = self._image_header['BITPIX']
if zbitpix < 0 and quantize_level != 0.0:
# floating point image has 'COMPRESSED_DATA',
# 'UNCOMPRESSED_DATA', 'ZSCALE', and 'ZZERO' columns (unless using
# lossless compression, per CFITSIO)
ncols = 4
            # CFITSIO 3.28 and up automatically use the GZIP_COMPRESSED_DATA
            # column to store floating point data that couldn't be quantized,
            # instead
# of the UNCOMPRESSED_DATA column. There's no way to control
# this behavior so the only way to determine which behavior will
# be employed is via the CFITSIO version
if CFITSIO_SUPPORTS_GZIPDATA:
ttype2 = 'GZIP_COMPRESSED_DATA'
# The required format for the GZIP_COMPRESSED_DATA is actually
# missing from the standard docs, but CFITSIO suggests it
# should be 1PB, which is logical.
tform2 = '1QB' if huge_hdu else '1PB'
else:
# Q format is not supported for UNCOMPRESSED_DATA columns.
ttype2 = 'UNCOMPRESSED_DATA'
if zbitpix == 8:
tform2 = '1QB' if huge_hdu else '1PB'
elif zbitpix == 16:
tform2 = '1QI' if huge_hdu else '1PI'
elif zbitpix == 32:
tform2 = '1QJ' if huge_hdu else '1PJ'
elif zbitpix == -32:
tform2 = '1QE' if huge_hdu else '1PE'
else:
tform2 = '1QD' if huge_hdu else '1PD'
# Set up the second column for the table that will hold any
# uncompressable data.
self._header.set('TTYPE2', ttype2, 'label for field 2',
after='TFORM1')
self._header.set('TFORM2', tform2,
'data format of field: variable length array',
after='TTYPE2')
col2 = Column(name=ttype2, format=tform2)
# Set up the third column for the table that will hold
# the scale values for quantized data.
self._header.set('TTYPE3', 'ZSCALE', 'label for field 3',
after='TFORM2')
self._header.set('TFORM3', '1D',
'data format of field: 8-byte DOUBLE',
after='TTYPE3')
col3 = Column(name=self._header['TTYPE3'],
format=self._header['TFORM3'])
# Set up the fourth column for the table that will hold
# the zero values for the quantized data.
self._header.set('TTYPE4', 'ZZERO', 'label for field 4',
after='TFORM3')
self._header.set('TFORM4', '1D',
'data format of field: 8-byte DOUBLE',
after='TTYPE4')
after = 'TFORM4'
col4 = Column(name=self._header['TTYPE4'],
format=self._header['TFORM4'])
# Create the ColDefs object for the table
cols = ColDefs([col1, col2, col3, col4])
else:
# default table has just one 'COMPRESSED_DATA' column
ncols = 1
after = 'TFORM1'
# remove any header cards for the additional columns that
# may be left over from the previous data
to_remove = ['TTYPE2', 'TFORM2', 'TTYPE3', 'TFORM3', 'TTYPE4',
'TFORM4']
for k in to_remove:
try:
del self._header[k]
except KeyError:
pass
# Create the ColDefs object for the table
cols = ColDefs([col1])
# Update the table header with the width of the table, the
# number of fields in the table, the indicator for a compressed
# image HDU, the data type of the image data and the number of
# dimensions in the image data array.
self._header.set('NAXIS1', cols.dtype.itemsize,
'width of table in bytes')
self._header.set('TFIELDS', ncols, 'number of fields in each row')
self._header.set('ZIMAGE', True, 'extension contains compressed image',
after=after)
self._header.set('ZBITPIX', zbitpix,
bitpix_comment, after='ZIMAGE')
self._header.set('ZNAXIS', self._image_header['NAXIS'], naxis_comment,
after='ZBITPIX')
        # Strip the table header of all the ZNAXISn and ZTILEn keywords
# that may be left over from the previous data
idx = 1
while True:
try:
del self._header['ZNAXIS' + str(idx)]
del self._header['ZTILE' + str(idx)]
idx += 1
except KeyError:
break
# Verify that any input tile size parameter is the appropriate
# size to match the HDU's data.
naxis = self._image_header['NAXIS']
if not tile_size:
tile_size = []
elif len(tile_size) != naxis:
warnings.warn('Provided tile size not appropriate for the data. '
'Default tile size will be used.')
tile_size = []
# Set default tile dimensions for HCOMPRESS_1
if compression_type == 'HCOMPRESS_1':
if (self._image_header['NAXIS1'] < 4 or
self._image_header['NAXIS2'] < 4):
raise ValueError('Hcompress minimum image dimension is '
'4 pixels')
elif tile_size:
if tile_size[0] < 4 or tile_size[1] < 4:
# user specified tile size is too small
raise ValueError('Hcompress minimum tile dimension is '
'4 pixels')
major_dims = len(filter(lambda x: x > 1, tile_size))
if major_dims > 2:
raise ValueError(
                        'HCOMPRESS can only support 2-dimensional tile sizes. '
'All but two of the tile_size dimensions must be set '
'to 1.')
if tile_size and (tile_size[0] == 0 and tile_size[1] == 0):
# compress the whole image as a single tile
tile_size[0] = self._image_header['NAXIS1']
tile_size[1] = self._image_header['NAXIS2']
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_size[i] = 1
elif not tile_size:
# The Hcompress algorithm is inherently 2D in nature, so the
# row by row tiling that is used for other compression
# algorithms is not appropriate. If the image has less than 30
# rows, then the entire image will be compressed as a single
# tile. Otherwise the tiles will consist of 16 rows of the
# image. This keeps the tiles to a reasonable size, and it
# also includes enough rows to allow good compression
                # efficiency. If the last tile of the image happens to contain
# less than 4 rows, then find another tile size with between 14
# and 30 rows (preferably even), so that the last tile has at
# least 4 rows.
# 1st tile dimension is the row length of the image
tile_size.append(self._image_header['NAXIS1'])
if self._image_header['NAXIS2'] <= 30:
                    tile_size.append(self._image_header['NAXIS2'])
else:
# look for another good tile dimension
naxis2 = self._image_header['NAXIS2']
for dim in [16, 24, 20, 30, 28, 26, 22, 18, 14]:
if naxis2 % dim == 0 or naxis2 % dim > 3:
tile_size.append(dim)
break
else:
tile_size.append(17)
for i in range(2, naxis):
# set all higher tile dimensions = 1
tile_size.append(1)
# check if requested tile size causes the last tile to have
# less than 4 pixels
remain = self._image_header['NAXIS1'] % tile_size[0] # 1st dimen
if remain > 0 and remain < 4:
tile_size[0] += 1 # try increasing tile size by 1
remain = self._image_header['NAXIS1'] % tile_size[0]
if remain > 0 and remain < 4:
raise ValueError('Last tile along 1st dimension has '
'less than 4 pixels')
remain = self._image_header['NAXIS2'] % tile_size[1] # 2nd dimen
if remain > 0 and remain < 4:
tile_size[1] += 1 # try increasing tile size by 1
remain = self._image_header['NAXIS2'] % tile_size[1]
if remain > 0 and remain < 4:
raise ValueError('Last tile along 2nd dimension has '
'less than 4 pixels')
# Set up locations for writing the next cards in the header.
last_znaxis = 'ZNAXIS'
if self._image_header['NAXIS'] > 0:
after1 = 'ZNAXIS1'
else:
after1 = 'ZNAXIS'
# Calculate the number of rows in the output table and
# write the ZNAXISn and ZTILEn cards to the table header.
nrows = 1
for idx, axis in enumerate(self._axes):
naxis = 'NAXIS' + str(idx + 1)
znaxis = 'ZNAXIS' + str(idx + 1)
ztile = 'ZTILE' + str(idx + 1)
if tile_size and len(tile_size) >= idx + 1:
ts = tile_size[idx]
else:
if not ztile in self._header:
# Default tile size
if not idx:
ts = self._image_header['NAXIS1']
else:
ts = 1
else:
ts = self._header[ztile]
tile_size.append(ts)
nrows = nrows * ((axis - 1) // ts + 1)
if image_header and naxis in image_header:
self._header.set(znaxis, axis, image_header.comments[naxis],
after=last_znaxis)
else:
self._header.set(znaxis, axis,
'length of original image axis',
after=last_znaxis)
self._header.set(ztile, ts, 'size of tiles to be compressed',
after=after1)
last_znaxis = znaxis
after1 = ztile
# Set the NAXIS2 header card in the table hdu to the number of
# rows in the table.
self._header.set('NAXIS2', nrows, 'number of rows in table')
self.columns = cols
# Set the compression parameters in the table header.
# First, setup the values to be used for the compression parameters
# in case none were passed in. This will be either the value
# already in the table header for that parameter or the default
# value.
idx = 1
while True:
zname = 'ZNAME' + str(idx)
if zname not in self._header:
break
zval = 'ZVAL' + str(idx)
if self._header[zname] == 'NOISEBIT':
if quantize_level is None:
quantize_level = self._header[zval]
if self._header[zname] == 'SCALE ':
if hcomp_scale is None:
hcomp_scale = self._header[zval]
if self._header[zname] == 'SMOOTH ':
if hcomp_smooth is None:
hcomp_smooth = self._header[zval]
idx += 1
if quantize_level is None:
quantize_level = DEFAULT_QUANTIZE_LEVEL
if hcomp_scale is None:
hcomp_scale = DEFAULT_HCOMP_SCALE
if hcomp_smooth is None:
            hcomp_smooth = DEFAULT_HCOMP_SMOOTH
# Next, strip the table header of all the ZNAMEn and ZVALn keywords
# that may be left over from the previous data
idx = 1
while True:
zname = 'ZNAME' + str(idx)
if zname not in self._header:
break
zval = 'ZVAL' + str(idx)
del self._header[zname]
del self._header[zval]
idx += 1
# Finally, put the appropriate keywords back based on the
# compression type.
after_keyword = 'ZCMPTYPE'
idx = 1
if compression_type == 'RICE_1':
self._header.set('ZNAME1', 'BLOCKSIZE', 'compression block size',
after=after_keyword)
self._header.set('ZVAL1', DEFAULT_BLOCK_SIZE, 'pixels per block',
after='ZNAME1')
self._header.set('ZNAME2', 'BYTEPIX',
'bytes per pixel (1, 2, 4, or 8)', after='ZVAL1')
if self._header['ZBITPIX'] == 8:
bytepix = 1
elif self._header['ZBITPIX'] == 16:
bytepix = 2
else:
bytepix = DEFAULT_BYTE_PIX
self._header.set('ZVAL2', bytepix,
'bytes per pixel (1, 2, 4, or 8)',
after='ZNAME2')
after_keyword = 'ZVAL2'
idx = 3
elif compression_type == 'HCOMPRESS_1':
self._header.set('ZNAME1', 'SCALE', 'HCOMPRESS scale factor',
after=after_keyword)
self._header.set('ZVAL1', hcomp_scale, 'HCOMPRESS scale factor',
after='ZNAME1')
self._header.set('ZNAME2', 'SMOOTH', 'HCOMPRESS smooth option',
after='ZVAL1')
self._header.set('ZVAL2', hcomp_smooth, 'HCOMPRESS smooth option',
after='ZNAME2')
after_keyword = 'ZVAL2'
idx = 3
if self._image_header['BITPIX'] < 0: # floating point image
self._header.set('ZNAME' + str(idx), 'NOISEBIT',
'floating point quantization level',
after=after_keyword)
self._header.set('ZVAL' + str(idx), quantize_level,
'floating point quantization level',
after='ZNAME' + str(idx))
# Add the dither method and seed
if quantize_method:
if quantize_method not in [NO_DITHER, SUBTRACTIVE_DITHER_1,
SUBTRACTIVE_DITHER_2]:
name = QUANTIZE_METHOD_NAMES[DEFAULT_QUANTIZE_METHOD]
warnings.warn('Unknown quantization method provided. '
'Default method (%s) used.' % name)
quantize_method = DEFAULT_QUANTIZE_METHOD
if quantize_method == NO_DITHER:
zquantiz_comment = 'No dithering during quantization'
else:
zquantiz_comment = 'Pixel Quantization Algorithm'
self._header.set('ZQUANTIZ',
QUANTIZE_METHOD_NAMES[quantize_method],
zquantiz_comment,
after='ZVAL' + str(idx))
else:
# If the ZQUANTIZ keyword is missing the default is to assume
# no dithering, rather than whatever DEFAULT_QUANTIZE_METHOD
# is set to
quantize_method = self._header.get('ZQUANTIZ', NO_DITHER)
if isinstance(quantize_method, basestring):
for k, v in QUANTIZE_METHOD_NAMES.items():
if v.upper() == quantize_method:
quantize_method = k
break
else:
quantize_method = NO_DITHER
if quantize_method == NO_DITHER:
if 'ZDITHER0' in self._header:
# If dithering isn't being used then there's no reason to
# keep the ZDITHER0 keyword
del self._header['ZDITHER0']
else:
if dither_seed:
dither_seed = self._generate_dither_seed(dither_seed)
elif 'ZDITHER0' in self._header:
dither_seed = self._header['ZDITHER0']
else:
dither_seed = self._generate_dither_seed(
DEFAULT_DITHER_SEED)
self._header.set('ZDITHER0', dither_seed,
'dithering offset when quantizing floats',
after='ZQUANTIZ')
if image_header:
# Move SIMPLE card from the image header to the
# table header as ZSIMPLE card.
if 'SIMPLE' in image_header:
self._header.set('ZSIMPLE', image_header['SIMPLE'],
image_header.comments['SIMPLE'],
before='ZBITPIX')
# Move EXTEND card from the image header to the
# table header as ZEXTEND card.
if 'EXTEND' in image_header:
self._header.set('ZEXTEND', image_header['EXTEND'],
image_header.comments['EXTEND'])
# Move BLOCKED card from the image header to the
# table header as ZBLOCKED card.
if 'BLOCKED' in image_header:
self._header.set('ZBLOCKED', image_header['BLOCKED'],
image_header.comments['BLOCKED'])
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if 'XTENSION' in image_header:
self._header.set('ZTENSION', 'IMAGE',
image_header.comments['XTENSION'],
before='ZBITPIX')
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if 'PCOUNT' in image_header:
self._header.set('ZPCOUNT', image_header['PCOUNT'],
image_header.comments['PCOUNT'],
after=last_znaxis)
if 'GCOUNT' in image_header:
self._header.set('ZGCOUNT', image_header['GCOUNT'],
image_header.comments['GCOUNT'],
after='ZPCOUNT')
# Move CHECKSUM and DATASUM cards from the image header to the
            # table header as ZHECKSUM and ZDATASUM cards.
if 'CHECKSUM' in image_header:
self._header.set('ZHECKSUM', image_header['CHECKSUM'],
image_header.comments['CHECKSUM'])
if 'DATASUM' in image_header:
self._header.set('ZDATASUM', image_header['DATASUM'],
image_header.comments['DATASUM'])
else:
# Move XTENSION card from the image header to the
# table header as ZTENSION card.
# Since we only handle compressed IMAGEs, ZTENSION should
# always be IMAGE, even if the caller has passed in a header
# for some other type of extension.
if 'XTENSION' in self._image_header:
self._header.set('ZTENSION', 'IMAGE',
self._image_header.comments['XTENSION'],
before='ZBITPIX')
# Move PCOUNT and GCOUNT cards from image header to the table
# header as ZPCOUNT and ZGCOUNT cards.
if 'PCOUNT' in self._image_header:
self._header.set('ZPCOUNT', self._image_header['PCOUNT'],
self._image_header.comments['PCOUNT'],
after=last_znaxis)
if 'GCOUNT' in self._image_header:
self._header.set('ZGCOUNT', self._image_header['GCOUNT'],
self._image_header.comments['GCOUNT'],
after='ZPCOUNT')
# When we have an image checksum we need to ensure that the same
# number of blank cards exist in the table header as there were in
# the image header. This allows those blank cards to be carried
# over to the image header when the hdu is uncompressed.
if 'ZHECKSUM' in self._header:
required_blanks = image_header._countblanks()
image_blanks = self._image_header._countblanks()
table_blanks = self._header._countblanks()
for _ in range(required_blanks - image_blanks):
self._image_header.append()
table_blanks += 1
for _ in range(required_blanks - table_blanks):
self._header.append()
@deprecated('3.2', alternative='(refactor your code)', pending=True)
def updateHeaderData(self, image_header,
name=None,
compressionType=None,
tileSize=None,
hcompScale=None,
hcompSmooth=None,
quantizeLevel=None):
self._update_header_data(image_header, name=name,
compression_type=compressionType,
tile_size=tileSize,
hcomp_scale=hcompScale,
hcomp_smooth=hcompSmooth,
quantize_level=quantizeLevel)
@lazyproperty
def data(self):
# The data attribute is the image data (not the table data).
data = compression.decompress_hdu(self)
# Scale the data if necessary
if (self._orig_bzero != 0 or self._orig_bscale != 1):
new_dtype = self._dtype_for_bitpix()
data = np.array(data, dtype=new_dtype)
zblank = None
if 'ZBLANK' in self.compressed_data.columns.names:
zblank = self.compressed_data['ZBLANK']
else:
if 'ZBLANK' in self._header:
zblank = np.array(self._header['ZBLANK'], dtype='int32')
elif 'BLANK' in self._header:
zblank = np.array(self._header['BLANK'], dtype='int32')
if zblank is not None:
blanks = (data == zblank)
if self._bscale != 1:
np.multiply(data, self._bscale, data)
if self._bzero != 0:
data += self._bzero
if zblank is not None:
data = np.where(blanks, np.nan, data)
# Right out of _ImageBaseHDU.data
self._update_header_scale_info(data.dtype)
return data
@data.setter
def data(self, data):
if (data is not None) and (not isinstance(data, np.ndarray) or
data.dtype.fields is not None):
raise TypeError('CompImageHDU data has incorrect type:%s; '
'dtype.fields = %s' %
(type(data), data.dtype.fields))
@lazyproperty
def compressed_data(self):
# First we will get the table data (the compressed
# data) from the file, if there is any.
compressed_data = super(BinTableHDU, self).data
if isinstance(compressed_data, np.rec.recarray):
del self.data
return compressed_data
else:
# This will actually set self.compressed_data with the
# pre-allocated space for the compression data; this is something I
# might do away with in the future
self._update_compressed_data()
return self.compressed_data
@lazyproperty
    @deprecated('3.2', alternative='the `.compressed_data` attribute',
pending=True)
def compData(self):
return self.compressed_data
@property
def shape(self):
"""
Shape of the image array--should be equivalent to ``self.data.shape``.
"""
# Determine from the values read from the header
return tuple(reversed(self._axes))
@lazyproperty
def header(self):
# The header attribute is the header for the image data. It
# is not actually stored in the object dictionary. Instead,
# the _image_header is stored. If the _image_header attribute
        # has already been defined we just return it. If not, we must
# create it from the table header (the _header attribute).
if hasattr(self, '_image_header'):
return self._image_header
# Start with a copy of the table header.
image_header = self._header.copy()
# Delete cards that are related to the table. And move
# the values of those cards that relate to the image from
# their corresponding table cards. These include
# ZBITPIX -> BITPIX, ZNAXIS -> NAXIS, and ZNAXISn -> NAXISn.
for keyword in list(image_header):
if CompImageHeader._is_reserved_keyword(keyword, warn=False):
del image_header[keyword]
if 'ZSIMPLE' in self._header:
image_header.set('SIMPLE', self._header['ZSIMPLE'],
self._header.comments['ZSIMPLE'], before=0)
elif 'ZTENSION' in self._header:
if self._header['ZTENSION'] != 'IMAGE':
warnings.warn("ZTENSION keyword in compressed "
"extension != 'IMAGE'")
image_header.set('XTENSION', 'IMAGE',
self._header.comments['ZTENSION'], before=0)
else:
image_header.set('XTENSION', 'IMAGE', before=0)
image_header.set('BITPIX', self._header['ZBITPIX'],
self._header.comments['ZBITPIX'], before=1)
image_header.set('NAXIS', self._header['ZNAXIS'],
self._header.comments['ZNAXIS'], before=2)
last_naxis = 'NAXIS'
for idx in range(image_header['NAXIS']):
znaxis = 'ZNAXIS' + str(idx + 1)
naxis = znaxis[1:]
image_header.set(naxis, self._header[znaxis],
self._header.comments[znaxis],
after=last_naxis)
last_naxis = naxis
# Delete any other spurious NAXISn keywords:
naxis = image_header['NAXIS']
for keyword in list(image_header['NAXIS?*']):
try:
n = int(keyword[5:])
            except ValueError:
continue
if n > naxis:
del image_header[keyword]
# Although PCOUNT and GCOUNT are considered mandatory for IMAGE HDUs,
# ZPCOUNT and ZGCOUNT are optional, probably because for IMAGE HDUs
# their values are always 0 and 1 respectively
if 'ZPCOUNT' in self._header:
image_header.set('PCOUNT', self._header['ZPCOUNT'],
self._header.comments['ZPCOUNT'],
after=last_naxis)
else:
image_header.set('PCOUNT', 0, after=last_naxis)
if 'ZGCOUNT' in self._header:
image_header.set('GCOUNT', self._header['ZGCOUNT'],
self._header.comments['ZGCOUNT'],
after='PCOUNT')
else:
image_header.set('GCOUNT', 1, after='PCOUNT')
if 'ZEXTEND' in self._header:
image_header.set('EXTEND', self._header['ZEXTEND'],
self._header.comments['ZEXTEND'])
if 'ZBLOCKED' in self._header:
image_header.set('BLOCKED', self._header['ZBLOCKED'],
self._header.comments['ZBLOCKED'])
# Move the ZHECKSUM and ZDATASUM cards to the image header
# as CHECKSUM and DATASUM
if 'ZHECKSUM' in self._header:
image_header.set('CHECKSUM', self._header['ZHECKSUM'],
self._header.comments['ZHECKSUM'])
if 'ZDATASUM' in self._header:
image_header.set('DATASUM', self._header['ZDATASUM'],
self._header.comments['ZDATASUM'])
# Remove the EXTNAME card if the value in the table header
# is the default value of COMPRESSED_IMAGE.
if ('EXTNAME' in self._header and
self._header['EXTNAME'] == 'COMPRESSED_IMAGE'):
del image_header['EXTNAME']
# Look to see if there are any blank cards in the table
# header. If there are, there should be the same number
# of blank cards in the image header. Add blank cards to
# the image header to make it so.
table_blanks = self._header._countblanks()
image_blanks = image_header._countblanks()
for _ in range(table_blanks - image_blanks):
image_header.append()
# Create the CompImageHeader that syncs with the table header, and save
# it off to self._image_header so it can be referenced later
# unambiguously
self._image_header = CompImageHeader(self._header, image_header)
return self._image_header
def _summary(self):
"""
Summarize the HDU: name, dimensions, and formats.
"""
class_name = self.__class__.__name__
# if data is touched, use data info.
if self._data_loaded:
if self.data is None:
_shape, _format = (), ''
else:
# the shape will be in the order of NAXIS's which is the
# reverse of the numarray shape
_shape = list(self.data.shape)
_format = self.data.dtype.name
_shape.reverse()
_shape = tuple(_shape)
_format = _format[_format.rfind('.') + 1:]
# if data is not touched yet, use header info.
else:
_shape = ()
for idx in range(self.header['NAXIS']):
_shape += (self.header['NAXIS' + str(idx + 1)],)
_format = _ImageBaseHDU.NumCode[self.header['BITPIX']]
return (self.name, class_name, len(self.header), _shape,
_format)
def _update_compressed_data(self):
"""
Compress the image data so that it may be written to a file.
"""
# Check to see that the image_header matches the image data
image_bitpix = _ImageBaseHDU.ImgCode[self.data.dtype.name]
if image_bitpix != self._orig_bitpix or self.data.shape != self.shape:
self._update_header_data(self.header)
# TODO: This is copied right out of _ImageBaseHDU._writedata_internal;
# it would be cool if we could use an internal ImageHDU and use that to
# write to a buffer for compression or something. See ticket #88
# deal with unsigned integer 16, 32 and 64 data
old_data = self.data
if _is_pseudo_unsigned(self.data.dtype):
# Convert the unsigned array to signed
self.data = np.array(
self.data - _unsigned_zero(self.data.dtype),
dtype='=i%d' % self.data.dtype.itemsize)
should_swap = False
else:
should_swap = not self.data.dtype.isnative
if should_swap:
self.data.byteswap(True)
try:
nrows = self._header['NAXIS2']
tbsize = self._header['NAXIS1'] * nrows
self._header['PCOUNT'] = 0
if 'THEAP' in self._header:
del self._header['THEAP']
self._theap = tbsize
# Compress the data.
# The current implementation of compress_hdu assumes the empty
# compressed data table has already been initialized in
# self.compressed_data, and writes directly to it
# compress_hdu returns the size of the heap for the written
# compressed image table
heapsize, self.compressed_data = compression.compress_hdu(self)
finally:
# if data was byteswapped return it to its original order
if should_swap:
self.data.byteswap(True)
self.data = old_data
# CFITSIO will write the compressed data in big-endian order
dtype = self.columns.dtype.newbyteorder('>')
buf = self.compressed_data
compressed_data = buf[:self._theap].view(dtype=dtype,
type=np.rec.recarray)
self.compressed_data = compressed_data.view(FITS_rec)
self.compressed_data._coldefs = self.columns
self.compressed_data._heapoffset = self._theap
self.compressed_data._heapsize = heapsize
self.compressed_data.formats = self.columns.formats
# Update the table header cards to match the compressed data.
self._update_header()
@deprecated('3.2', alternative='(refactor your code)', pending=True)
def updateCompressedData(self):
self._update_compressed_data()
def _update_header(self):
"""
Update the table header cards to match the compressed data.
"""
# Get the _heapsize attribute to match the data.
self.compressed_data._scale_back()
# Check that TFIELDS and NAXIS2 match the data.
self._header['TFIELDS'] = self.compressed_data._nfields
self._header['NAXIS2'] = self.compressed_data.shape[0]
# Calculate PCOUNT, for variable length tables.
_tbsize = self._header['NAXIS1'] * self._header['NAXIS2']
_heapstart = self._header.get('THEAP', _tbsize)
self.compressed_data._gap = _heapstart - _tbsize
_pcount = self.compressed_data._heapsize + self.compressed_data._gap
if _pcount > 0:
self._header['PCOUNT'] = _pcount
# Update TFORM for variable length columns.
for idx in range(self.compressed_data._nfields):
format = self.compressed_data._coldefs._recformats[idx]
if isinstance(format, _FormatP):
_max = self.compressed_data.field(idx).max
format_cls = format.__class__
format = format_cls(format.dtype, repeat=format.repeat,
max=_max)
self._header['TFORM' + str(idx + 1)] = format.tform
        # Ensure that for RICE_1 the BLOCKSIZE and BYTEPIX cards
# are present and set to the hard coded values used by the
# compression algorithm.
if self._header['ZCMPTYPE'] == 'RICE_1':
self._header.set('ZNAME1', 'BLOCKSIZE', 'compression block size',
after='ZCMPTYPE')
self._header.set('ZVAL1', DEFAULT_BLOCK_SIZE, 'pixels per block',
after='ZNAME1')
self._header.set('ZNAME2', 'BYTEPIX',
'bytes per pixel (1, 2, 4, or 8)', after='ZVAL1')
if self._header['ZBITPIX'] == 8:
bytepix = 1
elif self._header['ZBITPIX'] == 16:
bytepix = 2
else:
bytepix = DEFAULT_BYTE_PIX
self._header.set('ZVAL2', bytepix,
'bytes per pixel (1, 2, 4, or 8)',
after='ZNAME2')
@deprecated('3.2', alternative='(refactor your code)', pending=True)
def updateHeader(self):
self._update_header()
def scale(self, type=None, option='old', bscale=1, bzero=0):
"""
Scale image data by using ``BSCALE`` and ``BZERO``.
Calling this method will scale `self.data` and update the keywords of
``BSCALE`` and ``BZERO`` in `self._header` and `self._image_header`.
This method should only be used right before writing to the output
file, as the data will be scaled and is therefore not very usable after
the call.
Parameters
----------
type : str, optional
destination data type, use a string representing a numpy dtype
            name, (e.g. ``'uint8'``, ``'int16'``, ``'float32'`` etc.). If it is
`None`, use the current data type.
option : str, optional
how to scale the data: if ``"old"``, use the original ``BSCALE``
and ``BZERO`` values when the data was read/created. If
``"minmax"``, use the minimum and maximum of the data to scale.
The option will be overwritten by any user-specified bscale/bzero
values.
bscale, bzero : int, optional
user specified ``BSCALE`` and ``BZERO`` values.
"""
if self.data is None:
return
# Determine the destination (numpy) data type
if type is None:
type = _ImageBaseHDU.NumCode[self._bitpix]
_type = getattr(np, type)
# Determine how to scale the data
# bscale and bzero takes priority
if (bscale != 1 or bzero != 0):
_scale = bscale
_zero = bzero
else:
if option == 'old':
_scale = self._orig_bscale
_zero = self._orig_bzero
elif option == 'minmax':
                if issubclass(_type, np.floating):
_scale = 1
_zero = 0
else:
_min = np.minimum.reduce(self.data.flat)
_max = np.maximum.reduce(self.data.flat)
if _type == np.uint8: # uint8 case
_zero = _min
_scale = (_max - _min) / (2. ** 8 - 1)
else:
_zero = (_max + _min) / 2.
# throw away -2^N
_scale = (_max - _min) / (2. ** (8 * _type.bytes) - 2)
# Do the scaling
if _zero != 0:
self.data += -_zero
self.header['BZERO'] = _zero
else:
# Delete from both headers
for header in (self.header, self._header):
try:
del header['BZERO']
except KeyError:
pass
if _scale != 1:
self.data /= _scale
self.header['BSCALE'] = _scale
else:
for header in (self.header, self._header):
try:
del header['BSCALE']
except KeyError:
pass
if self.data.dtype.type != _type:
self.data = np.array(np.around(self.data), dtype=_type) # 0.7.7.1
# Update the BITPIX Card to match the data
self._bitpix = _ImageBaseHDU.ImgCode[self.data.dtype.name]
self._bzero = self.header.get('BZERO', 0)
self._bscale = self.header.get('BSCALE', 1)
        # Update BITPIX for the image header specifically
# TODO: Make this more clear by using self._image_header, but only once
# this has been fixed so that the _image_header attribute is guaranteed
# to be valid
self.header['BITPIX'] = self._bitpix
# Update the table header to match the scaled data
self._update_header_data(self.header)
# Since the image has been manually scaled, the current
# bitpix/bzero/bscale now serve as the 'original' scaling of the image,
# as though the original image has been completely replaced
self._orig_bitpix = self._bitpix
self._orig_bzero = self._bzero
self._orig_bscale = self._bscale
# TODO: Fix this class so that it doesn't actually inherit from
# BinTableHDU, but instead has an internal BinTableHDU reference
def _prewriteto(self, checksum=False, inplace=False):
if self._scale_back:
self.scale(_ImageBaseHDU.NumCode[self._orig_bitpix])
if self._has_data:
self._update_compressed_data()
# Use methods in the superclass to update the header with
# scale/checksum keywords based on the data type of the image data
self._update_uint_scale_keywords()
# Shove the image header and data into a new ImageHDU and use that
# to compute the image checksum
image_hdu = ImageHDU(data=self.data, header=self.header)
image_hdu._update_checksum(checksum)
if 'CHECKSUM' in image_hdu.header:
# This will also pass through to the ZHECKSUM keyword and
# ZDATASUM keyword
self._image_header.set('CHECKSUM',
image_hdu.header['CHECKSUM'],
image_hdu.header.comments['CHECKSUM'])
if 'DATASUM' in image_hdu.header:
self._image_header.set('DATASUM', image_hdu.header['DATASUM'],
image_hdu.header.comments['DATASUM'])
# Store a temporary backup of self.data in a different attribute;
# see below
self._imagedata = self.data
# Now we need to perform an ugly hack to set the compressed data as
# the .data attribute on the HDU so that the call to _writedata
        # handles it properly
self.__dict__['data'] = self.compressed_data
# Doesn't call the super's _prewriteto, since it calls
# self.data._scale_back(), which is meaningless here.
return ExtensionHDU._prewriteto(self, checksum=checksum,
inplace=inplace)
def _writeheader(self, fileobj):
"""
Bypasses `BinTableHDU._writeheader()` which updates the header with
metadata about the data that is meaningless here; another reason
why this class maybe shouldn't inherit directly from BinTableHDU...
"""
return ExtensionHDU._writeheader(self, fileobj)
def _writedata(self, fileobj):
"""
Wrap the basic ``_writedata`` method to restore the ``.data``
attribute to the uncompressed image data in the case of an exception.
"""
try:
return super(CompImageHDU, self)._writedata(fileobj)
finally:
# Restore the .data attribute to its rightful value (if any)
if hasattr(self, '_imagedata'):
self.__dict__['data'] = self._imagedata
del self._imagedata
else:
del self.data
# TODO: This was copied right out of _ImageBaseHDU; get rid of it once we
# find a way to rewrite this class as either a subclass or wrapper for an
# ImageHDU
def _dtype_for_bitpix(self):
"""
Determine the dtype that the data should be converted to depending on
the BITPIX value in the header, and possibly on the BSCALE value as
well. Returns None if there should not be any change.
"""
bitpix = self._orig_bitpix
# Handle possible conversion to uints if enabled
if self._uint and self._orig_bscale == 1:
for bits, dtype in ((16, np.dtype('uint16')),
(32, np.dtype('uint32')),
(64, np.dtype('uint64'))):
if bitpix == bits and self._orig_bzero == 1 << (bits - 1):
return dtype
if bitpix > 16: # scale integers to Float64
return np.dtype('float64')
elif bitpix > 0: # scale integers to Float32
return np.dtype('float32')
def _update_header_scale_info(self, dtype=None):
if (not self._do_not_scale_image_data and
not (self._orig_bzero == 0 and self._orig_bscale == 1)):
for keyword in ['BSCALE', 'BZERO']:
# Make sure to delete from both the image header and the table
# header; later this will be streamlined
for header in (self.header, self._header):
try:
del header[keyword]
# Since _update_header_scale_info can, currently, be
# called *after* _prewriteto(), replace these with
# blank cards so the header size doesn't change
header.append()
except KeyError:
pass
if dtype is None:
dtype = self._dtype_for_bitpix()
if dtype is not None:
self.header['BITPIX'] = _ImageBaseHDU.ImgCode[dtype.name]
self._bzero = 0
self._bscale = 1
self._bitpix = self.header['BITPIX']
def _generate_dither_seed(self, seed):
if not _is_int(seed):
raise TypeError("Seed must be an integer")
if not -1 <= seed <= 10000:
raise ValueError(
"Seed for random dithering must be either between 1 and "
"10000 inclusive, 0 for autogeneration from the system "
"clock, or -1 for autogeneration from a checksum of the first "
"image tile (got %s)" % seed)
if seed == DITHER_SEED_CHECKSUM:
# Determine the tile dimensions from the ZTILEn keywords
naxis = self._header['ZNAXIS']
tile_dims = [self._header['ZTILE%d' % (idx + 1)]
for idx in range(naxis)]
tile_dims.reverse()
# Get the first tile by using the tile dimensions as the end
# indices of slices (starting from 0)
first_tile = self.data[tuple(slice(d) for d in tile_dims)]
            # The checksum algorithm used is literally just the sum of the bytes
# of the tile data (not its actual floating point values). Integer
# overflow is irrelevant.
csum = first_tile.view(dtype='uint8').sum()
# Since CFITSIO uses an unsigned long (which may be different on
# different platforms) go ahead and truncate the sum to its
# unsigned long value and take the result modulo 10000
return (ctypes.c_ulong(csum).value % 10000) + 1
elif seed == DITHER_SEED_CLOCK:
# This isn't exactly the same algorithm as CFITSIO, but that's okay
# since the result is meant to be arbitrary. The primary difference
# is that CFITSIO incorporates the HDU number into the result in
# the hopes of heading off the possibility of the same seed being
# generated for two HDUs at the same time. Here instead we just
# add in the HDU object's id
return ((sum(int(x) for x in math.modf(time.time())) + id(self)) %
10000) + 1
else:
return seed
| gpl-3.0 |
cculianu/gemuo | src/gemuo/engine/menu.py | 2 | 1975 | #
# GemUO
#
# (c) 2005-2010 Max Kellermann <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
from twisted.internet import reactor
from twisted.python.failure import Failure
import uo.packets as p
from gemuo.error import *
from gemuo.engine import Engine
class NoSuchOption(Exception):
def __init__(self, message='No such menu option'):
Exception.__init__(self, message)
def select_option(menu, item):
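    # Return the 1-based index of the first menu option whose text starts
    # with the given string, or None if nothing matches.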
for i, option in enumerate(menu.options):
if option.text[:len(item)] == item:
return i + 1
return None
class MenuResponse(Engine):
def __init__(self, client, responses):
Engine.__init__(self, client)
assert len(responses) > 0
self.responses = list(responses)
self.call_id = reactor.callLater(5, self._timeout)
def abort(self):
Engine.abort(self)
self.call_id.cancel()
def on_packet(self, packet):
if isinstance(packet, p.Menu):
response, self.responses = self.responses[0], self.responses[1:]
option = select_option(packet, response)
if option is None:
self.call_id.cancel()
self._failure(NoSuchOption())
return
self._client.send(p.MenuResponse(packet.dialog_serial, option))
if len(self.responses) == 0:
self.call_id.cancel()
self._success()
def _timeout(self):
# waiting for the menu to appear has taken too long; give up
self._failure(Timeout('Menu timeout'))
| gpl-2.0 |
skycucumber/Messaging-Gateway | webapp/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/cp949prober.py | 2801 | 1782 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCKRDistributionAnalysis
from .mbcssm import CP949SMModel
class CP949Prober(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(CP949SMModel)
# NOTE: CP949 is a superset of EUC-KR, so the distribution should be
# not different.
self._mDistributionAnalyzer = EUCKRDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "CP949"
| gpl-2.0 |
karlnewell/net-virt-platform | sdncon/controller/config.py | 3 | 13234 | #
# Copyright (c) 2013 Big Switch Networks, Inc.
#
# Licensed under the Eclipse Public License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at
#
# http://www.eclipse.org/legal/epl-v10.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
from sdncon.rest.config import add_config_handler
from sdncon.controller.models import Feature, GlobalConfig, Controller, \
ControllerInterface, ControllerDomainNameServer, \
FirewallRule, ControllerAlias, SnmpServerConfig, ImageDropUser
from sdncon.controller.models import TacacsPlusConfig, TacacsPlusHost
from oswrapper import exec_os_wrapper
import os
import re
import sdncon
from django.core import serializers
# FIXME: Can probably get rid of default_id when the rest of the code is
# in place for supporting multiple controller IDs. But what about
# unit tests where we shouldn't rely on the boot-config file existing
def get_local_controller_id(default_id='localhost'):
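    # Read the controller ID from the boot-config file; fall back to
    # default_id if the file is missing or has no controller-id line.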
local_controller_id = default_id
f = None
try:
f = open("%s/run/boot-config" % sdncon.SDN_ROOT, 'r')
data = f.read()
match = re.search("^controller-id=([0-9a-zA-Z\-]*)$", data, re.MULTILINE)
if match:
local_controller_id = match.group(1)
except Exception, _e:
# If there was any error, then just leave the controller ID as the
# default value.
pass
finally:
if f:
f.close()
return local_controller_id
# Add the config handlers here. Check the comments for add_config_handler in rest/config.py
# for a description of the calling conventions for config handlers.
def network_config_handler(op, old_instance, new_instance, modified_fields):
valid_instance = old_instance if (op == 'DELETE') else new_instance
if isinstance(valid_instance, Controller):
controller_node = valid_instance
controller_id = controller_node.id
if op == 'DELETE':
# no further configuration here
return
elif isinstance(valid_instance, ControllerDomainNameServer) \
or isinstance(valid_instance, ControllerInterface):
controller_id = valid_instance.controller_id
try:
controller_node = Controller.objects.get(pk=controller_id)
except Exception, _e:
# unknown controller node during delete, no need to
# do anything with any of these interfaces
return
else:
raise Exception('Unknown model change trigger network config handler')
if controller_id != get_local_controller_id():
return
if op == 'DELETE':
# don't reconfigure the interfaces during delete, since
# for deletes, the values of ip/netmask don't get updated
dns = ControllerDomainNameServer.objects.filter(
controller=controller_node).order_by('timestamp')
exec_os_wrapper('NetworkConfig', 'set',
[serializers.serialize("json", [controller_node]),
serializers.serialize("json", dns)])
else:
# op != 'DELETE'
#
# XXX what about HA?
# 'ifs' below isn't filtered by controller, the NetConfig
        # target will only select interfaces associated with the
        # controller-node 'localhost'.
dns = ControllerDomainNameServer.objects.filter(
controller=controller_node).order_by('-priority')
ifs = ControllerInterface.objects.filter(controller=controller_node)
exec_os_wrapper('NetworkConfig', 'set',
[serializers.serialize("json", [controller_node]),
serializers.serialize("json", dns),
serializers.serialize("json", ifs)])
def firewall_entry_handler(op, old_instance, new_instance, modified_fields=None):
#allow in on eth0 proto tcp from any to any port 80
print "XXX: firewall handler called-1"
command = ""
if op == "DELETE" and str(old_instance.interface.controller) == get_local_controller_id():
command += "delete "
instance = old_instance
elif (op == "INSERT" or op == "UPDATE") and str(new_instance.interface.controller) == get_local_controller_id():
instance = new_instance
else:
return
print instance.action
print instance.proto
print instance.port
print instance.src_ip
print instance.vrrp_ip
print "XXX: firewall handler called-2"
controller_interface = instance.interface
eth = 'eth' + str(controller_interface.number) #LOOK! Hardcoded to eth interface
proto_str = ""
if instance.proto != '' and instance.proto != 'vrrp':
proto_str = " proto " + instance.proto
action_str = instance.action
src_str = " from any"
if instance.src_ip != '':
src_str = " from " + instance.src_ip
dst_str = " to any"
if instance.vrrp_ip != '':
dst_str = " to " + instance.vrrp_ip
print "dst_str = ", dst_str
port_str = ""
if instance.port != 0:
port_str = " port " + str(instance.port)
command += (action_str + " in on " + eth + proto_str + src_str + dst_str + port_str)
print command
exec_os_wrapper('ExecuteUfwCommand', 'set', [command])
if instance.port == 6633 and action_str == 'reject' and op != 'DELETE':
exec_os_wrapper('RestartSDNPlatform', 'set', [])
def ntp_config_handler(op, old_instance, new_instance, modified_fields=None):
if new_instance != None:
exec_os_wrapper("SetNtpServer", 'set', [new_instance.ntp_server])
def tz_config_handler(op, old_instance, new_instance, modified_fields=None):
if op == "DELETE":
if str(old_instance.id) != get_local_controller_id():
return
exec_os_wrapper("UnsetTimezone", 'set')
elif op == "INSERT" or op == "UPDATE":
if str(new_instance.id) != get_local_controller_id():
return
if new_instance.time_zone != None and str(new_instance.time_zone) != "":
exec_os_wrapper("SetTimezone", 'set', [new_instance.time_zone])
def logging_server_config_handler(op, old_instance, new_instance, modified_fields=None):
if op == "DELETE":
if str(old_instance.id) != get_local_controller_id():
return
exec_os_wrapper("UnsetSyslogServer", 'set',
[old_instance.logging_server, old_instance.logging_level])
elif op == "INSERT" or op == "UPDATE":
if str(new_instance.id) != get_local_controller_id():
return
if new_instance.logging_server != "" and new_instance.logging_enabled:
exec_os_wrapper("SetSyslogServer", 'set',
[new_instance.logging_server, new_instance.logging_level])
else:
exec_os_wrapper("UnsetSyslogServer", 'set',
[new_instance.logging_server, new_instance.logging_level])
def vrrp_virtual_router_id_config_handle(op, old_instance, new_instance, modified_fields=None):
if op == "INSERT" or op == "UPDATE":
exec_os_wrapper("SetVrrpVirtualRouterId", 'set',
[new_instance.cluster_number])
def netvirt_feature_config_handler(op, old_instance, new_instance, modified_fields=None):
if op == "INSERT" or op == "UPDATE":
if new_instance.netvirt_feature:
exec_os_wrapper("SetDefaultConfig", 'set', [new_instance.netvirt_feature])
else:
exec_os_wrapper("SetStaticFlowOnlyConfig", 'set', [new_instance.netvirt_feature])
def controller_alias_config_handler(op, old_instance, new_instance, modified_fields=None):
if op == 'INSERT' or op == 'UPDATE':
if str(new_instance.controller) == get_local_controller_id():
exec_os_wrapper("SetHostname", 'set', [new_instance.alias])
def tacacs_plus_config_handler(op, old_instance, new_instance, modified_fields=None):
if isinstance(old_instance, TacacsPlusConfig):
if op == 'DELETE':
# deleting the config singleton (presumably during shutdown?)
return
if isinstance(old_instance, TacacsPlusConfig):
config_id = old_instance.id
else:
config_id = 'tacacs'
try:
config = TacacsPlusConfig.objects.get(pk=config_id)
except TacacsPlusConfig.DoesNotExist:
# cons up a dummy config object, not necessary to save it
config = TacacsPlusConfig()
# get current list of hosts (op==DELETE ignored here)
##hosts = TacacsPlusHost.objects.order_by('timestamp')
def timestampSort(h1, h2):
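        # cmp-style comparator: order TACACS+ hosts oldest-first by timestamp.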
return cmp(h1.timestamp, h2.timestamp)
hosts = sorted(TacacsPlusHost.objects.all(), timestampSort)
# XXX roth -- config is passed as-is, not as a single-element list
cj = serializers.serialize("json", [config])
hj = serializers.serialize("json", hosts)
print "Calling oswrapper with:", [cj, hj]
exec_os_wrapper('TacacsPlusConfig', 'set', [cj, hj])
def snmp_server_config_handler(op, old_instance, new_instance, modified_fields=None):
if op == 'DELETE':
exec_os_wrapper('UnsetSnmpServerConfig', 'set', [])
elif op == 'INSERT' or op == 'UPDATE':
# enable_changed is true if operation is INSERT, else compare with old instance
if (op == 'INSERT'):
enable_changed = (new_instance.server_enable is True) #since default is False
print 'operation= insert, enable_changed = ', enable_changed
else:
enable_changed = (new_instance.server_enable != old_instance.server_enable)
server_enable = new_instance.server_enable
community = '' if new_instance.community is None else new_instance.community
location = '' if new_instance.location is None else new_instance.location
contact = '' if new_instance.contact is None else new_instance.contact
print "Calling oswrapper with:", [server_enable, community, location, contact, enable_changed]
exec_os_wrapper('SetSnmpServerConfig', 'set',
[server_enable, community, location, contact, enable_changed])
def test_config_handler(op, old_instance, new_instance, modified_fields=None):
pass
def images_user_ssh_key_config_handler(op, old_instance, new_instance, modified_fields=None):
if op == 'INSERT' or op == 'UPDATE':
sshkey = "\"" + str(new_instance.images_user_ssh_key) + "\""
exec_os_wrapper('SetImagesUserSSHKey', 'set', [sshkey])
def init_config():
#
# Associate the config handlers with specific callout for each of the fields
# Keep in mind that these are the django names, NOT the rest api names,
#
disabled_by_shell_variable = os.environ.get('SDNCON_CONFIG_HANDLERS_DISABLED', False)
disabled_by_file = os.path.exists("%s/sdncon_config_handlers_disabled" % sdncon.SDN_ROOT)
if not disabled_by_shell_variable and not disabled_by_file:
add_config_handler({Controller: ['ntp_server']}, ntp_config_handler)
add_config_handler({Controller: ['time_zone']}, tz_config_handler)
add_config_handler(
{
Controller: ['domain_lookups_enabled', 'domain_name', 'default_gateway'],
ControllerDomainNameServer: None,
ControllerInterface: ['ip', 'netmask', 'mode'],
}, network_config_handler)
add_config_handler({ControllerAlias: ['alias']}, controller_alias_config_handler)
add_config_handler({Controller: ['logging_enabled', 'logging_server', 'logging_level']}, logging_server_config_handler)
add_config_handler({Feature: ['netvirt_feature']}, netvirt_feature_config_handler)
add_config_handler({FirewallRule: None}, firewall_entry_handler)
add_config_handler({GlobalConfig: ['cluster_number']}, vrrp_virtual_router_id_config_handle)
add_config_handler({ TacacsPlusConfig: ["tacacs_plus_authn", "tacacs_plus_authz", "tacacs_plus_acct",
"local_authn", "local_authz",
"timeout", "key",],
TacacsPlusHost: ['ip', 'key'],
},
tacacs_plus_config_handler)
add_config_handler({SnmpServerConfig: ['server_enable', 'community', 'location', 'contact']}, snmp_server_config_handler)
add_config_handler({ImageDropUser: ['images_user_ssh_key']}, images_user_ssh_key_config_handler)
else:
add_config_handler(
{
Controller: ['domain_lookups_enabled', 'domain_name', 'default_gateway'],
ControllerDomainNameServer: None,
ControllerInterface: ['ip', 'netmask', 'mode'],
ControllerAlias: ['alias'],
FirewallRule: None,
Feature: None,
GlobalConfig: ['ha-enabled', 'cluster-number'],
}, test_config_handler)
| epl-1.0 |
GladeRom/android_external_chromium_org | chrome/test/mini_installer/verifier.py | 85 | 2063 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
class Verifier:
"""Verifies that the current machine states match the expectation."""
def VerifyInput(self, verifier_input, variable_expander):
"""Verifies that the current machine states match |verifier_input|.
Args:
verifier_input: An input to the verifier. It is a dictionary where each
key is an expectation name and the associated value is an expectation
dictionary. The expectation dictionary may contain an optional
'condition' property, a string that determines whether the expectation
should be verified. Each subclass can specify a different expectation
name and expectation dictionary.
variable_expander: A VariableExpander object.
"""
for expectation_name, expectation in verifier_input.iteritems():
if 'condition' in expectation:
condition = variable_expander.Expand(expectation['condition'])
if not self._EvaluateCondition(condition):
continue
self._VerifyExpectation(expectation_name, expectation, variable_expander)
def _VerifyExpectation(self, expectation_name, expectation,
variable_expander):
"""Verifies that the current machine states match |verifier_input|.
This is an abstract method for subclasses to override.
Args:
expectation_name: An expectation name. Each subclass can specify a
different expectation name format.
expectation: An expectation dictionary. Each subclass can specify a
different expectation dictionary format.
variable_expander: A VariableExpander object.
"""
raise NotImplementedError()
def _EvaluateCondition(self, condition):
"""Evaluates |condition| using eval().
Args:
condition: A condition string.
Returns:
The result of the evaluated condition.
"""
return eval(condition, {'__builtins__': None}, None)
| bsd-3-clause |
cogmission/nupic.research | projects/sequence_classification/analyze_union_column_model.py | 11 | 10397 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2016, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Analyze experiment results from the RDSE->Union model
One needs to run the script "run_encoder_with_union.py" first to get the
experiment results (distance matrices)
"""
import pickle
import matplotlib.pyplot as plt
import matplotlib as mpl
from util_functions import *
plt.ion()
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams.update({'figure.autolayout': True})
def runTMOverDatasetFast(tm, activeColumns, unionLength=0):
"""
  Run the temporal memory over pre-encoded sequences and build union
  representations of the predicted-active cells.
  :param tm: temporal memory instance to run over the data (learning disabled)
  :param activeColumns: list of sequences; each sequence is a list of sets of
    active column indices, one set per time step
  :param unionLength: number of consecutive time steps pooled into each union
  :return: (predictedActiveCellsUnionTrace, activationFrequencyTrace,
    predictedActiveFrequencyTrace)
"""
sequenceLength = len(activeColumns[0])
numSequence = len(activeColumns)
numCells = tm.getCellsPerColumn() * tm.getColumnDimensions()[0]
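  # Number of union windows needed to cover the sequence (ceiling division).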
numSteps = sequenceLength / unionLength
if np.mod(sequenceLength, unionLength) > 0:
numSteps += 1
predictedActiveCellsUnionTrace = []
activationFrequencyTrace = []
predictedActiveFrequencyTrace = []
for i in range(numSequence):
activeCellsTrace = []
predictiveCellsTrace = []
predictedActiveCellsTrace = []
unionStep = 0
# unionCells = np.zeros((numCells, ))
unionBatchIdx = 0
unionCells = set()
activationFrequency = np.zeros((numCells, ))
predictedActiveFrequency = np.zeros((numCells,))
for t in range(sequenceLength):
tm.compute(activeColumns[i][t], learn=False)
activeCellsTrace.append(set(tm.getActiveCells()))
predictiveCellsTrace.append(set(tm.getPredictiveCells()))
if t == 0:
predictedActiveCells = set()
else:
# predictedActiveCells = activeCellsTrace[t]
predictedActiveCells = activeCellsTrace[t].intersection(predictiveCellsTrace[t-1])
activationFrequency[tm.getActiveCells()] += 1
predictedActiveFrequency[list(predictedActiveCells)] += 1
unionCells = unionCells.union(predictedActiveCells)
# unionCells[list(predictedActiveCells)] += 1
unionStep += 1
if unionStep == unionLength:
predictedActiveCellsTrace.append(unionCells)
# predictedActiveCellsUnionTrace[i, unionBatchIdx, :] = unionCells
# unionCells = np.zeros((numCells,))
unionStep = 0
unionBatchIdx += 1
unionCells = set()
if unionStep > 0:
predictedActiveCellsTrace.append(unionCells)
# predictedActiveCellsUnionTrace[i, unionBatchIdx, :] = unionCells
activationFrequency = activationFrequency/np.linalg.norm(activationFrequency)
predictedActiveFrequency = predictedActiveFrequency / np.linalg.norm(predictedActiveFrequency)
predictedActiveCellsUnionTrace.append(predictedActiveCellsTrace)
activationFrequencyTrace.append(activationFrequency)
predictedActiveFrequencyTrace.append(predictedActiveFrequency)
print "{} out of {} done ".format(i, numSequence)
return (predictedActiveCellsUnionTrace,
activationFrequencyTrace,
predictedActiveFrequencyTrace)
def runEncoderOverDataset(encoder, dataset):
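  # Encode every element of every sequence and record the set of active
  # column indices produced by the encoder at each time step.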
activeColumnsData = []
for i in range(dataset.shape[0]):
activeColumnsTrace = []
for element in dataset[i, :]:
encoderOutput = encoder.encode(element)
activeColumns = set(np.where(encoderOutput > 0)[0])
activeColumnsTrace.append(activeColumns)
activeColumnsData.append(activeColumnsTrace)
# print "{} out of {} done ".format(i, dataset.shape[0])
return activeColumnsData
if __name__ == "__main__":
plt.close('all')
datasetName = "SyntheticData"
dataSetList = listDataSets(datasetName)
datasetName = 'UCR_TS_Archive_2015'
dataSetList = listDataSets(datasetName)
# dataSetList = ["synthetic_control"]
accuracyAll = []
dataSetNameList = []
for dataName in dataSetList:
plt.close('all')
trainData, trainLabel, testData, testLabel = loadDataset(dataName, datasetName)
numTest = len(testLabel)
numTrain = len(trainLabel)
sequenceLength = len(trainData[0])
classList = np.unique(trainLabel).tolist()
numClass = len(classList)
# if numTrain <= 30:
# continue
# print
print "Processing {}".format(dataName)
print "Train Sample # {}, Test Sample # {}".format(numTrain, numTest)
print "Sequence Length {} Class # {}".format(sequenceLength, len(classList))
# if max(numTest, numTrain) * sequenceLength > 600 * 600:
# print "skip this large dataset for now"
# continue
try:
unionLengthList = [1, 5, 10, 15, 20]
for unionLength in unionLengthList:
expResultTM = pickle.load(open('results/distanceMat/{}_columnOnly_union_{}'.format(
dataName, unionLength), 'r'))
except:
continue
EuclideanDistanceMat = calculateEuclideanDistanceMat(testData, trainData)
outcomeEuclidean = calculateEuclideanModelAccuracy(trainData, trainLabel, testData, testLabel)
accuracyEuclideanDist = np.mean(outcomeEuclidean)
print "Euclidean model accuracy: {}".format(accuracyEuclideanDist)
maxValue = np.max(trainData)
minValue = np.min(trainData)
numCols = 2048
w = 41
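    # Encoder parameters (presumably column count and active bits per encoding).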
try:
searchResolution = pickle.load(
open('results/optimalEncoderResolution/{}'.format(dataName), 'r'))
nBucketList = searchResolution['nBucketList']
accuracyVsResolution = searchResolution['accuracyVsResolution']
optNumBucket = nBucketList[smoothArgMax(np.array(accuracyVsResolution))]
optimalResolution = (maxValue - minValue) / optNumBucket
except:
continue
expResultTM = pickle.load(open('results/distanceMat/{}_columnOnly_union_{}'.format(
dataName, 1), 'r'))
distMatColumnTest = expResultTM['distMatColumnTest']
distMatColumnTrain = expResultTM['distMatColumnTrain']
# fit supervised model
testAccuracyListVsUnionLength = []
trainAccuracyListVsUnionLength = []
unionLengthList = [1, 5, 10, 15, 20]
for unionLength in unionLengthList:
expResultTM = pickle.load(open('results/distanceMat/{}_columnOnly_union_{}'.format(
dataName, unionLength), 'r'))
distMatColumnUnionTest = expResultTM['distMatColumnTest']
distMatColumnUnionTrain = expResultTM['distMatColumnTrain']
options = {"useColumnRepresentation": True,
"useCellRepresentation": True}
# calculate accuracy
trainAccuracyColumnUnion, outcomeColumn = calculateAccuracy(
distMatColumnUnionTrain, trainLabel, trainLabel)
testAccuracyColumnUnion, outcomeColumn = calculateAccuracy(
distMatColumnUnionTest, trainLabel, testLabel)
trainAccuracyColumnOnly, outcomeColumn = calculateAccuracy(
distMatColumnTrain, trainLabel, trainLabel)
testAccuracyColumnOnly, outcomeColumn = calculateAccuracy(
distMatColumnTest, trainLabel, testLabel)
print "Column Only model, Accuracy: {}".format(testAccuracyColumnOnly)
print "Column wt Union model, Accuracy: {}".format(testAccuracyColumnUnion)
accuracyListTrain = np.array([accuracyEuclideanDist,
trainAccuracyColumnOnly,
trainAccuracyColumnUnion])
accuracyListTest = np.array([accuracyEuclideanDist,
testAccuracyColumnOnly,
testAccuracyColumnUnion])
testAccuracyListVsUnionLength.append(accuracyListTest)
trainAccuracyListVsUnionLength.append(accuracyListTest)
trainAccuracyListVsUnionLength = np.array(trainAccuracyListVsUnionLength)
testAccuracyListVsUnionLength = np.array(testAccuracyListVsUnionLength)
numModel = testAccuracyListVsUnionLength.shape[1]
bestAccuracy = np.zeros((numModel, ))
for i in range(numModel):
idx = np.argmax(trainAccuracyListVsUnionLength[:, i])
bestAccuracy[i] = testAccuracyListVsUnionLength[idx, i]
bestAccuracy[1] = testAccuracyListVsUnionLength[0, 1]
accuracyAll.append(bestAccuracy)
dataSetNameList.append(dataName)
continue
accuracyAll = np.array(accuracyAll)
# fig, ax = plt.subplots(1, 2)
# (T, p) = scipy.stats.wilcoxon(accuracyAll[:, 1], accuracyAll[:, 6])
# ax[0].plot(accuracyAll[:, 1]*100, accuracyAll[:, 6]*100, 'ko')
# ax[0].plot([0, 105], [0, 105], 'k--')
# ax[0].set_xlim([0, 105])
# ax[0].set_ylim([0, 105])
# ax[0].set_xlabel('1-NN Accuracy (%)')
# ax[0].set_ylabel('classifier Accuracy (%)')
# ax[0].set_aspect('equal')
# ax[0].set_title('n={} p={}'.format(len(accuracyAll), p))
fig, ax = plt.subplots(2, 2)
ax[0, 0].plot(accuracyAll[:, 0] * 100, accuracyAll[:, 1] * 100, 'ko')
ax[0, 0].plot([0, 105], [0, 105], 'k--')
ax[0, 0].set_xlim([0, 105])
ax[0, 0].set_ylim([0, 105])
ax[0, 0].set_ylabel('column representation (%)')
ax[0, 0].set_xlabel('Euclidean distance (%)')
ax[0, 0].set_aspect('equal')
improv = np.mean((accuracyAll[:, 1] - accuracyAll[:, 0]) / accuracyAll[:, 0])
ax[0, 0].set_title('n={} improv={:3f}'.format(len(accuracyAll), improv))
ax[0, 1].plot(accuracyAll[:, 0] * 100, accuracyAll[:, 2] * 100, 'ko')
ax[0, 1].plot([0, 105], [0, 105], 'k--')
ax[0, 1].set_xlim([0, 105])
ax[0, 1].set_ylim([0, 105])
ax[0, 1].set_xlabel('Euclidean distance (%)')
ax[0, 1].set_ylabel('column wt union (%)')
ax[0, 1].set_aspect('equal')
improv = np.mean((accuracyAll[:, 2] - accuracyAll[:, 0]) / accuracyAll[:, 0])
ax[0, 1].set_title('n={} improv={:3f}'.format(len(accuracyAll), improv))
plt.savefig("figures/rdse_union_model_performance.pdf")
| agpl-3.0 |
joshka/SoundCloud2.Bundle | Contents/Libraries/Shared/simplejson/tests/test_bigint_as_string.py | 122 | 2238 | from unittest import TestCase
import simplejson as json
class TestBigintAsString(TestCase):
# Python 2.5, at least the one that ships on Mac OS X, calculates
# 2 ** 53 as 0! It manages to calculate 1 << 53 correctly.
values = [(200, 200),
((1 << 53) - 1, 9007199254740991),
((1 << 53), '9007199254740992'),
((1 << 53) + 1, '9007199254740993'),
(-100, -100),
((-1 << 53), '-9007199254740992'),
((-1 << 53) - 1, '-9007199254740993'),
((-1 << 53) + 1, -9007199254740991)]
options = (
{"bigint_as_string": True},
{"int_as_string_bitcount": 53}
)
def test_ints(self):
for opts in self.options:
for val, expect in self.values:
self.assertEqual(
val,
json.loads(json.dumps(val)))
self.assertEqual(
expect,
json.loads(json.dumps(val, **opts)))
def test_lists(self):
for opts in self.options:
for val, expect in self.values:
val = [val, val]
expect = [expect, expect]
self.assertEqual(
val,
json.loads(json.dumps(val)))
self.assertEqual(
expect,
json.loads(json.dumps(val, **opts)))
def test_dicts(self):
for opts in self.options:
for val, expect in self.values:
val = {'k': val}
expect = {'k': expect}
self.assertEqual(
val,
json.loads(json.dumps(val)))
self.assertEqual(
expect,
json.loads(json.dumps(val, **opts)))
def test_dict_keys(self):
for opts in self.options:
for val, _ in self.values:
expect = {str(val): 'value'}
val = {val: 'value'}
self.assertEqual(
expect,
json.loads(json.dumps(val)))
self.assertEqual(
expect,
json.loads(json.dumps(val, **opts)))
| mit |
sbrichards/rockstor-core | src/rockstor/backup/views/plugin.py | 2 | 1165 | """
Copyright (c) 2012-2014 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.response import Response
from system.services import superctl
from generic_view import GenericView
class PluginView(GenericView):
def post(self, request, command):
"""
valid commands are status, off and on.
"""
try:
out, err, rc = superctl('backup-plugin', command)
return Response({'status': out[0].split()[1],})
        except Exception as e:
            return Response({'error': str(e),})
| gpl-3.0 |
luca3m/urllib3 | docs/conf.py | 17 | 7377 | # -*- coding: utf-8 -*-
#
# urllib3 documentation build configuration file, created by
# sphinx-quickstart on Wed Oct 5 13:15:40 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
from datetime import date
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.insert(0, root_path)
import urllib3
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
]
# Test code blocks only when explicitly specified
doctest_test_doctest_blocks = ''
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'urllib3'
copyright = u'{year}, Andrey Petrov'.format(year=date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = urllib3.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'urllib3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'urllib3.tex', u'urllib3 Documentation',
u'Andrey Petrov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'urllib3', u'urllib3 Documentation',
[u'Andrey Petrov'], 1)
]
intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
| mit |
angryrancor/kivy | kivy/uix/scatterlayout.py | 26 | 3191 | '''
Scatter Layout
===============
.. versionadded:: 1.6.0
This layout behaves just like a
:class:`~kivy.uix.relativelayout.RelativeLayout`.
When a widget is added with position = (0,0) to a :class:`ScatterLayout`,
the child widget will also move when you change the position of the
:class:`ScatterLayout`. The child widget's coordinates remain
(0,0) as they are relative to the parent layout.
However, since :class:`ScatterLayout` is implemented using a
:class:`~kivy.uix.scatter.Scatter`
widget, you can also translate, rotate and scale the layout using touches
or clicks, just like in the case of a normal Scatter widget, and the child
widgets will behave as expected.
In contrast to a Scatter, the Layout favours 'hint' properties, such as
size_hint, size_hint_x, size_hint_y and pos_hint.
.. note::
The :class:`ScatterLayout` is implemented as a
:class:`~kivy.uix.floatlayout.FloatLayout`
inside a :class:`~kivy.uix.scatter.Scatter`.
.. warning::
Since the actual :class:`ScatterLayout` is a
:class:`~kivy.uix.scatter.Scatter`, its
add_widget and remove_widget functions are overridden to add children
to the embedded :class:`~kivy.uix.floatlayout.FloatLayout` (accessible as
the `content` property of :class:`~kivy.uix.scatter.Scatter`)
automatically. So if you want to access the added child elements,
you need self.content.children instead of self.children.
.. warning::
The :class:`ScatterLayout` was introduced in 1.7.0 and was called
:class:`~kivy.uix.relativelayout.RelativeLayout` in prior versions.
The :class:`~kivy.uix.relativelayout.RelativeLayout` is now an optimized
implementation that uses only a positional transform to avoid some of the
heavier calculation involved for :class:`~kivy.uix.scatter.Scatter`.
'''
__all__ = ('ScatterLayout', 'ScatterPlaneLayout')
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scatter import Scatter, ScatterPlane
from kivy.properties import ObjectProperty
class ScatterLayout(Scatter):
'''ScatterLayout class, see module documentation for more information.
'''
content = ObjectProperty()
def __init__(self, **kw):
self.content = FloatLayout()
super(ScatterLayout, self).__init__(**kw)
if self.content.size != self.size:
self.content.size = self.size
super(ScatterLayout, self).add_widget(self.content)
self.fbind('size', self.update_size)
def update_size(self, instance, size):
self.content.size = size
def add_widget(self, *l):
self.content.add_widget(*l)
def remove_widget(self, *l):
self.content.remove_widget(*l)
def clear_widgets(self):
self.content.clear_widgets()
class ScatterPlaneLayout(ScatterPlane):
'''ScatterPlaneLayout class, see module documentation for more information.
Similar to ScatterLayout, but based on ScatterPlane - so the input is not
bounded.
.. versionadded:: 1.9.0
'''
def __init__(self, **kwargs):
kwargs.setdefault('auto_bring_to_front', False)
super(ScatterPlaneLayout, self).__init__(**kwargs)
def collide_point(self, x, y):
return True
| mit |
fairbird/OpenPLI-BlackHole | lib/python/Components/Renderer/Listbox.py | 12 | 3100 | from Renderer import Renderer
from enigma import eListbox
# the listbox renderer is the listbox, but no listbox content.
# the content will be provided by the source (or converter).
# the source should emit the 'changed' signal whenever
# it has a new listbox content.
# the source needs to have the 'content' property for the
# used listbox content
# it should expose exactly the non-content related functions
# of the eListbox class. more or less.
class Listbox(Renderer, object):
def __init__(self):
Renderer.__init__(self)
self.__content = None
self.__wrap_around = True
self.__selection_enabled = True
self.__scrollbarMode = "showOnDemand"
GUI_WIDGET = eListbox
def contentChanged(self):
self.content = self.source.content
def setContent(self, content):
self.__content = content
if self.instance is not None:
self.instance.setContent(self.__content)
content = property(lambda self: self.__content, setContent)
def postWidgetCreate(self, instance):
if self.__content is not None:
instance.setContent(self.__content)
instance.selectionChanged.get().append(self.selectionChanged)
self.wrap_around = self.wrap_around # trigger
self.selection_enabled = self.selection_enabled # trigger
self.scrollbarMode = self.scrollbarMode # trigger
def preWidgetRemove(self, instance):
instance.setContent(None)
instance.selectionChanged.get().remove(self.selectionChanged)
def setWrapAround(self, wrap_around):
self.__wrap_around = wrap_around
if self.instance is not None:
self.instance.setWrapAround(self.__wrap_around)
wrap_around = property(lambda self: self.__wrap_around, setWrapAround)
def selectionChanged(self):
self.source.selectionChanged(self.index)
def getIndex(self):
if self.instance is None:
return 0
return self.instance.getCurrentIndex()
def moveToIndex(self, index):
if self.instance is None:
return
self.instance.moveSelectionTo(index)
index = property(getIndex, moveToIndex)
def move(self, direction):
if self.instance is not None:
self.instance.moveSelection(direction)
def setSelectionEnabled(self, enabled):
self.__selection_enabled = enabled
if self.instance is not None:
self.instance.setSelectionEnable(enabled)
selection_enabled = property(lambda self: self.__selection_enabled, setSelectionEnabled)
def setScrollbarMode(self, mode):
self.__scrollbarMode = mode
if self.instance is not None:
self.instance.setScrollbarMode(int(
{ "showOnDemand": 0,
"showAlways": 1,
"showNever": 2,
}[mode]))
scrollbarMode = property(lambda self: self.__scrollbarMode, setScrollbarMode)
def changed(self, what):
if hasattr(self.source, "selectionEnabled"):
self.selection_enabled = self.source.selectionEnabled
if hasattr(self.source, "scrollbarMode"):
self.scrollbarMode = self.source.scrollbarMode
if len(what) > 1 and isinstance(what[1], str) and what[1] == "style":
return
if self.content:
return
self.content = self.source.content
def entry_changed(self, index):
if self.instance is not None:
self.instance.entryChanged(index)
| gpl-2.0 |
i5o/openshot-sugar | ignacio/openshot/uploads/youtube/atom/mock_http_core.py | 102 | 12008 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = '[email protected] (Jeff Scudder)'
import StringIO
import pickle
import os.path
import tempfile
import atom.http_core
class Error(Exception):
pass
class NoRecordingFound(Error):
pass
class MockHttpClient(object):
debug = None
real_client = None
last_request_was_live = False
# The following members are used to construct the session cache temp file
# name.
# These are combined to form the file name
# /tmp/cache_prefix.cache_case_name.cache_test_name
cache_name_prefix = 'gdata_live_test'
cache_case_name = ''
cache_test_name = ''
def __init__(self, recordings=None, real_client=None):
self._recordings = recordings or []
if real_client is not None:
self.real_client = real_client
def add_response(self, http_request, status, reason, headers=None,
body=None):
response = MockHttpResponse(status, reason, headers, body)
# TODO Scrub the request and the response.
self._recordings.append((http_request._copy(), response))
AddResponse = add_response
def request(self, http_request):
"""Provide a recorded response, or record a response for replay.
If the real_client is set, the request will be made using the
real_client, and the response from the server will be recorded.
If the real_client is None (the default), this method will examine
the recordings and find the first which matches.
"""
request = http_request._copy()
_scrub_request(request)
if self.real_client is None:
self.last_request_was_live = False
for recording in self._recordings:
if _match_request(recording[0], request):
return recording[1]
else:
# Pass along the debug settings to the real client.
self.real_client.debug = self.debug
# Make an actual request since we can use the real HTTP client.
self.last_request_was_live = True
response = self.real_client.request(http_request)
scrubbed_response = _scrub_response(response)
self.add_response(request, scrubbed_response.status,
scrubbed_response.reason,
dict(atom.http_core.get_headers(scrubbed_response)),
scrubbed_response.read())
# Return the recording which we just added.
return self._recordings[-1][1]
    raise NoRecordingFound('No recording was found for request: %s %s' % (
request.method, str(request.uri)))
Request = request
def _save_recordings(self, filename):
recording_file = open(os.path.join(tempfile.gettempdir(), filename),
'wb')
pickle.dump(self._recordings, recording_file)
recording_file.close()
def _load_recordings(self, filename):
recording_file = open(os.path.join(tempfile.gettempdir(), filename),
'rb')
self._recordings = pickle.load(recording_file)
recording_file.close()
def _delete_recordings(self, filename):
full_path = os.path.join(tempfile.gettempdir(), filename)
if os.path.exists(full_path):
os.remove(full_path)
def _load_or_use_client(self, filename, http_client):
if os.path.exists(os.path.join(tempfile.gettempdir(), filename)):
self._load_recordings(filename)
else:
self.real_client = http_client
def use_cached_session(self, name=None, real_http_client=None):
"""Attempts to load recordings from a previous live request.
If a temp file with the recordings exists, then it is used to fulfill
requests. If the file does not exist, then a real client is used to
actually make the desired HTTP requests. Requests and responses are
    recorded and will be written to the desired temporary cache file when
close_session is called.
Args:
name: str (optional) The file name of session file to be used. The file
is loaded from the temporary directory of this machine. If no name
is passed in, a default name will be constructed using the
cache_name_prefix, cache_case_name, and cache_test_name of this
object.
real_http_client: atom.http_core.HttpClient the real client to be used
if the cached recordings are not found. If the default
value is used, this will be an
atom.http_core.HttpClient.
"""
if real_http_client is None:
real_http_client = atom.http_core.HttpClient()
if name is None:
self._recordings_cache_name = self.get_cache_file_name()
else:
self._recordings_cache_name = name
self._load_or_use_client(self._recordings_cache_name, real_http_client)
def close_session(self):
"""Saves recordings in the temporary file named in use_cached_session."""
if self.real_client is not None:
self._save_recordings(self._recordings_cache_name)
def delete_session(self, name=None):
"""Removes recordings from a previous live request."""
if name is None:
self._delete_recordings(self._recordings_cache_name)
else:
self._delete_recordings(name)
def get_cache_file_name(self):
return '%s.%s.%s' % (self.cache_name_prefix, self.cache_case_name,
self.cache_test_name)
def _dump(self):
"""Provides debug information in a string."""
output = 'MockHttpClient\n real_client: %s\n cache file name: %s\n' % (
self.real_client, self.get_cache_file_name())
output += ' recordings:\n'
i = 0
for recording in self._recordings:
output += ' recording %i is for: %s %s\n' % (
i, recording[0].method, str(recording[0].uri))
i += 1
return output
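# A minimal usage sketch (illustrative, not part of the original module; the
# case/test labels and the request object below are assumptions). A test can
# record live traffic once and replay it on later runs:
#
#   client = MockHttpClient()
#   client.cache_case_name = 'ContactsTest'
#   client.cache_test_name = 'test_get_feed'
#   client.use_cached_session()        # load recordings, or fall back to live
#   response = client.request(http_request)  # an atom.http_core.HttpRequest
#   client.close_session()             # persist recordings if it went live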
def _match_request(http_request, stored_request):
"""Determines whether a request is similar enough to a stored request
to cause the stored response to be returned."""
# Check to see if the host names match.
if (http_request.uri.host is not None
and http_request.uri.host != stored_request.uri.host):
return False
# Check the request path in the URL (/feeds/private/full/x)
elif http_request.uri.path != stored_request.uri.path:
return False
# Check the method used in the request (GET, POST, etc.)
elif http_request.method != stored_request.method:
return False
# If there is a gsession ID in either request, make sure that it is matched
# exactly.
elif ('gsessionid' in http_request.uri.query
or 'gsessionid' in stored_request.uri.query):
if 'gsessionid' not in stored_request.uri.query:
return False
elif 'gsessionid' not in http_request.uri.query:
return False
elif (http_request.uri.query['gsessionid']
!= stored_request.uri.query['gsessionid']):
return False
# Ignores differences in the query params (?start-index=5&max-results=20),
# the body of the request, the port number, HTTP headers, just to name a
# few.
return True
def _scrub_request(http_request):
""" Removes email address and password from a client login request.
  Since the mock server saves the request and response in plaintext, sensitive
information like the password should be removed before saving the
recordings. At the moment only requests sent to a ClientLogin url are
scrubbed.
"""
if (http_request and http_request.uri and http_request.uri.path and
http_request.uri.path.endswith('ClientLogin')):
# Remove the email and password from a ClientLogin request.
http_request._body_parts = []
http_request.add_form_inputs(
{'form_data': 'client login request has been scrubbed'})
else:
# We can remove the body of the post from the recorded request, since
# the request body is not used when finding a matching recording.
http_request._body_parts = []
return http_request
def _scrub_response(http_response):
return http_response
class EchoHttpClient(object):
"""Sends the request data back in the response.
Used to check the formatting of the request as it was sent. Always responds
with a 200 OK, and some information from the HTTP request is returned in
special Echo-X headers in the response. The following headers are added
in the response:
'Echo-Host': The host name and port number to which the HTTP connection is
made. If no port was passed in, the header will contain
host:None.
'Echo-Uri': The path portion of the URL being requested. /example?x=1&y=2
'Echo-Scheme': The beginning of the URL, usually 'http' or 'https'
'Echo-Method': The HTTP method being used, 'GET', 'POST', 'PUT', etc.
"""
def request(self, http_request):
return self._http_request(http_request.uri, http_request.method,
http_request.headers, http_request._body_parts)
def _http_request(self, uri, method, headers=None, body_parts=None):
body = StringIO.StringIO()
response = atom.http_core.HttpResponse(status=200, reason='OK', body=body)
if headers is None:
response._headers = {}
else:
# Copy headers from the request to the response but convert values to
# strings. Server response headers always come in as strings, so an int
# should be converted to a corresponding string when echoing.
for header, value in headers.iteritems():
response._headers[header] = str(value)
response._headers['Echo-Host'] = '%s:%s' % (uri.host, str(uri.port))
response._headers['Echo-Uri'] = uri._get_relative_path()
response._headers['Echo-Scheme'] = uri.scheme
response._headers['Echo-Method'] = method
for part in body_parts:
if isinstance(part, str):
body.write(part)
elif hasattr(part, 'read'):
body.write(part.read())
body.seek(0)
return response
class SettableHttpClient(object):
"""An HTTP Client which responds with the data given in set_response."""
def __init__(self, status, reason, body, headers):
"""Configures the response for the server.
See set_response for details on the arguments to the constructor.
"""
self.set_response(status, reason, body, headers)
self.last_request = None
def set_response(self, status, reason, body, headers):
"""Determines the response which will be sent for each request.
Args:
status: An int for the HTTP status code, example: 200, 404, etc.
reason: String for the HTTP reason, example: OK, NOT FOUND, etc.
body: The body of the HTTP response as a string or a file-like
object (something with a read method).
headers: dict of strings containing the HTTP headers in the response.
"""
self.response = atom.http_core.HttpResponse(status=status, reason=reason,
body=body)
self.response._headers = headers.copy()
def request(self, http_request):
self.last_request = http_request
return self.response
class MockHttpResponse(atom.http_core.HttpResponse):
def __init__(self, status=None, reason=None, headers=None, body=None):
self._headers = headers or {}
if status is not None:
self.status = status
if reason is not None:
self.reason = reason
if body is not None:
# Instead of using a file-like object for the body, store as a string
# so that reads can be repeated.
if hasattr(body, 'read'):
self._body = body.read()
else:
self._body = body
def read(self):
return self._body
| gpl-3.0 |
neteler/QGIS | python/plugins/processing/algs/qgis/QGISAlgorithmProvider.py | 1 | 9417 | # -*- coding: utf-8 -*-
"""
***************************************************************************
QGISAlgorithmProvider.py
---------------------
Date : December 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'December 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
try:
import matplotlib.pyplot
hasMatplotlib = True
except:
hasMatplotlib = False
from PyQt4.QtGui import QIcon
from processing.core.AlgorithmProvider import AlgorithmProvider
from processing.script.ScriptUtils import ScriptUtils
from RegularPoints import RegularPoints
from SymetricalDifference import SymetricalDifference
from VectorSplit import VectorSplit
from VectorGrid import VectorGrid
from RandomExtract import RandomExtract
from RandomExtractWithinSubsets import RandomExtractWithinSubsets
from ExtractByLocation import ExtractByLocation
from PointsInPolygon import PointsInPolygon
from PointsInPolygonUnique import PointsInPolygonUnique
from PointsInPolygonWeighted import PointsInPolygonWeighted
from SumLines import SumLines
from BasicStatisticsNumbers import BasicStatisticsNumbers
from BasicStatisticsStrings import BasicStatisticsStrings
from NearestNeighbourAnalysis import NearestNeighbourAnalysis
from LinesIntersection import LinesIntersection
from MeanCoords import MeanCoords
from PointDistance import PointDistance
from UniqueValues import UniqueValues
from ReprojectLayer import ReprojectLayer
from ExportGeometryInfo import ExportGeometryInfo
from Centroids import Centroids
from Delaunay import Delaunay
from VoronoiPolygons import VoronoiPolygons
from DensifyGeometries import DensifyGeometries
from MultipartToSingleparts import MultipartToSingleparts
from SimplifyGeometries import SimplifyGeometries
from LinesToPolygons import LinesToPolygons
from PolygonsToLines import PolygonsToLines
from SinglePartsToMultiparts import SinglePartsToMultiparts
from ExtractNodes import ExtractNodes
from ConvexHull import ConvexHull
from FixedDistanceBuffer import FixedDistanceBuffer
from VariableDistanceBuffer import VariableDistanceBuffer
from Clip import Clip
from Difference import Difference
from Dissolve import Dissolve
from Intersection import Intersection
from ExtentFromLayer import ExtentFromLayer
from RandomSelection import RandomSelection
from RandomSelectionWithinSubsets import RandomSelectionWithinSubsets
from SelectByLocation import SelectByLocation
from Union import Union
from DensifyGeometriesInterval import DensifyGeometriesInterval
from Eliminate import Eliminate
from SpatialJoin import SpatialJoin
from DeleteColumn import DeleteColumn
from DeleteDuplicateGeometries import DeleteDuplicateGeometries
from TextToFloat import TextToFloat
from ExtractByAttribute import ExtractByAttribute
from SelectByAttribute import SelectByAttribute
from Grid import Grid
from Gridify import Gridify
from HubDistance import HubDistance
from HubLines import HubLines
from Merge import Merge
from GeometryConvert import GeometryConvert
from ConcaveHull import ConcaveHull
from Polygonize import Polygonize
from RasterLayerStatistics import RasterLayerStatistics
from StatisticsByCategories import StatisticsByCategories
from EquivalentNumField import EquivalentNumField
from AddTableField import AddTableField
from FieldsCalculator import FieldsCalculator
from SaveSelectedFeatures import SaveSelectedFeatures
from Explode import Explode
from AutoincrementalField import AutoincrementalField
from FieldPyculator import FieldsPyculator
from JoinAttributes import JoinAttributes
from CreateConstantRaster import CreateConstantRaster
from PointsLayerFromTable import PointsLayerFromTable
from PointsDisplacement import PointsDisplacement
from ZonalStatistics import ZonalStatistics
from PointsFromPolygons import PointsFromPolygons
from PointsFromLines import PointsFromLines
from RandomPointsExtent import RandomPointsExtent
from RandomPointsLayer import RandomPointsLayer
from RandomPointsPolygonsFixed import RandomPointsPolygonsFixed
from RandomPointsPolygonsVariable import RandomPointsPolygonsVariable
from RandomPointsAlongLines import RandomPointsAlongLines
from PointsToPaths import PointsToPaths
from PostGISExecuteSQL import PostGISExecuteSQL
from ImportIntoPostGIS import ImportIntoPostGIS
from SetVectorStyle import SetVectorStyle
from SetRasterStyle import SetRasterStyle
from SelectByExpression import SelectByExpression
from HypsometricCurves import HypsometricCurves
from SplitLinesWithLines import SplitLinesWithLines
import processing.resources_rc
class QGISAlgorithmProvider(AlgorithmProvider):
_icon = QIcon(':/processing/images/qgis.png')
def __init__(self):
AlgorithmProvider.__init__(self)
self.alglist = [SumLines(), PointsInPolygon(),
PointsInPolygonWeighted(), PointsInPolygonUnique(),
BasicStatisticsStrings(), BasicStatisticsNumbers(),
NearestNeighbourAnalysis(), MeanCoords(),
LinesIntersection(), UniqueValues(), PointDistance(),
ReprojectLayer(), ExportGeometryInfo(), Centroids(),
Delaunay(), VoronoiPolygons(), SimplifyGeometries(),
DensifyGeometries(), DensifyGeometriesInterval(),
MultipartToSingleparts(), SinglePartsToMultiparts(),
PolygonsToLines(), LinesToPolygons(), ExtractNodes(),
Eliminate(), ConvexHull(), FixedDistanceBuffer(),
VariableDistanceBuffer(), Dissolve(), Difference(),
Intersection(), Union(), Clip(), ExtentFromLayer(),
RandomSelection(), RandomSelectionWithinSubsets(),
SelectByLocation(), RandomExtract(),
RandomExtractWithinSubsets(), ExtractByLocation(),
SpatialJoin(), RegularPoints(), SymetricalDifference(),
VectorSplit(), VectorGrid(), DeleteColumn(),
DeleteDuplicateGeometries(), TextToFloat(),
ExtractByAttribute(), SelectByAttribute(), Grid(),
Gridify(), HubDistance(), HubLines(), Merge(),
GeometryConvert(), AddTableField(), FieldsCalculator(),
SaveSelectedFeatures(), JoinAttributes(),
AutoincrementalField(), Explode(), FieldsPyculator(),
EquivalentNumField(), PointsLayerFromTable(),
StatisticsByCategories(), ConcaveHull(), Polygonize(),
RasterLayerStatistics(), PointsDisplacement(),
ZonalStatistics(), PointsFromPolygons(),
PointsFromLines(), RandomPointsExtent(),
RandomPointsLayer(), RandomPointsPolygonsFixed(),
RandomPointsPolygonsVariable(),
RandomPointsAlongLines(), PointsToPaths(),
PostGISExecuteSQL(), ImportIntoPostGIS(),
SetVectorStyle(), SetRasterStyle(),
SelectByExpression(), HypsometricCurves(),
SplitLinesWithLines(), CreateConstantRaster(),
]
if hasMatplotlib:
from VectorLayerHistogram import VectorLayerHistogram
from RasterLayerHistogram import RasterLayerHistogram
from VectorLayerScatterplot import VectorLayerScatterplot
from MeanAndStdDevPlot import MeanAndStdDevPlot
from BarPlot import BarPlot
from PolarPlot import PolarPlot
self.alglist.extend([
VectorLayerHistogram(), RasterLayerHistogram(),
VectorLayerScatterplot(), MeanAndStdDevPlot(), BarPlot(),
PolarPlot(),
])
folder = os.path.join(os.path.dirname(__file__), 'scripts')
scripts = ScriptUtils.loadFromFolder(folder)
for script in scripts:
script.allowEdit = False
self.alglist.extend(scripts)
for alg in self.alglist:
alg._icon = self._icon
def initializeSettings(self):
AlgorithmProvider.initializeSettings(self)
def unload(self):
AlgorithmProvider.unload(self)
def getName(self):
return 'qgis'
def getDescription(self):
return 'QGIS geoalgorithms'
def getIcon(self):
return self._icon
def _loadAlgorithms(self):
self.algs = self.alglist
def supportsNonFileBasedOutput(self):
return True
| gpl-2.0 |
dwillmer/blaze | blaze/server/spider.py | 1 | 4659 | #!/usr/bin/env python
from __future__ import absolute_import
import os
import sys
import argparse
import yaml
from odo import resource
from odo.utils import ignoring
from .server import Server, DEFAULT_PORT
__all__ = 'spider', 'from_yaml'
def _spider(resource_path, ignore, followlinks, hidden):
resources = {}
for filename in (os.path.join(resource_path, x)
for x in os.listdir(resource_path)):
basename = os.path.basename(filename)
if (basename.startswith(os.curdir) and not hidden or
os.path.islink(filename) and not followlinks):
continue
if os.path.isdir(filename):
new_resources = _spider(filename, ignore=ignore,
followlinks=followlinks, hidden=hidden)
if new_resources:
resources[basename] = new_resources
else:
with ignoring(*ignore):
resources[basename] = resource(filename)
return resources
def spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,
hidden=False):
"""Traverse a directory and call ``odo.resource`` on its contentso
Parameters
----------
path : str
Path to a directory of resources to load
ignore : tuple of Exception, optional
Ignore these exceptions when calling resource
followlinks : bool, optional
Follow symbolic links
hidden : bool, optional
Load hidden files
Returns
-------
dict
        Possibly nested dictionary containing basenames mapping to resources
"""
return {
os.path.basename(path): _spider(path, ignore=ignore,
followlinks=followlinks,
hidden=hidden)
}
def from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,
hidden=False):
"""Construct a dictionary of resources from a YAML specification.
Parameters
----------
path : str
Path to a YAML specification of resources to load
ignore : tuple of Exception, optional
Ignore these exceptions when calling resource
followlinks : bool, optional
Follow symbolic links
hidden : bool, optional
Load hidden files
Returns
-------
dict
A dictionary mapping top level keys in a YAML file to resources.
See Also
--------
spider : Traverse a directory tree for resources
"""
resources = {}
for name, info in yaml.load(path.read()).items():
if 'source' not in info:
raise ValueError('source key not found for data source named %r' %
name)
source = info['source']
if os.path.isdir(source):
resources[name] = spider(os.path.expanduser(source),
ignore=ignore,
followlinks=followlinks,
hidden=hidden)
else:
resources[name] = resource(source, dshape=info.get('dshape'))
return resources
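# Illustrative YAML shape accepted by from_yaml (a sketch; the resource names
# and paths are assumptions). Each top-level key must provide 'source';
# 'dshape' is optional, and a directory source is traversed with spider():
#
#   accounts:
#     source: /data/accounts.csv
#     dshape: "var * {id: int64, name: string}"
#   warehouse:
#     source: ~/data/warehouse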
def _parse_args():
p = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('path', type=argparse.FileType('r'), nargs='?',
default=sys.stdin,
help='A YAML file specifying the resources to load')
p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
help='Port number')
p.add_argument('-H', '--host', type=str, default='127.0.0.1',
help='Host name. Use 0.0.0.0 to listen on all public IPs')
p.add_argument('-l', '--follow-links', action='store_true',
help='Follow links when listing files')
p.add_argument('-e', '--ignored-exception', nargs='*',
default=['Exception'],
help='Exceptions to ignore when calling resource on a file')
p.add_argument('-d', '--hidden', action='store_true',
help='Call resource on hidden files')
p.add_argument('-D', '--debug', action='store_true',
help='Start the Flask server in debug mode')
return p.parse_args()
def _main():
args = _parse_args()
ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
resources = from_yaml(args.path,
ignore=ignore,
followlinks=args.follow_links,
hidden=args.hidden)
Server(resources).run(host=args.host, port=args.port, debug=args.debug)
if __name__ == '__main__':
_main()
| bsd-3-clause |
andmos/ansible | lib/ansible/modules/network/avi/avi_ipamdnsproviderprofile.py | 31 | 5896 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_ipamdnsproviderprofile
author: Gaurav Rastogi (@grastogi23) <[email protected]>
short_description: Module for setup of IpamDnsProviderProfile Avi RESTful Object
description:
- This module is used to configure IpamDnsProviderProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
allocate_ip_in_vrf:
description:
- If this flag is set, only allocate ip from networks in the virtual service vrf.
- Applicable for avi vantage ipam only.
- Field introduced in 17.2.4.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
version_added: "2.5"
type: bool
aws_profile:
description:
- Provider details if type is aws.
azure_profile:
description:
- Provider details if type is microsoft azure.
- Field introduced in 17.2.1.
version_added: "2.5"
custom_profile:
description:
- Provider details if type is custom.
- Field introduced in 17.1.1.
gcp_profile:
description:
- Provider details if type is google cloud.
infoblox_profile:
description:
- Provider details if type is infoblox.
internal_profile:
description:
- Provider details if type is avi.
name:
description:
- Name for the ipam/dns provider profile.
required: true
openstack_profile:
description:
- Provider details if type is openstack.
proxy_configuration:
description:
- Field introduced in 17.1.1.
tenant_ref:
description:
- It is a reference to an object of type tenant.
type:
description:
- Provider type for the ipam/dns provider profile.
- Enum options - IPAMDNS_TYPE_INFOBLOX, IPAMDNS_TYPE_AWS, IPAMDNS_TYPE_OPENSTACK, IPAMDNS_TYPE_GCP, IPAMDNS_TYPE_INFOBLOX_DNS, IPAMDNS_TYPE_CUSTOM,
- IPAMDNS_TYPE_CUSTOM_DNS, IPAMDNS_TYPE_AZURE, IPAMDNS_TYPE_INTERNAL, IPAMDNS_TYPE_INTERNAL_DNS, IPAMDNS_TYPE_AWS_DNS, IPAMDNS_TYPE_AZURE_DNS.
required: true
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the ipam/dns provider profile.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create IPAM DNS provider setting
avi_ipamdnsproviderprofile:
controller: '{{ controller }}'
username: '{{ username }}'
password: '{{ password }}'
internal_profile:
dns_service_domain:
- domain_name: ashish.local
num_dns_ip: 1
pass_through: true
record_ttl: 100
- domain_name: guru.local
num_dns_ip: 1
pass_through: true
record_ttl: 200
ttl: 300
name: Ashish-DNS
tenant_ref: Demo
type: IPAMDNS_TYPE_INTERNAL
"""
RETURN = '''
obj:
description: IpamDnsProviderProfile (api/ipamdnsproviderprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
allocate_ip_in_vrf=dict(type='bool',),
aws_profile=dict(type='dict',),
azure_profile=dict(type='dict',),
custom_profile=dict(type='dict',),
gcp_profile=dict(type='dict',),
infoblox_profile=dict(type='dict',),
internal_profile=dict(type='dict',),
name=dict(type='str', required=True),
openstack_profile=dict(type='dict',),
proxy_configuration=dict(type='dict',),
tenant_ref=dict(type='str',),
type=dict(type='str', required=True),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'ipamdnsproviderprofile',
set([]))
if __name__ == '__main__':
main()
| gpl-3.0 |
jabibi/sale-workflow | sale_order_merge/tests/test_sale_order_merge.py | 9 | 8151 | # -*- coding: utf-8 -*-
# Copyright 2016 Opener B.V. - Stefan Rijnhart
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from openerp.tests.common import TransactionCase
class TestSaleOrderMerge(TransactionCase):
def setUp(self):
super(TestSaleOrderMerge, self).setUp()
self.journal_sale = self.env.ref('account.sales_journal')
self.env.ref('product.product_product_24').write({
'list_price': 2,
})
self.env.ref('product.product_product_25').write({
'list_price': 3,
})
self.period_id = self.env['account.period'].find().id
def create_sale_orders(self, policy):
order1 = self.env['sale.order'].create({
'partner_id': self.env.ref('base.res_partner_2').id,
'order_policy': policy,
'order_line': [
(0, 0, {
'product_id': self.env.ref(
'product.product_product_24').id,
'product_uom_qty': 1,
}),
]
})
order2 = self.env['sale.order'].create({
'partner_id': self.env.ref('base.res_partner_2').id,
'order_policy': policy,
'order_line': [
(0, 0, {
'product_id': self.env.ref(
'product.product_product_24').id,
'product_uom_qty': 1,
}),
(0, 0, {
'product_id': self.env.ref(
'product.product_product_25').id,
'product_uom_qty': 1,
}),
]
})
return order1, order2
def merge(self, order1, order2):
""" Create a wizard, check that mergeable orders are added by default.
Reset mergeable orders to only order2, excluding other orders from
demo data. Perform the merge """
wiz = self.env['sale.order.merge'].browse(
order1.button_merge()['res_id'])
self.assertIn(order2, wiz.to_merge)
wiz.to_merge = order2
wiz.merge()
self.assertEqual(order2.state, 'cancel')
def pay_invoice(self, invoice):
""" Confirm and pay the invoice """
invoice.signal_workflow('invoice_open')
invoice.pay_and_reconcile(
invoice.amount_total, self.env.ref('account.cash').id,
self.period_id, self.env.ref('account.bank_journal').id,
self.env.ref('account.cash').id,
self.period_id, self.env.ref('account.bank_journal').id,)
self.assertEqual(invoice.state, 'paid')
def test_01_policy_manual(self):
""" Check that state is manual after merging orders in different states
because otherwise the button to create additional invoices is not
visible.
"""
order1, order2 = self.create_sale_orders('manual')
order1.action_button_confirm()
order2.action_button_confirm()
self.assertEqual(order1.state, 'manual')
order1.action_invoice_create()
order1.signal_workflow('manual_invoice')
invoice1 = order1.invoice_ids
self.assertEqual(order1.state, 'progress')
self.merge(order1, order2)
self.assertEqual(order1.state, 'manual')
# Pay first invoice
self.pay_invoice(invoice1)
self.assertLess(order1.invoiced_rate, 100)
order1.action_invoice_create()
order1.signal_workflow('manual_invoice')
self.assertEqual(order1.state, 'progress')
# Pay second invoice
self.assertEqual(len(order1.invoice_ids), 2)
invoice2 = order1.invoice_ids - invoice1
self.pay_invoice(invoice2)
self.assertEqual(order1.invoiced_rate, 100)
picking1 = order1.picking_ids
picking1.force_assign()
picking1.do_prepare_partial()
picking1.do_transfer()
self.assertEqual(order1.state, 'done')
def test_02_policy_prepaid(self):
""" Merge prepaid orders and check procurment trigger """
order1, order2 = self.create_sale_orders('prepaid')
order1.action_button_confirm()
order2.action_button_confirm()
self.assertEqual(order1.amount_untaxed, 2)
self.assertEqual(order1.state, 'progress')
self.assertEqual(order2.state, 'progress')
self.assertIn(order1, order2.merge_with)
self.assertIn(order2, order1.merge_with)
self.assertTrue(order1.merge_ok)
self.assertTrue(order2.merge_ok)
# Pay order1's invoice to trigger procurement
invoice1 = order1.invoice_ids
self.pay_invoice(invoice1)
self.assertEqual(order1.invoiced_rate, 100)
picking1 = order1.picking_ids
self.assertEqual(len(picking1.move_lines), 1)
self.merge(order1, order2)
self.assertLess(order1.invoiced_rate, 100)
# The procurement of the additional lines has been triggered
self.assertEqual(len(picking1.move_lines), 3)
# Deliver order and check order status
picking1.force_assign()
picking1.do_prepare_partial()
picking1.do_transfer()
self.assertEqual(order1.state, 'done')
def test_03_policy_picking(self):
""" Merge a partially delivered order into an undelivered one """
order1, order2 = self.create_sale_orders('picking')
order1.action_button_confirm()
order2.action_button_confirm()
self.assertEqual(order1.amount_untaxed, 2)
self.assertEqual(order1.state, 'progress')
self.assertEqual(order2.state, 'progress')
self.assertIn(order1, order2.merge_with)
self.assertIn(order2, order1.merge_with)
self.assertTrue(order1.merge_ok)
self.assertTrue(order2.merge_ok)
move_line1 = order1.picking_ids.move_lines
self.assertEqual(len(move_line1), 1)
# Partially deliver order 2 before merging
picking2 = order2.picking_ids[0]
picking2.force_assign()
picking2.do_prepare_partial()
self.env['stock.pack.operation'].search([
('picking_id', '=', picking2.id),
('product_id', '=', self.env.ref(
'product.product_product_24').id)]).unlink()
picking2.do_transfer()
invoice_id = picking2.with_context(
inv_type='out_invoice').action_invoice_create(
journal_id=self.journal_sale.id, group=False,
type='out_invoice')[0]
invoice2 = self.env['account.invoice'].browse(invoice_id)
self.merge(order1, order2)
self.assertIn(picking2, order1.picking_ids)
self.assertEqual(picking2.origin, order1.name)
self.assertIn(invoice2, order1.invoice_ids)
self.assertEqual(len(order1.order_line), 3)
self.assertEqual(order1.amount_untaxed, 7)
# Retrieve the remaining picking from the original move line, as it may
# have been merged in order2's back order (or the other way around)
picking1 = move_line1.picking_id
self.assertEqual(len(picking1.move_lines), 2)
self.assertIn(picking1, order1.picking_ids)
# Process the remaining goods from the combined order
picking1.force_assign()
picking1.do_prepare_partial()
picking1.do_transfer()
self.assertEqual(order1.state, 'done')
invoice_id = picking1.with_context(
inv_type='out_invoice').action_invoice_create(
journal_id=self.journal_sale.id, group=False,
type='out_invoice')[0]
invoice1 = self.env['account.invoice'].browse(invoice_id)
invoice1.signal_workflow('invoice_open')
invoice2.signal_workflow('invoice_open')
self.assertEqual(order1.invoiced_rate, 100)
def test_04_order_policy(self):
""" The order policy is propagated from the confirmed to the draft """
order1, order2 = self.create_sale_orders('prepaid')
order2.write({'order_policy': 'manual'})
order2.action_button_confirm()
self.merge(order1, order2)
self.assertEqual(order1.order_policy, 'manual')
| agpl-3.0 |
tomvansteijn/openradar | openradar/gridtools.py | 1 | 11674 | # -*- coding: utf-8 -*-
# (c) Nelen & Schuurmans. GPL licensed, see LICENSE.rst.
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
from osgeo import gdal
from osgeo import gdalconst
from osgeo import ogr
from matplotlib.backends import backend_agg
from matplotlib import figure
from matplotlib import colors
from matplotlib import cm
from matplotlib import patches
from PIL import Image
import numpy as np
def ds2ma(dataset, bandnumber=1):
"""
Return np masked array from band in gdal dataset.
"""
band = dataset.GetRasterBand(bandnumber)
fill_value = band.GetNoDataValue()
array = band.ReadAsArray()
mask = np.equal(array, fill_value)
masked_array = np.ma.array(array, mask=mask, fill_value=fill_value)
return masked_array
def h5ds2ma(dataset):
"""
    Return np masked array from h5 dataset.
    Expects an attribute fill_value set on dataset.
"""
fill_value = dataset.attrs['fill_value']
array = dataset[:]
mask = np.equal(array, fill_value)
masked_array = np.ma.array(array, mask=mask, fill_value=fill_value)
return masked_array
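# Minimal usage sketch (the file name is an assumption; any GDAL-readable
# raster whose band has a nodata value set will do):
#
#   dataset = gdal.Open('radar.tif')
#   masked = ds2ma(dataset)  # masked wherever pixels equal the nodata value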
def default_normalize(array):
normalize = colors.Normalize()
return normalize(array)
class BaseGrid(object):
"""
A grid is defined by its size, extent and projection.
Extent is (left, right, top, bottom); cellsize is (width, height);
projection is a wkt string.
"""
def __init__(self, dataset=None, extent=None, size=None, projection=None):
"""
Either use a dataset, or an extent, a cellsize and optionally
a projection
"""
if dataset and not (extent or size or projection):
self._init_from_dataset(dataset)
elif (size is not None and extent is not None) and not dataset:
for s in size:
if not isinstance(s, int):
raise TypeError('Size elements must be of type int.')
self.size = size
self.extent = extent
self.projection = projection
else:
raise NotImplementedError('Incompatible arguments')
def _init_from_dataset(self, dataset):
self.size = dataset.RasterXSize, dataset.RasterYSize
self.projection = dataset.GetProjection()
x, a, b, y, c, d = dataset.GetGeoTransform()
self.extent = x, x + a * self.size[0], y, y + d * self.size[1]
def get_geotransform(self):
left, right, top, bottom = self.extent
cellwidth = (right - left) / self.size[0]
cellheight = (top - bottom) / self.size[1]
return left, cellwidth, 0, top, 0, -cellheight
def get_center(self):
left, right, top, bottom = self.extent
return (right - left) / 2, (top - bottom) / 2
def get_cellsize(self):
left, right, top, bottom = self.extent
cellwidth = (right - left) / self.size[0]
cellheight = (top - bottom) / self.size[1]
return cellwidth, cellheight
def get_shape(self):
return self.size[::-1]
def get_grid(self):
"""
Return x and y coordinates of cell centers.
"""
cellwidth, cellheight = self.get_cellsize()
left, right, top, bottom = self.extent
xcount, ycount = self.size
xmin = left + cellwidth / 2
xmax = right - cellwidth / 2
ymin = bottom + cellheight / 2
ymax = top - cellheight / 2
y, x = np.mgrid[
ymax:ymin:ycount * 1j, xmin:xmax:xcount * 1j]
return x, y
def create_dataset(self, bands=1,
nodatavalue=-9999, datatype=gdalconst.GDT_Float64):
"""
Return empty in-memory dataset.
It has our size, extent and projection.
"""
dataset = gdal.GetDriverByName(b'MEM').Create(
b'', self.size[0], self.size[1], bands, datatype,
)
dataset.SetGeoTransform(self.get_geotransform())
dataset.SetProjection(self.projection)
rasterbands = [dataset.GetRasterBand(i + 1) for i in range(bands)]
for band in rasterbands:
band.SetNoDataValue(nodatavalue)
band.Fill(nodatavalue)
return dataset
def create_imagelayer(self, image):
pass
def create_vectorlayer(self):
""" Create and return VectorLayer. """
return VectorLayer(self)
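# Minimal usage sketch (extent and size values are assumptions): a 100 x 100
# grid covering x 0..1000 and y 0..1000, i.e. cells of 10 x 10:
#
#   grid = BaseGrid(extent=(0, 1000, 1000, 0), size=(100, 100))
#   grid.get_cellsize()     # (10.0, 10.0)
#   x, y = grid.get_grid()  # cell-center coordinates, each shaped (100, 100)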
class AbstractLayer(BaseGrid):
""" Add imaging methods """
    def _rgba(self):
raise NotImplementedError
def _save_img(self, filepath):
self.image().save(filepath)
def _save_tif(self, filepath, rgba=True):
dataset = self._rgba_dataset() if rgba else self._single_band_dataset()
gdal.GetDriverByName(b'GTiff').CreateCopy(
str(filepath), dataset, 0, ['COMPRESS=DEFLATE'],
)
def _save_asc(self, filepath):
""" Save as asc file. """
dataset = self._single_band_dataset()
gdal.GetDriverByName(b'AAIGrid').CreateCopy(filepath, dataset)
def _rgba_dataset(self):
dataset = self.create_dataset(bands=4, datatype=gdalconst.GDT_Byte)
bands = [dataset.GetRasterBand(i + 1) for i in range(4)]
data = self._rgba().transpose(2, 0, 1)
for band, array in zip(bands, data):
band.WriteArray(array)
return dataset
def _single_band_dataset(self):
dataset = self.create_dataset()
band = dataset.GetRasterBand(1)
band.WriteArray(self.ma.filled())
band.SetNoDataValue(self.ma.fill_value)
return dataset
def _checker_image(self):
pattern = (np.indices(
self.get_shape(),
) // 8).sum(0) % 2
return Image.fromarray(cm.gray_r(pattern / 2., bytes=True))
def show(self):
"""
Show after adding checker pattern for transparent pixels.
"""
image = self.image()
checker = self._checker_image()
checker.paste(image, None, image)
checker.show()
def image(self):
return Image.fromarray(self._rgba())
def save(self, filepath, **kwargs):
"""
Save as image file.
"""
if filepath.endswith('.tif') or filepath.endswith('.tiff'):
self._save_tif(filepath, **kwargs)
elif filepath.endswith('.asc'):
self._save_asc(filepath)
else:
self._save_img(filepath)
class RasterLayer(AbstractLayer):
"""
Layer containing grid data.
"""
def __init__(self, dataset=None, band=1, colormap=None, normalize=None,
array=None, extent=None, projection=None):
if dataset and array is None and extent is None and projection is None:
rasterband = dataset.GetRasterBand(band)
self._init_from_dataset(dataset=dataset)
self._ma_from_rasterband(rasterband=rasterband)
elif array is not None and dataset is None:
self.size = array.shape[::-1]
if extent is None:
self.extent = [0, self.size[0], 0, self.size[1]]
else:
self.extent = extent
self.projection = projection
self._ma_from_array(array=array)
else:
raise NotImplementedError('Incompatible arguments')
self.normalize = normalize or default_normalize
self.colormap = colormap or cm.gray
def _ma_from_rasterband(self, rasterband):
"""
        Store masked array and grid properties
"""
fill_value = rasterband.GetNoDataValue()
array = rasterband.ReadAsArray()
mask = np.equal(array, fill_value)
self.ma = np.ma.array(array, mask=mask, fill_value=fill_value)
def _ma_from_array(self, array):
self.ma = np.ma.array(
array,
mask=array.mask if hasattr(array, 'mask') else False,
)
def _rgba(self):
return self.colormap(self.normalize(self.ma), bytes=True)
class VectorLayer(AbstractLayer):
def __init__(self, basegrid):
"""
"""
self.projection = basegrid.projection
self.extent = basegrid.extent
self.size = basegrid.size
self._add_axes()
def _add_axes(self):
"""
        Add matplotlib axes with coordinates set up according to the geographic extent.
"""
dpi = 72
figsize = tuple(c / dpi for c in self.size)
fig = figure.Figure(figsize, dpi, facecolor='g')
fig.patch.set_alpha(0)
backend_agg.FigureCanvasAgg(fig)
rect, axis = self._mpl_config()
axes = fig.add_axes(rect, axisbg='y')
axes.axis(axis)
axes.autoscale(False)
axes.patch.set_alpha(0)
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
self.axes = axes
def _mpl_config(self):
"""
Return rect, axis.
To get the matplotlib axes to match exactly the geotransform
coordinates, an appropriate combination of the axes rect and
the axis limits is required.
Moreover, a factor is applied to make the axes a little larger
than the figure, because otherwise some edge artifacts may
be visible.
"""
factor = 0.1
rect = (-factor, -factor, 1 + 2 * factor, 1 + 2 * factor)
left, right, top, bottom = self.extent
width = right - left
height = top - bottom
cellwidth, cellheight = self.get_cellsize()
        # For some reason, 2 pixels have to be added to the right and top limits.
axis = (
left - width * factor + cellwidth * 0,
right + width * factor + cellwidth * 1,
bottom - height * factor + cellheight * 0,
top + height * factor + cellheight * 1,
)
return rect, axis
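    # Worked example of the above (values are assumptions): with extent
    # (0, 1000, 1000, 0), size (100, 100) and factor 0.1, rect is
    # (-0.1, -0.1, 1.2, 1.2) and axis is (-100, 1110, -100, 1110), i.e. one
    # extra cell (10 units) beyond the right and top edges.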
def _rgba(self):
canvas = self.axes.get_figure().canvas
buf, shape = canvas.print_to_buffer()
rgba = np.fromstring(buf, dtype=np.uint8).reshape(
*(self.get_shape() + tuple([4]))
)
return rgba
def add_image(self, image_path):
""" Add a raster image, assuming extent matches ours. """
image = Image.open(image_path)
self.axes.imshow(image, extent=self.extent)
def add_line(self, shapepath, *plotargs, **plotkwargs):
""" Plot shape as matplotlib line """
dataset = ogr.Open(str(shapepath))
for layer in dataset:
for feature in layer:
x, y = np.array(feature.geometry().GetPoints()).transpose()
self.axes.plot(x, y, *plotargs, **plotkwargs)
def add_patch(self, shapepath, *plotargs, **plotkwargs):
""" Plot shape as matplotlib line """
dataset = ogr.Open(shapepath)
for layer in dataset:
for feature in layer:
xy = np.array(feature.geometry().GetBoundary().GetPoints())
self.axes.add_patch(
patches.Polygon(xy, *plotargs, **plotkwargs)
)
def add_multipolygon(self, shapepath, *plotargs, **plotkwargs):
""" Plot shape as matplotlib line """
dataset = ogr.Open(shapepath)
for layer in dataset:
for feature in layer:
count = feature.geometry().GetGeometryCount()
polygons = [feature.geometry().GetGeometryRef(i)
for i in range(count)]
for polygon in polygons:
xy = np.array(polygon.GetBoundary().GetPoints())
self.axes.add_patch(
patches.Polygon(xy, *plotargs, **plotkwargs)
)
| gpl-3.0 |
DinoCow/airflow | airflow/migrations/versions/8f966b9c467a_set_conn_type_as_non_nullable.py | 10 | 2267 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Set conn_type as non-nullable
Revision ID: 8f966b9c467a
Revises: 3c20cacc0044
Create Date: 2020-06-08 22:36:34.534121
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy.ext.declarative import declarative_base
# revision identifiers, used by Alembic.
revision = "8f966b9c467a"
down_revision = "3c20cacc0044"
branch_labels = None
depends_on = None
def upgrade():
"""Apply Set conn_type as non-nullable"""
Base = declarative_base()
class Connection(Base):
__tablename__ = "connection"
id = sa.Column(sa.Integer(), primary_key=True)
conn_id = sa.Column(sa.String(250))
conn_type = sa.Column(sa.String(500))
# Generate run type for existing records
connection = op.get_bind()
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=connection)
    # imap_default was missing its type, let's fix that up
session.query(Connection).filter_by(conn_id="imap_default", conn_type=None).update(
{Connection.conn_type: "imap"}, synchronize_session=False
)
session.commit()
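    # Roughly equivalent SQL for the data fix above (an illustrative sketch):
    #   UPDATE connection SET conn_type = 'imap'
    #   WHERE conn_id = 'imap_default' AND conn_type IS NULL;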
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column("conn_type", existing_type=sa.VARCHAR(length=500), nullable=False)
def downgrade():
"""Unapply Set conn_type as non-nullable"""
with op.batch_alter_table("connection", schema=None) as batch_op:
batch_op.alter_column("conn_type", existing_type=sa.VARCHAR(length=500), nullable=True)
| apache-2.0 |
cchurch/ansible-modules-core | network/openswitch/ops_config.py | 30 | 10509 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: ops_config
version_added: "2.1"
author: "Peter Sprygada (@privateip)"
short_description: Manage OpenSwitch configuration using CLI
description:
- OpenSwitch configurations use a simple block indent file syntax
for segmenting configuration into sections. This module provides
an implementation for working with ops configuration sections in
a deterministic way.
extends_documentation_fragment: openswitch
options:
lines:
description:
- The ordered set of commands that should be configured in the
section. The commands must be the exact same commands as found
in the device running-config. Be sure to note the configuration
command syntax as some commands are automatically modified by the
device config parser.
required: false
default: null
parents:
description:
- The ordered set of parents that uniquely identify the section
the commands should be checked against. If the parents argument
is omitted, the commands are checked against the set of top
level or global commands.
required: false
default: null
src:
description:
- The I(src) argument provides a path to the configuration file
to load into the remote system. The path can either be a full
system path to the configuration file if the value starts with /
or relative to the root of the implemented role or playbook.
This argument is mutually exclusive with the I(lines) and
I(parents) arguments.
required: false
default: null
version_added: "2.2"
before:
description:
- The ordered set of commands to push on to the command stack if
a change needs to be made. This allows the playbook designer
the opportunity to perform configuration commands prior to pushing
any changes without affecting how the set of commands are matched
against the system.
required: false
default: null
after:
description:
- The ordered set of commands to append to the end of the command
stack if a change needs to be made. Just like with I(before) this
allows the playbook designer to append a set of commands to be
executed after the command set.
required: false
default: null
match:
description:
- Instructs the module on the way to perform the matching of
the set of commands against the current device config. If
match is set to I(line), commands are matched line by line. If
match is set to I(strict), command lines are matched with respect
to position. If match is set to I(exact), command lines
must be an equal match. Finally, if match is set to I(none), the
module will not attempt to compare the source configuration with
the running configuration on the remote device.
required: false
default: line
choices: ['line', 'strict', 'exact', 'none']
replace:
description:
- Instructs the module on the way to perform the configuration
on the device. If the replace argument is set to I(line) then
the modified lines are pushed to the device in configuration
mode. If the replace argument is set to I(block) then the entire
command block is pushed to the device in configuration mode if any
line is not correct.
required: false
default: line
choices: ['line', 'block']
force:
description:
- The force argument instructs the module to not consider the
current devices running-config. When set to true, this will
cause the module to push the contents of I(src) into the device
without first checking if already configured.
- Note this argument should be considered deprecated. To achieve
the equivalent, set the C(match=none) which is idempotent. This argument
will be removed in a future release.
required: false
default: false
choices: ['yes', 'no']
config:
description:
- The module, by default, will connect to the remote device and
retrieve the current running-config to use as a base for comparing
against the contents of source. There are times when it is not
desirable to have the task get the current running-config for
every task in a playbook. The I(config) argument allows the
implementer to pass in the configuration to use as the base
config for comparison.
required: false
default: null
save:
description:
      - The C(save) argument instructs the module to save the
        running-config to the startup-config at the conclusion of the
        module running. If check mode is specified, this argument is
        ignored.
required: false
default: no
choices: ['yes', 'no']
version_added: "2.2"
"""
EXAMPLES = """
# Note: examples below use the following provider dict to handle
# transport and authentication to the node.
vars:
cli:
host: "{{ inventory_hostname }}"
username: netop
password: netop
- name: configure hostname over cli
ops_config:
lines:
- "hostname {{ inventory_hostname }}"
provider: "{{ cli }}"
- name: configure vlan 10 over cli
ops_config:
lines:
- no shutdown
parents:
- vlan 10
provider: "{{ cli }}"
- name: load config from file
ops_config:
src: ops01.cfg
backup: yes
provider: "{{ cli }}"
"""
RETURN = """
updates:
description: The set of commands that will be pushed to the remote device
returned: always
type: list
sample: ['...', '...']
backup_path:
description: The full path to the backup file
returned: when backup is yes
type: path
sample: /playbooks/ansible/backup/ops_config.2016-07-16@22:28:34
"""
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.openswitch import NetworkModule, NetworkError
from ansible.module_utils.netcfg import NetworkConfig, dumps
def check_args(module, warnings):
if module.params['force']:
warnings.append('The force argument is deprecated, please use '
'match=none instead. This argument will be '
'removed in the future')
def get_config(module, result):
contents = module.params['config']
if not contents:
contents = module.config.get_config()
return NetworkConfig(indent=4, contents=contents)
def get_candidate(module):
candidate = NetworkConfig(indent=4)
if module.params['src']:
candidate.load(module.params['src'])
elif module.params['lines']:
parents = module.params['parents'] or list()
candidate.add(module.params['lines'], parents=parents)
return candidate
def load_config(module, commands, result):
if not module.check_mode:
module.config(commands)
result['changed'] = True
def run(module, result):
match = module.params['match']
replace = module.params['replace']
path = module.params['parents']
candidate = get_candidate(module)
if match != 'none':
config = get_config(module, result)
configobjs = candidate.difference(config, path=path, match=match,
replace=replace)
else:
configobjs = candidate.items
if configobjs:
commands = dumps(configobjs, 'commands').split('\n')
if module.params['lines']:
if module.params['before']:
commands[:0] = module.params['before']
if module.params['after']:
commands.extend(module.params['after'])
result['updates'] = commands
# send the configuration commands to the device and merge
# them with the current running config
if not module.check_mode:
module.config.load_config(commands)
result['changed'] = True
if module.params['save']:
if not module.check_mode:
module.config.save_config()
result['changed'] = True
def main():
argument_spec = dict(
src=dict(type='path'),
lines=dict(aliases=['commands'], type='list'),
parents=dict(type='list'),
before=dict(type='list'),
after=dict(type='list'),
match=dict(default='line', choices=['line', 'strict', 'exact', 'none']),
replace=dict(default='line', choices=['line', 'block']),
# this argument is deprecated in favor of setting match: none
# it will be removed in a future version
force=dict(default=False, type='bool'),
config=dict(),
save=dict(type='bool', default=False),
backup=dict(type='bool', default=False),
# ops_config is only supported over Cli transport so force
# the value of transport to be cli
transport=dict(default='cli', choices=['cli'])
)
mutually_exclusive = [('lines', 'src')]
required_if = [('match', 'strict', ['lines']),
('match', 'exact', ['lines']),
('replace', 'block', ['lines'])]
module = NetworkModule(argument_spec=argument_spec,
connect_on_load=False,
mutually_exclusive=mutually_exclusive,
required_if=required_if,
supports_check_mode=True)
if module.params['force'] is True:
module.params['match'] = 'none'
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
if module.params['backup']:
result['__backup__'] = module.config.get_config()
try:
run(module, result)
except NetworkError:
exc = get_exception()
module.fail_json(msg=str(exc))
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
cortext/crawtextV2 | ~/venvs/crawler/lib/python2.7/site-packages/nltk/cluster/api.py | 17 | 2056 | # Natural Language Toolkit: Clusterer Interfaces
#
# Copyright (C) 2001-2012 NLTK Project
# Author: Trevor Cohn <[email protected]>
# Porting: Steven Bird <[email protected]>
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
from nltk.probability import DictionaryProbDist
class ClusterI(object):
"""
Interface covering basic clustering functionality.
"""
def cluster(self, vectors, assign_clusters=False):
"""
Assigns the vectors to clusters, learning the clustering parameters
from the data. Returns a cluster identifier for each vector.
"""
raise NotImplementedError()
def classify(self, token):
"""
Classifies the token into a cluster, setting the token's CLUSTER
parameter to that cluster identifier.
"""
raise NotImplementedError()
def likelihood(self, vector, label):
"""
Returns the likelihood (a float) of the token having the
corresponding cluster.
"""
if self.classify(vector) == label:
return 1.0
else:
return 0.0
def classification_probdist(self, vector):
"""
Classifies the token into a cluster, returning
a probability distribution over the cluster identifiers.
"""
likelihoods = {}
sum = 0.0
for cluster in self.cluster_names():
likelihoods[cluster] = self.likelihood(vector, cluster)
sum += likelihoods[cluster]
for cluster in self.cluster_names():
likelihoods[cluster] /= sum
return DictionaryProbDist(likelihoods)
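    # In effect the loops above normalise the per-cluster likelihoods into a
    # probability distribution: P(c | v) = likelihood(v, c) / sum of
    # likelihood(v, c') over all clusters c'.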
def num_clusters(self):
"""
Returns the number of clusters.
"""
raise NotImplementedError()
def cluster_names(self):
"""
Returns the names of the clusters.
"""
return range(self.num_clusters())
def cluster_name(self, index):
"""
Returns the names of the cluster at index.
"""
return index
| mit |
mancoast/CPythonPyc_test | fail/340_test_windows_utils.py | 6 | 4535 | """Tests for window_utils"""
import sys
import test.support
import unittest
import unittest.mock
if sys.platform != 'win32':
raise unittest.SkipTest('Windows only')
import _winapi
from asyncio import windows_utils
from asyncio import _overlapped
class WinsocketpairTests(unittest.TestCase):
def test_winsocketpair(self):
ssock, csock = windows_utils.socketpair()
csock.send(b'xxx')
self.assertEqual(b'xxx', ssock.recv(1024))
csock.close()
ssock.close()
@unittest.mock.patch('asyncio.windows_utils.socket')
def test_winsocketpair_exc(self, m_socket):
m_socket.socket.return_value.getsockname.return_value = ('', 12345)
m_socket.socket.return_value.accept.return_value = object(), object()
m_socket.socket.return_value.connect.side_effect = OSError()
self.assertRaises(OSError, windows_utils.socketpair)
class PipeTests(unittest.TestCase):
def test_pipe_overlapped(self):
h1, h2 = windows_utils.pipe(overlapped=(True, True))
try:
ov1 = _overlapped.Overlapped()
self.assertFalse(ov1.pending)
self.assertEqual(ov1.error, 0)
ov1.ReadFile(h1, 100)
self.assertTrue(ov1.pending)
self.assertEqual(ov1.error, _winapi.ERROR_IO_PENDING)
ERROR_IO_INCOMPLETE = 996
try:
ov1.getresult()
except OSError as e:
self.assertEqual(e.winerror, ERROR_IO_INCOMPLETE)
else:
raise RuntimeError('expected ERROR_IO_INCOMPLETE')
ov2 = _overlapped.Overlapped()
self.assertFalse(ov2.pending)
self.assertEqual(ov2.error, 0)
ov2.WriteFile(h2, b"hello")
self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})
res = _winapi.WaitForMultipleObjects([ov2.event], False, 100)
self.assertEqual(res, _winapi.WAIT_OBJECT_0)
self.assertFalse(ov1.pending)
self.assertEqual(ov1.error, ERROR_IO_INCOMPLETE)
self.assertFalse(ov2.pending)
self.assertIn(ov2.error, {0, _winapi.ERROR_IO_PENDING})
self.assertEqual(ov1.getresult(), b"hello")
finally:
_winapi.CloseHandle(h1)
_winapi.CloseHandle(h2)
def test_pipe_handle(self):
h, _ = windows_utils.pipe(overlapped=(True, True))
_winapi.CloseHandle(_)
p = windows_utils.PipeHandle(h)
self.assertEqual(p.fileno(), h)
self.assertEqual(p.handle, h)
# check garbage collection of p closes handle
del p
test.support.gc_collect()
try:
_winapi.CloseHandle(h)
except OSError as e:
self.assertEqual(e.winerror, 6) # ERROR_INVALID_HANDLE
else:
raise RuntimeError('expected ERROR_INVALID_HANDLE')
class PopenTests(unittest.TestCase):
def test_popen(self):
command = r"""if 1:
import sys
s = sys.stdin.readline()
sys.stdout.write(s.upper())
sys.stderr.write('stderr')
"""
msg = b"blah\n"
p = windows_utils.Popen([sys.executable, '-c', command],
stdin=windows_utils.PIPE,
stdout=windows_utils.PIPE,
stderr=windows_utils.PIPE)
for f in [p.stdin, p.stdout, p.stderr]:
self.assertIsInstance(f, windows_utils.PipeHandle)
ovin = _overlapped.Overlapped()
ovout = _overlapped.Overlapped()
overr = _overlapped.Overlapped()
ovin.WriteFile(p.stdin.handle, msg)
ovout.ReadFile(p.stdout.handle, 100)
overr.ReadFile(p.stderr.handle, 100)
events = [ovin.event, ovout.event, overr.event]
# Super-long timeout for slow buildbots.
res = _winapi.WaitForMultipleObjects(events, True, 10000)
self.assertEqual(res, _winapi.WAIT_OBJECT_0)
self.assertFalse(ovout.pending)
self.assertFalse(overr.pending)
self.assertFalse(ovin.pending)
self.assertEqual(ovin.getresult(), len(msg))
out = ovout.getresult().rstrip()
err = overr.getresult().rstrip()
self.assertGreater(len(out), 0)
self.assertGreater(len(err), 0)
# allow for partial reads...
self.assertTrue(msg.upper().rstrip().startswith(out))
self.assertTrue(b"stderr".startswith(err))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
bpshetty/erpnext | erpnext/setup/setup_wizard/default_website.py | 38 | 2533 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import nowdate
class website_maker(object):
def __init__(self, args):
self.args = args
self.company = args.company_name
self.tagline = args.company_tagline
self.user = args.name
self.make_web_page()
self.make_website_settings()
self.make_blog()
def make_web_page(self):
# home page
homepage = frappe.get_doc('Homepage', 'Homepage')
homepage.company = self.company
homepage.tag_line = self.tagline
homepage.setup_items()
homepage.save()
def make_website_settings(self):
# update in home page in settings
website_settings = frappe.get_doc("Website Settings", "Website Settings")
website_settings.home_page = 'home'
website_settings.brand_html = self.company
website_settings.copyright = self.company
website_settings.top_bar_items = []
website_settings.append("top_bar_items", {
"doctype": "Top Bar Item",
"label":"Contact",
"url": "/contact"
})
website_settings.append("top_bar_items", {
"doctype": "Top Bar Item",
"label":"Blog",
"url": "/blog"
})
website_settings.append("top_bar_items", {
"doctype": "Top Bar Item",
"label": _("Products"),
"url": "/products"
})
website_settings.save()
def make_blog(self):
blogger = frappe.new_doc("Blogger")
user = frappe.get_doc("User", self.user)
blogger.user = self.user
blogger.full_name = user.first_name + (" " + user.last_name if user.last_name else "")
blogger.short_name = user.first_name.lower()
blogger.avatar = user.user_image
blogger.insert()
blog_category = frappe.get_doc({
"doctype": "Blog Category",
"category_name": "general",
"published": 1,
"title": _("General")
}).insert()
frappe.get_doc({
"doctype": "Blog Post",
"title": "Welcome",
"published": 1,
"published_on": nowdate(),
"blogger": blogger.name,
"blog_category": blog_category.name,
"blog_intro": "My First Blog",
"content": frappe.get_template("setup/setup_wizard/data/sample_blog_post.html").render(),
}).insert()
def test():
frappe.delete_doc("Web Page", "test-company")
frappe.delete_doc("Blog Post", "welcome")
frappe.delete_doc("Blogger", "administrator")
frappe.delete_doc("Blog Category", "general")
website_maker({'company':"Test Company", 'company_tagline': "Better Tools for Everyone", 'name': "Administrator"})
frappe.db.commit()
| gpl-3.0 |
MMariscal/AGILEIOS2014 | Leaflet/test-infra/s3_cache.py | 1700 | 3523 | #!/usr/bin/env python2.7
from __future__ import absolute_import, unicode_literals, print_function, division
from sys import argv
from os import environ, stat, remove as _delete_file
from os.path import isfile, dirname, basename, abspath
from hashlib import sha256
from subprocess import check_call as run
from boto.s3.connection import S3Connection
from boto.s3.key import Key
from boto.exception import S3ResponseError
NEED_TO_UPLOAD_MARKER = '.need-to-upload'
BYTES_PER_MB = 1024 * 1024
try:
BUCKET_NAME = environ['TWBS_S3_BUCKET']
except KeyError:
raise SystemExit("TWBS_S3_BUCKET environment variable not set!")
def _sha256_of_file(filename):
hasher = sha256()
with open(filename, 'rb') as input_file:
hasher.update(input_file.read())
file_hash = hasher.hexdigest()
print('sha256({}) = {}'.format(filename, file_hash))
return file_hash
def _delete_file_quietly(filename):
try:
_delete_file(filename)
except (OSError, IOError):
pass
def _tarball_size(directory):
kib = stat(_tarball_filename_for(directory)).st_size // BYTES_PER_MB
return "{} MiB".format(kib)
def _tarball_filename_for(directory):
return abspath('./{}.tar.gz'.format(basename(directory)))
def _create_tarball(directory):
print("Creating tarball of {}...".format(directory))
run(['tar', '-czf', _tarball_filename_for(directory), '-C', dirname(directory), basename(directory)])
def _extract_tarball(directory):
print("Extracting tarball of {}...".format(directory))
run(['tar', '-xzf', _tarball_filename_for(directory), '-C', dirname(directory)])
def download(directory):
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
try:
print("Downloading {} tarball from S3...".format(friendly_name))
key.get_contents_to_filename(_tarball_filename_for(directory))
except S3ResponseError as err:
open(NEED_TO_UPLOAD_MARKER, 'a').close()
print(err)
raise SystemExit("Cached {} download failed!".format(friendly_name))
print("Downloaded {}.".format(_tarball_size(directory)))
_extract_tarball(directory)
print("{} successfully installed from cache.".format(friendly_name))
def upload(directory):
_create_tarball(directory)
print("Uploading {} tarball to S3... ({})".format(friendly_name, _tarball_size(directory)))
key.set_contents_from_filename(_tarball_filename_for(directory))
print("{} cache successfully updated.".format(friendly_name))
_delete_file_quietly(NEED_TO_UPLOAD_MARKER)
if __name__ == '__main__':
# Uses environment variables:
# AWS_ACCESS_KEY_ID -- AWS Access Key ID
# AWS_SECRET_ACCESS_KEY -- AWS Secret Access Key
argv.pop(0)
if len(argv) != 4:
raise SystemExit("USAGE: s3_cache.py <download | upload> <friendly name> <dependencies file> <directory>")
mode, friendly_name, dependencies_file, directory = argv
conn = S3Connection()
bucket = conn.lookup(BUCKET_NAME, validate=False)
if bucket is None:
raise SystemExit("Could not access bucket!")
dependencies_file_hash = _sha256_of_file(dependencies_file)
key = Key(bucket, dependencies_file_hash)
key.storage_class = 'REDUCED_REDUNDANCY'
if mode == 'download':
download(directory)
elif mode == 'upload':
if isfile(NEED_TO_UPLOAD_MARKER): # FIXME
upload(directory)
else:
print("No need to upload anything.")
else:
raise SystemExit("Unrecognized mode {!r}".format(mode))
| gpl-2.0 |
mklew/mmp | pylib/cqlshlib/pylexotron.py | 7 | 16285 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
import re
from .saferscanner import SaferScanner
class LexingError(Exception):
@classmethod
def from_text(cls, rulestr, unmatched, msg='Lexing error'):
bad_char = len(rulestr) - len(unmatched)
linenum = rulestr[:bad_char].count('\n') + 1
charnum = len(rulestr[:bad_char].rsplit('\n', 1)[-1]) + 1
raise cls(linenum, charnum, msg)
def __init__(self, linenum, charnum, msg='Lexing error'):
self.linenum = linenum
self.charnum = charnum
self.msg = msg
self.args = (linenum, charnum, msg)
def __str__(self):
return '%s at line %d, char %d' % (self.msg, self.linenum, self.charnum)
class Hint:
def __init__(self, text):
self.text = text
def __hash__(self):
return hash((id(self.__class__), self.text))
def __eq__(self, other):
return isinstance(other, self.__class__) and other.text == self.text
def __repr__(self):
return '%s(%r)' % (self.__class__, self.text)
def is_hint(x):
return isinstance(x, Hint)
class ParseContext:
"""
These are meant to be immutable, although it would be something of a
pain to enforce that in python.
"""
def __init__(self, ruleset, bindings, matched, remainder, productionname):
self.ruleset = ruleset
self.bindings = bindings
self.matched = matched
self.remainder = remainder
self.productionname = productionname
def get_production_by_name(self, name):
return self.ruleset[name]
def get_completer(self, symname):
return self.ruleset[(self.productionname, symname)]
def get_binding(self, name, default=None):
return self.bindings.get(name, default)
def with_binding(self, name, val):
newbinds = self.bindings.copy()
newbinds[name] = val
return self.__class__(self.ruleset, newbinds, self.matched,
self.remainder, self.productionname)
def with_match(self, num):
return self.__class__(self.ruleset, self.bindings,
self.matched + self.remainder[:num],
self.remainder[num:], self.productionname)
def with_production_named(self, newname):
return self.__class__(self.ruleset, self.bindings, self.matched,
self.remainder, newname)
def __repr__(self):
return '<%s matched=%r remainder=%r prodname=%r>' % (self.__class__.__name__, self.matched, self.remainder,
self.productionname)
class matcher:
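    # Base class for all pattern elements; self.arg holds the wrapped sub-pattern(s) or literal value.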
def __init__(self, arg):
self.arg = arg
def match(self, ctxt, completions):
raise NotImplementedError
def match_with_results(self, ctxt, completions):
matched_before = len(ctxt.matched)
newctxts = self.match(ctxt, completions)
return [(newctxt, newctxt.matched[matched_before:]) for newctxt in newctxts]
@staticmethod
def try_registered_completion(ctxt, symname, completions):
if ctxt.remainder or completions is None:
return False
try:
completer = ctxt.get_completer(symname)
except KeyError:
return False
try:
new_compls = completer(ctxt)
except Exception:
if ctxt.get_binding('*DEBUG*', False):
import traceback
traceback.print_exc()
return False
completions.update(new_compls)
return True
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.arg)
class choice(matcher):
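    # Matches if any one of the alternative sub-patterns in self.arg matches.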
def match(self, ctxt, completions):
foundctxts = []
for a in self.arg:
subctxts = a.match(ctxt, completions)
foundctxts.extend(subctxts)
return foundctxts
class one_or_none(matcher):
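    # Optional element: matches zero or one occurrence of the wrapped sub-pattern.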
def match(self, ctxt, completions):
return [ctxt] + list(self.arg.match(ctxt, completions))
class repeat(matcher):
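    # Matches zero or more consecutive occurrences of the wrapped sub-pattern.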
def match(self, ctxt, completions):
found = [ctxt]
ctxts = [ctxt]
while True:
new_ctxts = []
for c in ctxts:
new_ctxts.extend(self.arg.match(c, completions))
if not new_ctxts:
return found
found.extend(new_ctxts)
ctxts = new_ctxts
class rule_reference(matcher):
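    # Delegates matching to another named production rule in the ruleset.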
def match(self, ctxt, completions):
prevname = ctxt.productionname
try:
rule = ctxt.get_production_by_name(self.arg)
except KeyError:
raise ValueError("Can't look up production rule named %r" % (self.arg,))
output = rule.match(ctxt.with_production_named(self.arg), completions)
return [c.with_production_named(prevname) for c in output]
class rule_series(matcher):
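    # Matches a fixed sequence of sub-patterns, one after the other.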
def match(self, ctxt, completions):
ctxts = [ctxt]
for patpiece in self.arg:
new_ctxts = []
for c in ctxts:
new_ctxts.extend(patpiece.match(c, completions))
if not new_ctxts:
return ()
ctxts = new_ctxts
return ctxts
class named_symbol(matcher):
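    # Wraps a sub-pattern and binds whatever text it matches to a name in the parse context.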
def __init__(self, name, arg):
matcher.__init__(self, arg)
self.name = name
def match(self, ctxt, completions):
pass_in_compls = completions
if self.try_registered_completion(ctxt, self.name, completions):
# don't collect other completions under this; use a dummy
pass_in_compls = set()
results = self.arg.match_with_results(ctxt, pass_in_compls)
return [c.with_binding(self.name, tokens_to_text(matchtoks)) for (c, matchtoks) in results]
def __repr__(self):
return '%s(%r, %r)' % (self.__class__.__name__, self.name, self.arg)
class named_collector(named_symbol):
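    # Like named_symbol, but accumulates repeated matches into a tuple binding.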
def match(self, ctxt, completions):
pass_in_compls = completions
if self.try_registered_completion(ctxt, self.name, completions):
# don't collect other completions under this; use a dummy
pass_in_compls = set()
output = []
for ctxt, matchtoks in self.arg.match_with_results(ctxt, pass_in_compls):
oldval = ctxt.get_binding(self.name, ())
output.append(ctxt.with_binding(self.name, oldval + (tokens_to_text(matchtoks),)))
return output
class terminal_matcher(matcher):
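    # Base class for matchers that consume a single lexed token; pattern() supplies the lexer regex.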
def pattern(self):
raise NotImplementedError
class regex_rule(terminal_matcher):
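    # Terminal matcher: tests the next token against a regular expression.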
def __init__(self, pat):
terminal_matcher.__init__(self, pat)
self.regex = pat
self.re = re.compile(pat + '$', re.I | re.S)
def match(self, ctxt, completions):
if ctxt.remainder:
if self.re.match(ctxt.remainder[0][1]):
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(Hint('<%s>' % ctxt.productionname))
return []
def pattern(self):
return self.regex
class text_match(terminal_matcher):
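    # Terminal matcher for a literal string; the comparison ignores case.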
alpha_re = re.compile(r'[a-zA-Z]')
def __init__(self, text):
try:
terminal_matcher.__init__(self, eval(text))
except SyntaxError:
print "bad syntax %r" % (text,)
def match(self, ctxt, completions):
if ctxt.remainder:
if self.arg.lower() == ctxt.remainder[0][1].lower():
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(self.arg)
return []
def pattern(self):
# can't use (?i) here- Scanner component regex flags won't be applied
def ignorecaseify(matchobj):
c = matchobj.group(0)
return '[%s%s]' % (c.upper(), c.lower())
return self.alpha_re.sub(ignorecaseify, re.escape(self.arg))
class case_match(text_match):
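    # Like text_match, but the token must match the literal with exact case.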
def match(self, ctxt, completions):
if ctxt.remainder:
if self.arg == ctxt.remainder[0][1]:
return [ctxt.with_match(1)]
elif completions is not None:
completions.add(self.arg)
return []
def pattern(self):
return re.escape(self.arg)
def tokens_to_text(toks):
return ' '.join([t[1] for t in toks])
class ParsingRuleSet:
RuleSpecScanner = SaferScanner([
(r'::=', lambda s,t: t),
(r'\[[a-z0-9_]+\]=', lambda s,t: ('named_collector', t[1:-2])),
(r'[a-z0-9_]+=', lambda s,t: ('named_symbol', t[:-1])),
(r'/(\[\^?.[^]]*\]|[^/]|\\.)*/', lambda s,t: ('regex', t[1:-1].replace(r'\/', '/'))),
(r'"([^"]|\\.)*"', lambda s,t: ('litstring', t)),
(r'<[^>]*>', lambda s,t: ('reference', t[1:-1])),
(r'\bJUNK\b', lambda s,t: ('junk', t)),
(r'[@()|?*;]', lambda s,t: t),
(r'\s+', None),
(r'#[^\n]*', None),
], re.I | re.S)
def __init__(self):
self.ruleset = {}
self.scanner = None
self.terminals = []
@classmethod
def from_rule_defs(cls, rule_defs):
prs = cls()
prs.ruleset, prs.terminals = cls.parse_rules(rule_defs)
return prs
@classmethod
def parse_rules(cls, rulestr):
tokens, unmatched = cls.RuleSpecScanner.scan(rulestr)
if unmatched:
raise LexingError.from_text(rulestr, unmatched, msg="Syntax rules unparseable")
rules = {}
terminals = []
tokeniter = iter(tokens)
for t in tokeniter:
if isinstance(t, tuple) and t[0] in ('reference', 'junk'):
assign = tokeniter.next()
if assign != '::=':
raise ValueError('Unexpected token %r; expected "::="' % (assign,))
name = t[1]
production = cls.read_rule_tokens_until(';', tokeniter)
rules[name] = production
if isinstance(production, terminal_matcher):
terminals.append((name, production))
else:
raise ValueError('Unexpected token %r; expected name' % (t,))
return rules, terminals
@staticmethod
def mkrule(pieces):
if isinstance(pieces, (tuple, list)):
if len(pieces) == 1:
return pieces[0]
return rule_series(pieces)
return pieces
@classmethod
def read_rule_tokens_until(cls, endtoks, tokeniter):
if isinstance(endtoks, basestring):
endtoks = (endtoks,)
counttarget = None
if isinstance(endtoks, int):
counttarget = endtoks
endtoks = ()
countsofar = 0
myrules = []
mybranches = [myrules]
for t in tokeniter:
countsofar += 1
if t in endtoks:
if len(mybranches) == 1:
return cls.mkrule(mybranches[0])
return choice(map(cls.mkrule, mybranches))
if isinstance(t, tuple):
if t[0] == 'reference':
t = rule_reference(t[1])
elif t[0] == 'litstring':
t = text_match(t[1])
elif t[0] == 'regex':
t = regex_rule(t[1])
elif t[0] == 'named_collector':
t = named_collector(t[1], cls.read_rule_tokens_until(1, tokeniter))
elif t[0] == 'named_symbol':
t = named_symbol(t[1], cls.read_rule_tokens_until(1, tokeniter))
elif t == '(':
t = cls.read_rule_tokens_until(')', tokeniter)
elif t == '?':
t = one_or_none(myrules.pop(-1))
elif t == '*':
t = repeat(myrules.pop(-1))
elif t == '@':
x = tokeniter.next()
if not isinstance(x, tuple) or x[0] != 'litstring':
raise ValueError("Unexpected token %r following '@'" % (x,))
t = case_match(x[1])
elif t == '|':
myrules = []
mybranches.append(myrules)
continue
else:
raise ValueError('Unparseable rule token %r after %r' % (t, myrules[-1]))
myrules.append(t)
if countsofar == counttarget:
if len(mybranches) == 1:
return cls.mkrule(mybranches[0])
return choice(map(cls.mkrule, mybranches))
raise ValueError('Unexpected end of rule tokens')
def append_rules(self, rulestr):
rules, terminals = self.parse_rules(rulestr)
self.ruleset.update(rules)
self.terminals.extend(terminals)
if terminals:
self.scanner = None # recreate it if/when necessary
def register_completer(self, func, rulename, symname):
self.ruleset[(rulename, symname)] = func
def make_lexer(self):
def make_handler(name):
if name == 'JUNK':
return None
return lambda s, t: (name, t)
regexes = [(p.pattern(), make_handler(name)) for (name, p) in self.terminals]
return SaferScanner(regexes, re.I | re.S).scan
def lex(self, text):
if self.scanner is None:
self.scanner = self.make_lexer()
tokens, unmatched = self.scanner(text)
if unmatched:
raise LexingError.from_text(text, unmatched, 'text could not be lexed')
return tokens
def parse(self, startsymbol, tokens, init_bindings=None):
if init_bindings is None:
init_bindings = {}
ctxt = ParseContext(self.ruleset, init_bindings, (), tuple(tokens), startsymbol)
pattern = self.ruleset[startsymbol]
return pattern.match(ctxt, None)
def whole_match(self, startsymbol, tokens):
newctxts = [c for c in self.parse(startsymbol, tokens) if not c.remainder]
if newctxts:
return newctxts[0]
def lex_and_parse(self, text, startsymbol='Start'):
return self.parse(startsymbol, self.lex(text))
def complete(self, startsymbol, tokens, init_bindings=None):
if init_bindings is None:
init_bindings = {}
ctxt = ParseContext(self.ruleset, init_bindings, (), tuple(tokens), startsymbol)
pattern = self.ruleset[startsymbol]
if init_bindings.get('*DEBUG*', False):
completions = Debugotron(stream=sys.stderr)
else:
completions = set()
pattern.match(ctxt, completions)
return completions
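# Typical flow (the names below are illustrative placeholders, not part of cqlsh):
#     prs = ParsingRuleSet.from_rule_defs(rule_definition_text)
#     tokens = prs.lex(input_line)
#     candidates = prs.complete('Start', tokens)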
import sys, traceback
class Debugotron(set):
depth = 10
def __init__(self, initializer=(), stream=sys.stdout):
set.__init__(self, initializer)
self.stream = stream
def add(self, item):
self._note_addition(item)
set.add(self, item)
def _note_addition(self, foo):
self.stream.write("\nitem %r added by:\n" % (foo,))
frame = sys._getframe().f_back.f_back
for i in range(self.depth):
name = frame.f_code.co_name
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if 'self' in frame.f_locals:
clsobj = frame.f_locals['self']
cls = clsobj.__class__
line = '%s.%s() (%s:%d)' % (clsobj, name, filename, lineno)
else:
line = '%s (%s:%d)' % (name, filename, lineno)
self.stream.write(' %s\n' % (line,))
frame = frame.f_back
def update(self, items):
if items:
self._note_addition(items)
set.update(self, items)
| apache-2.0 |
acsone/acsone-addons | asynchronous_batch_mailings/__openerp__.py | 1 | 1887 | # -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of asynchronous_batch_mailings, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# asynchronous_batch_mailings is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# asynchronous_batch_mailings is distributed in the hope
# that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with asynchronous_batch_mailings.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Asynchronous Batch Mailings',
'version': '8.0.1.0.0',
'author': 'ACSONE SA/NV',
'maintainer': 'ACSONE SA/NV',
'website': 'http://www.acsone.eu',
'category': 'Marketing',
'depends': [
'mail',
'connector',
],
'description': """
Asynchronous Batch Mailings
===========================
    This module allows emails to be sent asynchronously.
    Moreover, it provides a way to split huge mailings.
Two parameters are available:
* the mailing size from which the mailing must become asynchronous
* the batch size
""",
'images': [
],
'data': [
'data/ir_config_parameter_data.xml',
],
'qweb': [
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
}
| agpl-3.0 |
eddiejessup/ciabatta | ciabatta/network.py | 1 | 1267 | """
Functions relating to networking, as in the internet.
"""
from __future__ import (division, unicode_literals, absolute_import,
print_function)
import smtplib
import socket
def email_with_gmail(username, password,
to, subject, body):
"""Send an email from an gmail account.
Parameters
----------
username, password: string
        Gmail credentials; the username is the prefix before @gmail.com.
to: string
Email address of the recipient.
subject, body: string
Email subject and content.
"""
headers = '\r\n'.join([
'from: {}'.format(username),
'subject: {}'.format(subject),
'to: {}'.format(to),
'mime-version: 1.0',
'content-type: text/html'])
session = smtplib.SMTP('smtp.gmail.com', 587)
session.ehlo()
session.starttls()
session.login(username, password)
session.sendmail(username, to, headers + '\r\n\r\n' + body)
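# Example call (credentials and addresses below are placeholders):
#     email_with_gmail('alice', 'app-password', '[email protected]', 'Hi', '<b>Hello</b>')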
def get_local_ip():
"""Return the local IP address.
Returns
-------
ip: string
IP address
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
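        # Connecting a UDP socket sends no packets; it only selects the outgoing
        # interface, whose address getsockname() then reports.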
s.connect(("gmail.com", 80))
return s.getsockname()[0]
finally:
s.close()
| bsd-3-clause |
2014cdag1/cdag1 | wsgi/static/Brython2.1.0-20140419-113919/Lib/weakref.py | 769 | 11495 | """Weak reference support for Python.
This module is an implementation of PEP 205:
http://www.python.org/dev/peps/pep-0205/
"""
# Naming convention: Variables named "wr" are weak reference objects;
# they are called this instead of "ref" to avoid name collisions with
# the module-global ref() function imported from _weakref.
from _weakref import (
getweakrefcount,
getweakrefs,
ref,
proxy,
CallableProxyType,
ProxyType,
ReferenceType)
from _weakrefset import WeakSet, _IterationGuard
import collections # Import after _weakref to avoid circular import.
ProxyTypes = (ProxyType, CallableProxyType)
__all__ = ["ref", "proxy", "getweakrefcount", "getweakrefs",
"WeakKeyDictionary", "ReferenceType", "ProxyType",
"CallableProxyType", "ProxyTypes", "WeakValueDictionary",
"WeakSet"]
class WeakValueDictionary(collections.MutableMapping):
"""Mapping class that references values weakly.
Entries in the dictionary will be discarded when no strong
reference to the value exists anymore
"""
# We inherit the constructor without worrying about the input
# dictionary; since it uses our .update() method, we get the right
# checks (if the other dictionary is a WeakValueDictionary,
# objects are unwrapped on the way out, and we always wrap on the
# way in).
def __init__(self, *args, **kw):
def remove(wr, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(wr.key)
else:
del self.data[wr.key]
self._remove = remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
self.data = d = {}
self.update(*args, **kw)
def _commit_removals(self):
l = self._pending_removals
d = self.data
# We shouldn't encounter any KeyError, because this method should
# always be called *before* mutating the dict.
while l:
del d[l.pop()]
def __getitem__(self, key):
o = self.data[key]()
if o is None:
raise KeyError(key)
else:
return o
def __delitem__(self, key):
if self._pending_removals:
self._commit_removals()
del self.data[key]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __contains__(self, key):
try:
o = self.data[key]()
except KeyError:
return False
return o is not None
def __repr__(self):
return "<WeakValueDictionary at %s>" % id(self)
def __setitem__(self, key, value):
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(value, self._remove, key)
def copy(self):
new = WeakValueDictionary()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[key] = o
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, wr in self.data.items():
o = wr()
if o is not None:
new[deepcopy(key, memo)] = o
return new
def get(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
return default
else:
o = wr()
if o is None:
                # This should only happen if the referent has already been
                # garbage collected, i.e. the weak reference is dead.
return default
else:
return o
def items(self):
with _IterationGuard(self):
for k, wr in self.data.items():
v = wr()
if v is not None:
yield k, v
def keys(self):
with _IterationGuard(self):
for k, wr in self.data.items():
if wr() is not None:
yield k
__iter__ = keys
def itervaluerefs(self):
"""Return an iterator that yields the weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
with _IterationGuard(self):
for wr in self.data.values():
yield wr
def values(self):
with _IterationGuard(self):
for wr in self.data.values():
obj = wr()
if obj is not None:
yield obj
def popitem(self):
if self._pending_removals:
self._commit_removals()
while True:
key, wr = self.data.popitem()
o = wr()
if o is not None:
return key, o
def pop(self, key, *args):
if self._pending_removals:
self._commit_removals()
try:
o = self.data.pop(key)()
except KeyError:
if args:
return args[0]
raise
if o is None:
raise KeyError(key)
else:
return o
def setdefault(self, key, default=None):
try:
wr = self.data[key]
except KeyError:
if self._pending_removals:
self._commit_removals()
self.data[key] = KeyedRef(default, self._remove, key)
return default
else:
return wr()
def update(self, dict=None, **kwargs):
if self._pending_removals:
self._commit_removals()
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, o in dict.items():
d[key] = KeyedRef(o, self._remove, key)
if len(kwargs):
self.update(kwargs)
def valuerefs(self):
"""Return a list of weak references to the values.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the values around longer than needed.
"""
return list(self.data.values())
class KeyedRef(ref):
"""Specialized reference that includes a key corresponding to the value.
This is used in the WeakValueDictionary to avoid having to create
a function object for each key stored in the mapping. A shared
callback object can use the 'key' attribute of a KeyedRef instead
of getting a reference to the key from an enclosing scope.
"""
__slots__ = "key",
def __new__(type, ob, callback, key):
self = ref.__new__(type, ob, callback)
self.key = key
return self
def __init__(self, ob, callback, key):
super().__init__(ob, callback)
class WeakKeyDictionary(collections.MutableMapping):
""" Mapping class that references keys weakly.
Entries in the dictionary will be discarded when there is no
longer a strong reference to the key. This can be used to
associate additional data with an object owned by other parts of
an application without adding attributes to those objects. This
can be especially useful with objects that override attribute
accesses.
"""
def __init__(self, dict=None):
self.data = {}
def remove(k, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(k)
else:
del self.data[k]
self._remove = remove
# A list of dead weakrefs (keys to be removed)
self._pending_removals = []
self._iterating = set()
if dict is not None:
self.update(dict)
def _commit_removals(self):
# NOTE: We don't need to call this method before mutating the dict,
# because a dead weakref never compares equal to a live weakref,
# even if they happened to refer to equal objects.
# However, it means keys may already have been removed.
l = self._pending_removals
d = self.data
while l:
try:
del d[l.pop()]
except KeyError:
pass
def __delitem__(self, key):
del self.data[ref(key)]
def __getitem__(self, key):
return self.data[ref(key)]
def __len__(self):
return len(self.data) - len(self._pending_removals)
def __repr__(self):
return "<WeakKeyDictionary at %s>" % id(self)
def __setitem__(self, key, value):
self.data[ref(key, self._remove)] = value
def copy(self):
new = WeakKeyDictionary()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = value
return new
__copy__ = copy
def __deepcopy__(self, memo):
from copy import deepcopy
new = self.__class__()
for key, value in self.data.items():
o = key()
if o is not None:
new[o] = deepcopy(value, memo)
return new
def get(self, key, default=None):
return self.data.get(ref(key),default)
def __contains__(self, key):
try:
wr = ref(key)
except TypeError:
return False
return wr in self.data
def items(self):
with _IterationGuard(self):
for wr, value in self.data.items():
key = wr()
if key is not None:
yield key, value
def keys(self):
with _IterationGuard(self):
for wr in self.data:
obj = wr()
if obj is not None:
yield obj
__iter__ = keys
def values(self):
with _IterationGuard(self):
for wr, value in self.data.items():
if wr() is not None:
yield value
def keyrefs(self):
"""Return a list of weak references to the keys.
The references are not guaranteed to be 'live' at the time
they are used, so the result of calling the references needs
to be checked before being used. This can be used to avoid
creating references that will cause the garbage collector to
keep the keys around longer than needed.
"""
return list(self.data)
def popitem(self):
while True:
key, value = self.data.popitem()
o = key()
if o is not None:
return o, value
def pop(self, key, *args):
return self.data.pop(ref(key), *args)
def setdefault(self, key, default=None):
return self.data.setdefault(ref(key, self._remove),default)
def update(self, dict=None, **kwargs):
d = self.data
if dict is not None:
if not hasattr(dict, "items"):
dict = type({})(dict)
for key, value in dict.items():
d[ref(key, self._remove)] = value
if len(kwargs):
self.update(kwargs)
| gpl-2.0 |
glove747/liberty-neutron | neutron/plugins/ml2/drivers/brocade/db/models.py | 63 | 4420 | # Copyright 2014 Brocade Communications System, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Brocade specific database schema/model."""
import sqlalchemy as sa
from neutron.db import model_base
from neutron.db import models_v2
class ML2_BrocadeNetwork(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Schema for brocade network."""
vlan = sa.Column(sa.String(10))
segment_id = sa.Column(sa.String(36))
network_type = sa.Column(sa.String(10))
class ML2_BrocadePort(model_base.BASEV2, models_v2.HasId,
models_v2.HasTenant):
"""Schema for brocade port."""
network_id = sa.Column(sa.String(36),
sa.ForeignKey("ml2_brocadenetworks.id"),
nullable=False)
admin_state_up = sa.Column(sa.Boolean, nullable=False)
physical_interface = sa.Column(sa.String(36))
vlan_id = sa.Column(sa.String(36))
def create_network(context, net_id, vlan, segment_id, network_type, tenant_id):
"""Create a brocade specific network/port-profiles."""
# only network_type of vlan is supported
session = context.session
with session.begin(subtransactions=True):
net = get_network(context, net_id, None)
if not net:
net = ML2_BrocadeNetwork(id=net_id, vlan=vlan,
segment_id=segment_id,
network_type='vlan',
tenant_id=tenant_id)
session.add(net)
return net
def delete_network(context, net_id):
"""Delete a brocade specific network/port-profiles."""
session = context.session
with session.begin(subtransactions=True):
net = get_network(context, net_id, None)
if net:
session.delete(net)
def get_network(context, net_id, fields=None):
"""Get brocade specific network, with vlan extension."""
session = context.session
return session.query(ML2_BrocadeNetwork).filter_by(id=net_id).first()
def get_networks(context, filters=None, fields=None):
"""Get all brocade specific networks."""
session = context.session
return session.query(ML2_BrocadeNetwork).all()
def create_port(context, port_id, network_id, physical_interface,
vlan_id, tenant_id, admin_state_up):
"""Create a brocade specific port, has policy like vlan."""
session = context.session
with session.begin(subtransactions=True):
port = get_port(context, port_id)
if not port:
port = ML2_BrocadePort(id=port_id,
network_id=network_id,
physical_interface=physical_interface,
vlan_id=vlan_id,
admin_state_up=admin_state_up,
tenant_id=tenant_id)
session.add(port)
return port
def get_port(context, port_id):
"""get a brocade specific port."""
session = context.session
return session.query(ML2_BrocadePort).filter_by(id=port_id).first()
def get_ports(context, network_id=None):
"""get a brocade specific port."""
session = context.session
return session.query(ML2_BrocadePort).filter_by(
network_id=network_id).all()
def delete_port(context, port_id):
"""delete brocade specific port."""
session = context.session
with session.begin(subtransactions=True):
port = get_port(context, port_id)
if port:
session.delete(port)
def update_port_state(context, port_id, admin_state_up):
"""Update port attributes."""
session = context.session
with session.begin(subtransactions=True):
session.query(ML2_BrocadePort).filter_by(
id=port_id).update({'admin_state_up': admin_state_up})
| apache-2.0 |
mancoast/CPythonPyc_test | cpython/241_test_dummy_thread.py | 31 | 7139 | """Generic thread tests.
Meant to be used by dummy_thread and thread. To allow for different modules
to be used, test_main() can be called with the module to use as the thread
implementation as its sole argument.
"""
import dummy_thread as _thread
import time
import Queue
import random
import unittest
from test import test_support
DELAY = 0 # Set > 0 when testing a module other than dummy_thread, such as
# the 'thread' module.
class LockTests(unittest.TestCase):
"""Test lock objects."""
def setUp(self):
# Create a lock
self.lock = _thread.allocate_lock()
def test_initlock(self):
#Make sure locks start locked
self.failUnless(not self.lock.locked(),
"Lock object is not initialized unlocked.")
def test_release(self):
# Test self.lock.release()
self.lock.acquire()
self.lock.release()
self.failUnless(not self.lock.locked(),
"Lock object did not release properly.")
def test_improper_release(self):
#Make sure release of an unlocked thread raises _thread.error
self.failUnlessRaises(_thread.error, self.lock.release)
def test_cond_acquire_success(self):
#Make sure the conditional acquiring of the lock works.
self.failUnless(self.lock.acquire(0),
"Conditional acquiring of the lock failed.")
def test_cond_acquire_fail(self):
#Test acquiring locked lock returns False
self.lock.acquire(0)
self.failUnless(not self.lock.acquire(0),
"Conditional acquiring of a locked lock incorrectly "
"succeeded.")
def test_uncond_acquire_success(self):
#Make sure unconditional acquiring of a lock works.
self.lock.acquire()
self.failUnless(self.lock.locked(),
"Uncondional locking failed.")
def test_uncond_acquire_return_val(self):
#Make sure that an unconditional locking returns True.
self.failUnless(self.lock.acquire(1) is True,
"Unconditional locking did not return True.")
def test_uncond_acquire_blocking(self):
#Make sure that unconditional acquiring of a locked lock blocks.
def delay_unlock(to_unlock, delay):
"""Hold on to lock for a set amount of time before unlocking."""
time.sleep(delay)
to_unlock.release()
self.lock.acquire()
start_time = int(time.time())
_thread.start_new_thread(delay_unlock,(self.lock, DELAY))
if test_support.verbose:
print
print "*** Waiting for thread to release the lock "\
"(approx. %s sec.) ***" % DELAY
self.lock.acquire()
end_time = int(time.time())
if test_support.verbose:
print "done"
self.failUnless((end_time - start_time) >= DELAY,
"Blocking by unconditional acquiring failed.")
class MiscTests(unittest.TestCase):
"""Miscellaneous tests."""
def test_exit(self):
#Make sure _thread.exit() raises SystemExit
self.failUnlessRaises(SystemExit, _thread.exit)
def test_ident(self):
#Test sanity of _thread.get_ident()
self.failUnless(isinstance(_thread.get_ident(), int),
"_thread.get_ident() returned a non-integer")
self.failUnless(_thread.get_ident() != 0,
"_thread.get_ident() returned 0")
def test_LockType(self):
        #Make sure _thread.LockType is the same type as _thread.allocate_lock()
self.failUnless(isinstance(_thread.allocate_lock(), _thread.LockType),
"_thread.LockType is not an instance of what is "
"returned by _thread.allocate_lock()")
def test_interrupt_main(self):
#Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
self.failUnlessRaises(KeyboardInterrupt, _thread.start_new_thread,
call_interrupt, tuple())
def test_interrupt_in_main(self):
        # Make sure that if interrupt_main is called in the main thread that
# KeyboardInterrupt is raised instantly.
self.failUnlessRaises(KeyboardInterrupt, _thread.interrupt_main)
class ThreadTests(unittest.TestCase):
"""Test thread creation."""
def test_arg_passing(self):
#Make sure that parameter passing works.
def arg_tester(queue, arg1=False, arg2=False):
"""Use to test _thread.start_new_thread() passes args properly."""
queue.put((arg1, arg2))
testing_queue = Queue.Queue(1)
_thread.start_new_thread(arg_tester, (testing_queue, True, True))
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using tuple failed")
_thread.start_new_thread(arg_tester, tuple(), {'queue':testing_queue,
'arg1':True, 'arg2':True})
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using kwargs failed")
_thread.start_new_thread(arg_tester, (testing_queue, True), {'arg2':True})
result = testing_queue.get()
self.failUnless(result[0] and result[1],
"Argument passing for thread creation using both tuple"
" and kwargs failed")
def test_multi_creation(self):
#Make sure multiple threads can be created.
def queue_mark(queue, delay):
"""Wait for ``delay`` seconds and then put something into ``queue``"""
time.sleep(delay)
queue.put(_thread.get_ident())
thread_count = 5
testing_queue = Queue.Queue(thread_count)
if test_support.verbose:
print
print "*** Testing multiple thread creation "\
"(will take approx. %s to %s sec.) ***" % (DELAY, thread_count)
for count in xrange(thread_count):
if DELAY:
local_delay = round(random.random(), 1)
else:
local_delay = 0
_thread.start_new_thread(queue_mark,
(testing_queue, local_delay))
time.sleep(DELAY)
if test_support.verbose:
print 'done'
self.failUnless(testing_queue.qsize() == thread_count,
"Not all %s threads executed properly after %s sec." %
(thread_count, DELAY))
def test_main(imported_module=None):
global _thread, DELAY
if imported_module:
_thread = imported_module
DELAY = 2
if test_support.verbose:
print
print "*** Using %s as _thread module ***" % _thread
test_support.run_unittest(LockTests, MiscTests, ThreadTests)
if __name__ == '__main__':
test_main()
| gpl-3.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_tightlayout.py | 2 | 8159 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import warnings
import numpy as np
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnchoredOffsetbox, DrawingArea
from matplotlib.patches import Rectangle
def example_plot(ax, fontsize=12):
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Title', fontsize=fontsize)
@image_comparison(baseline_images=['tight_layout1'])
def test_tight_layout1():
'Test tight_layout for a single subplot'
fig = plt.figure()
ax = fig.add_subplot(111)
example_plot(ax, fontsize=24)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout2'])
def test_tight_layout2():
    'Test tight_layout for multiple subplots'
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout3'])
def test_tight_layout3():
    'Test tight_layout for multiple subplots'
fig = plt.figure()
ax1 = plt.subplot(221)
ax2 = plt.subplot(223)
ax3 = plt.subplot(122)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout4'],
freetype_version=('2.5.5', '2.6.1'))
def test_tight_layout4():
'Test tight_layout for subplot2grid'
fig = plt.figure()
ax1 = plt.subplot2grid((3, 3), (0, 0))
ax2 = plt.subplot2grid((3, 3), (0, 1), colspan=2)
ax3 = plt.subplot2grid((3, 3), (1, 0), colspan=2, rowspan=2)
ax4 = plt.subplot2grid((3, 3), (1, 2), rowspan=2)
example_plot(ax1)
example_plot(ax2)
example_plot(ax3)
example_plot(ax4)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout5'])
def test_tight_layout5():
'Test tight_layout for image'
fig = plt.figure()
ax = plt.subplot(111)
arr = np.arange(100).reshape((10, 10))
ax.imshow(arr, interpolation="none")
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout6'])
def test_tight_layout6():
'Test tight_layout for gridspec'
# This raises warnings since tight layout cannot
# do this fully automatically. But the test is
# correct since the layout is manually edited
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
fig = plt.figure()
import matplotlib.gridspec as gridspec
gs1 = gridspec.GridSpec(2, 1)
ax1 = fig.add_subplot(gs1[0])
ax2 = fig.add_subplot(gs1[1])
example_plot(ax1)
example_plot(ax2)
gs1.tight_layout(fig, rect=[0, 0, 0.5, 1])
gs2 = gridspec.GridSpec(3, 1)
for ss in gs2:
ax = fig.add_subplot(ss)
example_plot(ax)
ax.set_title("")
ax.set_xlabel("")
ax.set_xlabel("x-label", fontsize=12)
gs2.tight_layout(fig, rect=[0.5, 0, 1, 1], h_pad=0.45)
top = min(gs1.top, gs2.top)
bottom = max(gs1.bottom, gs2.bottom)
gs1.tight_layout(fig, rect=[None, 0 + (bottom-gs1.bottom),
0.5, 1 - (gs1.top-top)])
gs2.tight_layout(fig, rect=[0.5, 0 + (bottom-gs2.bottom),
None, 1 - (gs2.top-top)],
h_pad=0.45)
@image_comparison(baseline_images=['tight_layout7'])
def test_tight_layout7():
# tight layout with left and right titles
fig = plt.figure()
fontsize = 24
ax = fig.add_subplot(111)
ax.plot([1, 2])
ax.locator_params(nbins=3)
ax.set_xlabel('x-label', fontsize=fontsize)
ax.set_ylabel('y-label', fontsize=fontsize)
ax.set_title('Left Title', loc='left', fontsize=fontsize)
ax.set_title('Right Title', loc='right', fontsize=fontsize)
plt.tight_layout()
@image_comparison(baseline_images=['tight_layout8'])
def test_tight_layout8():
'Test automatic use of tight_layout'
fig = plt.figure()
fig.set_tight_layout({'pad': .1})
ax = fig.add_subplot(111)
example_plot(ax, fontsize=24)
@image_comparison(baseline_images=['tight_layout9'])
def test_tight_layout9():
    # Test tight_layout for non-visible subplots
# GH 8244
f, axarr = plt.subplots(2, 2)
axarr[1][1].set_visible(False)
plt.tight_layout()
# The following test is misleading when the text is removed.
@image_comparison(baseline_images=['outward_ticks'], remove_text=False)
def test_outward_ticks():
'Test automatic use of tight_layout'
fig = plt.figure()
ax = fig.add_subplot(221)
ax.xaxis.set_tick_params(tickdir='out', length=16, width=3)
ax.yaxis.set_tick_params(tickdir='out', length=16, width=3)
ax.xaxis.set_tick_params(
tickdir='out', length=32, width=3, tick1On=True, which='minor')
ax.yaxis.set_tick_params(
tickdir='out', length=32, width=3, tick1On=True, which='minor')
# The following minor ticks are not labelled, and they
# are drawn over the major ticks and labels--ugly!
ax.xaxis.set_ticks([0], minor=True)
ax.yaxis.set_ticks([0], minor=True)
ax = fig.add_subplot(222)
ax.xaxis.set_tick_params(tickdir='in', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='in', length=32, width=3)
ax = fig.add_subplot(223)
ax.xaxis.set_tick_params(tickdir='inout', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='inout', length=32, width=3)
ax = fig.add_subplot(224)
ax.xaxis.set_tick_params(tickdir='out', length=32, width=3)
ax.yaxis.set_tick_params(tickdir='out', length=32, width=3)
plt.tight_layout()
def add_offsetboxes(ax, size=10, margin=.1, color='black'):
"""
Surround ax with OffsetBoxes
"""
m, mp = margin, 1+margin
anchor_points = [(-m, -m), (-m, .5), (-m, mp),
(mp, .5), (.5, mp), (mp, mp),
(.5, -m), (mp, -m), (.5, -m)]
for point in anchor_points:
da = DrawingArea(size, size)
background = Rectangle((0, 0), width=size,
height=size,
facecolor=color,
edgecolor='None',
linewidth=0,
antialiased=False)
da.add_artist(background)
anchored_box = AnchoredOffsetbox(
loc=10,
child=da,
pad=0.,
frameon=False,
bbox_to_anchor=point,
bbox_transform=ax.transAxes,
borderpad=0.)
ax.add_artist(anchored_box)
return anchored_box
@image_comparison(baseline_images=['tight_layout_offsetboxes1',
'tight_layout_offsetboxes2'])
def test_tight_layout_offsetboxes():
# 1.
# - Create 4 subplots
# - Plot a diagonal line on them
# - Surround each plot with 7 boxes
# - Use tight_layout
# - See that the squares are included in the tight_layout
# and that the squares in the middle do not overlap
#
# 2.
# - Make the squares around the right side axes invisible
# - See that the invisible squares do not affect the
# tight_layout
rows = cols = 2
colors = ['red', 'blue', 'green', 'yellow']
x = y = [0, 1]
def _subplots():
_, axs = plt.subplots(rows, cols)
axs = axs.flat
for ax, color in zip(axs, colors):
ax.plot(x, y, color=color)
add_offsetboxes(ax, 20, color=color)
return axs
# 1.
axs = _subplots()
plt.tight_layout()
# 2.
axs = _subplots()
for ax in (axs[cols-1::rows]):
for child in ax.get_children():
if isinstance(child, AnchoredOffsetbox):
child.set_visible(False)
plt.tight_layout()
def test_empty_layout():
"""Tests that tight layout doesn't cause an error when there are
no axes.
"""
fig = plt.gcf()
fig.tight_layout()
| mit |
xiaogaozi/princess-alist | home/xiaogaozi/Archives/6th_hack_game/big_brother/solution/generator.py | 1 | 1308 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Random IP Address Generator <https://github.com/xiaogaozi/princess-alist>
# Copyright (C) 2010 xiaogaozi <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from random import choice
from random import randrange
def main():
file = open("big_brother_s_list", 'w')
for i in range(0, 1000):
l = range(1, 223)
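        # Drop a few first octets (10, 127, 128, 172, 192) before picking one at random.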
l.remove(10)
l.remove(127)
l.remove(128)
l.remove(172)
l.remove(192)
ip = str(choice(l)) + "." + str(randrange(254)) + "." + str(randrange(254)) + "." + str(randrange(1, 254)) + "\n"
file.write(ip)
file.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
aduric/crossfit | nonrel/tests/regressiontests/views/tests/defaults.py | 50 | 3720 | from os import path
from django.conf import settings
from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from regressiontests.views.models import Author, Article, UrlArticle
class DefaultsTests(TestCase):
"""Test django views in django/views/defaults.py"""
fixtures = ['testdata.json']
non_existing_urls = ['/views/non_existing_url/', # this is in urls.py
'/views/other_non_existing_url/'] # this NOT in urls.py
def test_shortcut_with_absolute_url(self):
"Can view a shortcut for an Author object that has a get_absolute_url method"
for obj in Author.objects.all():
short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
response = self.client.get(short_url)
self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
status_code=302, target_status_code=404)
def test_shortcut_no_absolute_url(self):
"Shortcuts for an object that has no get_absolute_url method raises 404"
for obj in Article.objects.all():
short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_wrong_type_pk(self):
short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_shortcut_bad_pk(self):
short_url = '/views/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_nonint_content_type(self):
an_author = Author.objects.all()[0]
short_url = '/views/shortcut/%s/%s/' % ('spam', an_author.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_bad_content_type(self):
an_author = Author.objects.all()[0]
short_url = '/views/shortcut/%s/%s/' % (42424242, an_author.pk)
response = self.client.get(short_url)
self.assertEqual(response.status_code, 404)
def test_page_not_found(self):
"A 404 status is returned by the page_not_found view"
for url in self.non_existing_urls:
response = self.client.get(url)
self.assertEqual(response.status_code, 404)
def test_csrf_token_in_404(self):
"""
The 404 page should have the csrf_token available in the context
"""
# See ticket #14565
for url in self.non_existing_urls:
response = self.client.get(url)
csrf_token = response.context['csrf_token']
self.assertNotEqual(str(csrf_token), 'NOTPROVIDED')
self.assertNotEqual(str(csrf_token), '')
def test_server_error(self):
"The server_error view raises a 500 status"
response = self.client.get('/views/server_error/')
self.assertEqual(response.status_code, 500)
def test_get_absolute_url_attributes(self):
"A model can set attributes on the get_absolute_url method"
self.assertTrue(getattr(UrlArticle.get_absolute_url, 'purge', False),
'The attributes of the original get_absolute_url must be added.')
article = UrlArticle.objects.get(pk=1)
self.assertTrue(getattr(article.get_absolute_url, 'purge', False),
'The attributes of the original get_absolute_url must be added.')
| bsd-3-clause |
pothosware/gnuradio | gr-vocoder/python/vocoder/qa_g721_vocoder.py | 57 | 1573 | #!/usr/bin/env python
#
# Copyright 2011,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, vocoder, blocks
class test_g721_vocoder (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block()
def tearDown (self):
self.tb = None
def test001_module_load (self):
data = (8,24,36,52,56,64,76,88,104,124,132,148,172,
196,220,244,280,320,372,416,468,524,580,648)
src = blocks.vector_source_s(data)
enc = vocoder.g721_encode_sb()
dec = vocoder.g721_decode_bs()
snk = blocks.vector_sink_s()
self.tb.connect(src, enc, dec, snk)
self.tb.run()
actual_result = snk.data()
self.assertEqual(data, actual_result)
if __name__ == '__main__':
gr_unittest.run(test_g721_vocoder, "test_g721_vocoder.xml")
| gpl-3.0 |
cxmo/project-beta | code/utils/plotting_fmri.py | 4 | 1127 |
from __future__ import division, print_function, absolute_import
import matplotlib.pyplot as plt
""" In this module, we have provided plotting functions to help visulize fMRI data."""
def plot_grayscale(data):
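    # Render a 2D array as a grayscale image on the current axes.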
plt.imshow(data, cmap='gray')
def plot_sdevs(sdevs, outliers_sdevs, outlier_interval):
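    # Plot per-volume standard deviations, mark the outliers, and draw the outlier thresholds as dashed lines.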
plt.plot(sdevs, label = 'volume SD values')
put_o = sdevs[outliers_sdevs]
plt.plot(outliers_sdevs, put_o, 'o', label = 'outlier points')
plt.axhline(y=outlier_interval[0], linestyle='dashed')
plt.axhline(y=outlier_interval[1], linestyle='dashed')
plt.xlabel('Index')
plt.ylabel('SD')
plt.title('Volume Standard Deviations')
plt.legend(loc = 'lower right')
def plot_rms(rms, outliers_rms, outlier_interval):
plt.plot(rms, label = 'RMS values')
put_rms_o = rms[outliers_rms]
plt.plot(outliers_rms, put_rms_o, 'o', label = 'outlier points')
plt.axhline(y=outlier_interval[0], linestyle='dashed')
plt.axhline(y=outlier_interval[1], linestyle='dashed')
plt.xlabel('Index')
plt.ylabel('RMS')
plt.title('RMS Differences')
plt.legend(loc='upper right')
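# Added usage sketch (not part of the original module): fabricates a small synthetic
# volume-SD series with numpy to show how plot_sdevs might be called. The outlier
# indices and the outlier interval below are made-up illustration values.
if __name__ == '__main__':
    import numpy as np
    rng = np.random.RandomState(0)
    sdevs = rng.normal(10.0, 1.0, 100)
    sdevs[[5, 42]] += 8.0  # inject two artificial outliers
    outliers_sdevs = np.array([5, 42])
    outlier_interval = (sdevs.mean() - 2 * sdevs.std(), sdevs.mean() + 2 * sdevs.std())
    plot_sdevs(sdevs, outliers_sdevs, outlier_interval)
    plt.show()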
| bsd-3-clause |
vladikoff/fxa-mochitest | tests/mozbase/mozversion/tests/test_sources.py | 4 | 2794 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import os
import tempfile
import unittest
import mozfile
from mozversion import errors, get_version
class SourcesTest(unittest.TestCase):
"""test getting version information from a sources xml"""
application_ini = """[App]\nName = B2G\n"""
platform_ini = """[Build]
BuildID = PlatformBuildID
SourceStamp = PlatformSourceStamp
SourceRepository = PlatformSourceRepo
"""
sources_xml = """<?xml version="1.0" ?><manifest>
<project path="build" revision="build_revision" />
<project path="gaia" revision="gaia_revision" />
<project path="gecko" revision="gecko_revision" />
</manifest>
"""
def setUp(self):
self.cwd = os.getcwd()
self.tempdir = tempfile.mkdtemp()
self.binary = os.path.join(self.tempdir, 'binary')
with open(self.binary, 'w') as f:
f.write('foobar')
def tearDown(self):
os.chdir(self.cwd)
mozfile.remove(self.tempdir)
def _write_conf_files(self, sources=True):
with open(os.path.join(self.tempdir, 'application.ini'), 'w') as f:
f.writelines(self.application_ini)
with open(os.path.join(self.tempdir, 'platform.ini'), 'w') as f:
f.writelines(self.platform_ini)
if sources:
with open(os.path.join(self.tempdir, 'sources.xml'), 'w') as f:
f.writelines(self.sources_xml)
def test_sources(self):
self._write_conf_files()
os.chdir(self.tempdir)
self._check_version(get_version(sources=os.path.join(self.tempdir,
'sources.xml')))
def test_sources_in_current_directory(self):
self._write_conf_files()
os.chdir(self.tempdir)
self._check_version(get_version())
def test_invalid_sources_path(self):
"""An invalid source path should cause an exception"""
self.assertRaises(errors.AppNotFoundError, get_version,
self.binary, os.path.join(self.tempdir, 'invalid'))
def test_without_sources_file(self):
"""With a missing sources file no exception should be thrown"""
self._write_conf_files(sources=False)
get_version(self.binary)
def _check_version(self, version):
self.assertEqual(version.get('build_changeset'), 'build_revision')
self.assertEqual(version.get('gaia_changeset'), 'gaia_revision')
self.assertEqual(version.get('gecko_changeset'), 'gecko_revision')
self.assertIsNone(version.get('invalid_key'))
if __name__ == '__main__':
unittest.main()
| mpl-2.0 |
psynteract/psynteract-os | plugins/psynteract_wait/psynteract_wait.py | 1 | 2478 | #-*- coding:utf-8 -*-
from libopensesame.py3compat import *
from libopensesame.exceptions import osexception
from libopensesame import item
from libqtopensesame.items.qtautoplugin import qtautoplugin
from libqtopensesame.misc import _
class psynteract_wait(item.item):
"""Wait for other clients."""
initial_view = 'controls'
description = 'Wait for other clients.'
def reset(self):
"""
desc:
Resets plug-in to initial values.
"""
self.var.display_message = 'no'
self.var.waiting_message = ''
self.var.additional_wait = 1000
def run(self):
"""Runs the item."""
current_wait = self.name
if self.name in self.experiment._connection.doc['data']['os_status']:
self.experiment._connection.doc['data']['os_status'][current_wait]+=1
else:
self.experiment._connection.doc['data']['os_status'][current_wait]=1
current_status = self.experiment._connection.doc['data']['os_status'][current_wait]
if self.var.display_message == 'yes':
from openexp.canvas import canvas
message_canvas= canvas(self.experiment)
message_canvas.text(self.var.waiting_message)
message_canvas.show()
if self.experiment.var.offline == 'no':
self.experiment._connection.push()
def check_waits(doc):
check = False
if current_wait in doc['data']['os_status']:
check = doc['data']['os_status'][current_wait]>=current_status
return check
self.experiment._connection.wait(check_waits)
if self.experiment.var.offline == 'no':
self.experiment._connection.push()
self.experiment.clock.sleep(self.var.additional_wait)
def prepare(self):
"""Prepares the item."""
item.item.prepare(self)
class qtpsynteract_wait(psynteract_wait, qtautoplugin):
def __init__(self, name, experiment, script=None):
psynteract_wait.__init__(self, name, experiment, script)
qtautoplugin.__init__(self, __file__)
self.custom_interactions()
def apply_edit_changes(self):
"""Applies the controls."""
if not qtautoplugin.apply_edit_changes(self) or self.lock:
return False
self.custom_interactions()
return True
def edit_widget(self):
"""Refreshes the controls."""
if self.lock:
return
self.lock = True
w = qtautoplugin.edit_widget(self)
self.custom_interactions()
self.lock = False
return w
def custom_interactions(self):
"""Activates the relevant controls and adjusts tooltips."""
self.waiting_message_widget.setEnabled(self.var.display_message=='yes')
| gpl-3.0 |
rbramwell/pulp | server/pulp/server/async/tasks.py | 2 | 25866 | from datetime import datetime
from gettext import gettext as _
import logging
import signal
import time
import traceback
import uuid
from celery import task, Task as CeleryTask, current_task
from celery.app import control, defaults
from celery.result import AsyncResult
from mongoengine.queryset import DoesNotExist
from pulp.common.constants import SCHEDULER_WORKER_NAME
from pulp.common import constants, dateutils, tags
from pulp.server.async.celery_instance import celery, RESOURCE_MANAGER_QUEUE, \
DEDICATED_QUEUE_EXCHANGE
from pulp.server.exceptions import PulpException, MissingResource, \
PulpCodedException
from pulp.server.db.model import Worker, ReservedResource, TaskStatus
from pulp.server.exceptions import NoWorkers
from pulp.server.managers.repo import _common as common_utils
from pulp.server.managers import factory as managers
from pulp.server.managers.schedule import utils
controller = control.Control(app=celery)
_logger = logging.getLogger(__name__)
@task(acks_late=True)
def _queue_reserved_task(name, task_id, resource_id, inner_args, inner_kwargs):
"""
    A task that encapsulates another task to be dispatched later. The task being encapsulated is
    called the "inner" task; it is identified by a task name and UUID, and accepts a list of positional args
    and keyword args for the inner task. These arguments are named inner_args and inner_kwargs.
inner_args is a list, and inner_kwargs is a dictionary passed to the inner task as positional
and keyword arguments using the * and ** operators.
The inner task is dispatched into a dedicated queue for a worker that is decided at dispatch
time. The logic deciding which queue receives a task is controlled through the
    get_worker_for_reservation and _get_unreserved_worker functions.
:param name: The name of the task to be called
:type name: basestring
    :param task_id: The UUID to be set on the task being called. By providing
                    the UUID, the caller can have an asynchronous reference to the inner task
                    that will be dispatched.
    :type task_id: basestring
:param resource_id: The name of the resource you wish to reserve for your task. The system
will ensure that no other tasks that want that same reservation will run
concurrently with yours.
:type resource_id: basestring
:return: None
"""
while True:
try:
worker = get_worker_for_reservation(resource_id)
except NoWorkers:
pass
else:
break
try:
worker = _get_unreserved_worker()
except NoWorkers:
pass
else:
break
# No worker is ready for this work, so we need to wait
time.sleep(0.25)
ReservedResource(task_id=task_id, worker_name=worker['name'], resource_id=resource_id).save()
inner_kwargs['routing_key'] = worker.name
inner_kwargs['exchange'] = DEDICATED_QUEUE_EXCHANGE
inner_kwargs['task_id'] = task_id
try:
celery.tasks[name].apply_async(*inner_args, **inner_kwargs)
finally:
_release_resource.apply_async((task_id, ), routing_key=worker.name,
exchange=DEDICATED_QUEUE_EXCHANGE)
def _is_worker(worker_name):
"""
Strip out workers that should never be assigned work. We need to check
via "startswith()" since we do not know which host the worker is running on.
"""
if worker_name.startswith(SCHEDULER_WORKER_NAME) or \
worker_name.startswith(RESOURCE_MANAGER_QUEUE):
return False
return True
def get_worker_for_reservation(resource_id):
"""
Return the Worker instance that is associated with a reservation of type resource_id. If
    there are no workers with a reservation of that resource_id, a pulp.server.exceptions.NoWorkers
exception is raised.
:param resource_id: The name of the resource you wish to reserve for your task.
    :raises NoWorkers: If no worker holds a reservation associated with the given resource_id.
:type resource_id: basestring
:returns: The Worker instance that has a reserved_resource entry of type
`resource_id` associated with it.
:rtype: pulp.server.db.model.resources.Worker
"""
reservation = ReservedResource.objects(resource_id=resource_id).first()
if reservation:
return Worker.objects(name=reservation['worker_name']).first()
else:
raise NoWorkers()
def _get_unreserved_worker():
"""
Return the Worker instance that has no reserved_resource entries
associated with it. If there are no unreserved workers a
pulp.server.exceptions.NoWorkers exception is raised.
:raises NoWorkers: If all workers have reserved_resource entries associated with them.
:returns: The Worker instance that has no reserved_resource
entries associated with it.
:rtype: pulp.server.db.model.resources.Worker
"""
# Build a mapping of queue names to Worker objects
workers_dict = dict((worker['name'], worker) for worker in Worker.objects())
worker_names = workers_dict.keys()
reserved_names = [r['worker_name'] for r in ReservedResource.objects.all()]
# Find an unreserved worker using set differences of the names, and filter
# out workers that should not be assigned work.
# NB: this is a little messy but set comprehensions are in python 2.7+
unreserved_workers = set(filter(_is_worker, worker_names)) - set(reserved_names)
try:
return workers_dict[unreserved_workers.pop()]
except KeyError:
# All workers are reserved
raise NoWorkers()
def _delete_worker(name, normal_shutdown=False):
"""
    Delete the Worker with _id name from the database, and cancel any associated tasks and reservations.
    If the worker shut down normally, no message is logged; otherwise an error level message is
    logged. Default is to assume the worker did not shut down normally.
Any resource reservations associated with this worker are cleaned up by this function.
Any tasks associated with this worker are explicitly canceled.
:param name: The name of the worker you wish to delete.
:type name: basestring
:param normal_shutdown: True if the worker shutdown normally, False otherwise. Defaults to
False.
:type normal_shutdown: bool
"""
if normal_shutdown is False:
msg = _('The worker named %(name)s is missing. Canceling the tasks in its queue.')
msg = msg % {'name': name}
_logger.error(msg)
# Delete the worker document
Worker.objects(name=name).delete()
# Delete all reserved_resource documents for the worker
ReservedResource.objects(worker_name=name).delete()
# Cancel all of the tasks that were assigned to this worker's queue
for task_status in TaskStatus.objects(worker_name=name,
state__in=constants.CALL_INCOMPLETE_STATES):
cancel(task_status['task_id'])
# Delete working directory
common_utils.delete_worker_working_directory(name)
@task
def _release_resource(task_id):
"""
Do not queue this task yourself. It will be used automatically when your task is dispatched by
the _queue_reserved_task task.
When a resource-reserving task is complete, this method releases the resource by removing the
ReservedResource object by UUID.
:param task_id: The UUID of the task that requested the reservation
:type task_id: basestring
"""
ReservedResource.objects(task_id=task_id).delete()
class TaskResult(object):
"""
The TaskResult object is used for returning errors and spawned tasks that do not affect the
primary status of the task.
Errors that don't affect the current task status might be related to secondary actions
where the primary action of the async-task was successful
Spawned tasks are items such as the individual tasks for updating the bindings on
each consumer when a repo distributor is updated.
"""
def __init__(self, result=None, error=None, spawned_tasks=None):
"""
:param result: The return value from the task
:type result: dict
        :param error: The PulpException for the error & sub-errors that occurred
:type error: pulp.server.exception.PulpException
:param spawned_tasks: A list of task status objects for tasks that were created by this
task and are tracked through the pulp database.
Alternately an AsyncResult, or the task_id of the task created.
:type spawned_tasks: list of TaskStatus, AsyncResult, or str objects
"""
self.return_value = result
self.error = error
self.spawned_tasks = []
if spawned_tasks:
for spawned_task in spawned_tasks:
if isinstance(spawned_task, dict):
self.spawned_tasks.append({'task_id': spawned_task.get('task_id')})
elif isinstance(spawned_task, AsyncResult):
self.spawned_tasks.append({'task_id': spawned_task.id})
elif isinstance(spawned_task, TaskStatus):
self.spawned_tasks.append({'task_id': spawned_task.task_id})
else: # This should be a string
self.spawned_tasks.append({'task_id': spawned_task})
@classmethod
def from_async_result(cls, async_result):
"""
Create a TaskResult object from a celery async_result type
:param async_result: The result object to use as a base
:type async_result: celery.result.AsyncResult
        :returns: a TaskResult containing the async task in its spawned_tasks list
:rtype: TaskResult
"""
return cls(spawned_tasks=[{'task_id': async_result.id}])
@classmethod
def from_task_status_dict(cls, task_status):
"""
        Create a TaskResult object from a TaskStatus dictionary
        :param task_status: The dictionary representation of a TaskStatus
        :type task_status: dict
        :returns: a TaskResult containing the task in its spawned_tasks list
:rtype: TaskResult
"""
return cls(spawned_tasks=[{'task_id': task_status.task_id}])
def serialize(self):
"""
Serialize the output to a dictionary
"""
serialized_error = self.error
if serialized_error:
serialized_error = self.error.to_dict()
data = {
'result': self.return_value,
'error': serialized_error,
'spawned_tasks': self.spawned_tasks}
return data
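# Added illustration (not part of the original module): a hypothetical TaskResult built
# from a spawned task id serializes roughly as follows:
#
#     TaskResult(result={'updated': 3}, spawned_tasks=['abc-123']).serialize()
#     # -> {'result': {'updated': 3}, 'error': None,
#     #     'spawned_tasks': [{'task_id': 'abc-123'}]}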
class ReservedTaskMixin(object):
def apply_async_with_reservation(self, resource_type, resource_id, *args, **kwargs):
"""
This method allows the caller to schedule the ReservedTask to run asynchronously just like
        Celery's apply_async(), while also reserving the named resource. No two tasks that claim the
same resource reservation can execute concurrently. It accepts type and id of a resource
and combines them to form a resource id.
This does not dispatch the task directly, but instead promises to dispatch it later by
encapsulating the desired task through a call to a _queue_reserved_task task. See the
docblock on _queue_reserved_task for more information on this.
This method creates a TaskStatus as a placeholder for later updates. Pulp expects to poll
on a task just after calling this method, so a TaskStatus entry needs to exist for it
before it returns.
For a list of parameters accepted by the *args and **kwargs parameters, please see the
docblock for the apply_async() method.
:param resource_type: A string that identifies type of a resource
:type resource_type: basestring
:param resource_id: A string that identifies some named resource, guaranteeing that only
one task reserving this same string can happen at a time.
:type resource_id: basestring
:param tags: A list of tags (strings) to place onto the task, used for searching
for tasks by tag
:type tags: list
:return: An AsyncResult instance as returned by Celery's apply_async
:rtype: celery.result.AsyncResult
"""
# Form a resource_id for reservation by combining given resource type and id. This way,
# two different resources having the same id will not block each other.
resource_id = ":".join((resource_type, resource_id))
inner_task_id = str(uuid.uuid4())
task_name = self.name
tag_list = kwargs.get('tags', [])
# Create a new task status with the task id and tags.
task_status = TaskStatus(task_id=inner_task_id, task_type=task_name,
state=constants.CALL_WAITING_STATE, tags=tag_list)
# To avoid the race condition where __call__ method below is called before
# this change is propagated to all db nodes, using an 'upsert' here and setting
# the task state to 'waiting' only on an insert.
task_status.save_with_set_on_insert(fields_to_set_on_insert=['state', 'start_time'])
_queue_reserved_task.apply_async(args=[task_name, inner_task_id, resource_id, args, kwargs],
queue=RESOURCE_MANAGER_QUEUE)
return AsyncResult(inner_task_id)
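# Added illustration (not part of the original module): a caller would typically dispatch
# a reserved task along the lines of the hypothetical snippet below, where sync_repo is
# assumed to be a pulp Task instance and repo_id an existing repository id.
#
#     sync_repo.apply_async_with_reservation(
#         'repository', repo_id,
#         kwargs={'repo_id': repo_id},
#         tags=['pulp:repository:%s' % repo_id])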
class Task(CeleryTask, ReservedTaskMixin):
"""
This is a custom Pulp subclass of the Celery Task object. It allows us to inject some custom
behavior into each Pulp task, including management of resource locking.
"""
# this tells celery to not automatically log tracebacks for these exceptions
throws = (PulpCodedException,)
def apply_async(self, *args, **kwargs):
"""
A wrapper around the Celery apply_async method. It allows us to accept a few more
parameters than Celery does for our own purposes, listed below. It also allows us
to create and update task status which can be used to track status of this task
        during its lifetime.
:param queue: The queue that the task has been placed into (optional, defaults to
the general Celery queue.)
:type queue: basestring
:param tags: A list of tags (strings) to place onto the task, used for searching for
tasks by tag
:type tags: list
:return: An AsyncResult instance as returned by Celery's apply_async
:rtype: celery.result.AsyncResult
"""
routing_key = kwargs.get('routing_key',
defaults.NAMESPACES['CELERY']['DEFAULT_ROUTING_KEY'].default)
tag_list = kwargs.pop('tags', [])
async_result = super(Task, self).apply_async(*args, **kwargs)
async_result.tags = tag_list
# Create a new task status with the task id and tags.
task_status = TaskStatus(
task_id=async_result.id, task_type=self.name,
state=constants.CALL_WAITING_STATE, worker_name=routing_key, tags=tag_list)
# To avoid the race condition where __call__ method below is called before
# this change is propagated to all db nodes, using an 'upsert' here and setting
# the task state to 'waiting' only on an insert.
task_status.save_with_set_on_insert(fields_to_set_on_insert=['state', 'start_time'])
return async_result
def __call__(self, *args, **kwargs):
"""
This overrides CeleryTask's __call__() method. We use this method
for task state tracking of Pulp tasks.
"""
# Check task status and skip running the task if task state is 'canceled'.
try:
task_status = TaskStatus.objects.get(task_id=self.request.id)
except DoesNotExist:
task_status = None
if task_status and task_status['state'] == constants.CALL_CANCELED_STATE:
_logger.debug("Task cancel received for task-id : [%s]" % self.request.id)
return
# Update start_time and set the task state to 'running' for asynchronous tasks.
# Skip updating status for eagerly executed tasks, since we don't want to track
# synchronous tasks in our database.
if not self.request.called_directly:
now = datetime.now(dateutils.utc_tz())
start_time = dateutils.format_iso8601_datetime(now)
# Using 'upsert' to avoid a possible race condition described in the apply_async method
# above.
TaskStatus.objects(task_id=self.request.id).update_one(
set__state=constants.CALL_RUNNING_STATE, set__start_time=start_time, upsert=True)
# Run the actual task
_logger.debug("Running task : [%s]" % self.request.id)
return super(Task, self).__call__(*args, **kwargs)
def on_success(self, retval, task_id, args, kwargs):
"""
This overrides the success handler run by the worker when the task
executes successfully. It updates state, finish_time and traceback
of the relevant task status for asynchronous tasks. Skip updating status
for synchronous tasks.
:param retval: The return value of the task.
:param task_id: Unique id of the executed task.
:param args: Original arguments for the executed task.
:param kwargs: Original keyword arguments for the executed task.
"""
_logger.debug("Task successful : [%s]" % task_id)
if 'scheduled_call_id' in kwargs:
if not isinstance(retval, AsyncResult):
_logger.info(_('resetting consecutive failure count for schedule %(id)s')
% {'id': kwargs['scheduled_call_id']})
utils.reset_failure_count(kwargs['scheduled_call_id'])
if not self.request.called_directly:
now = datetime.now(dateutils.utc_tz())
finish_time = dateutils.format_iso8601_datetime(now)
task_status = TaskStatus.objects.get(task_id=task_id)
task_status['finish_time'] = finish_time
task_status['result'] = retval
# Only set the state to finished if it's not already in a complete state. This is
# important for when the task has been canceled, so we don't move the task from canceled
# to finished.
if task_status['state'] not in constants.CALL_COMPLETE_STATES:
task_status['state'] = constants.CALL_FINISHED_STATE
if isinstance(retval, TaskResult):
task_status['result'] = retval.return_value
if retval.error:
task_status['error'] = retval.error.to_dict()
if retval.spawned_tasks:
task_list = []
for spawned_task in retval.spawned_tasks:
if isinstance(spawned_task, AsyncResult):
task_list.append(spawned_task.task_id)
elif isinstance(spawned_task, dict):
task_list.append(spawned_task['task_id'])
task_status['spawned_tasks'] = task_list
if isinstance(retval, AsyncResult):
task_status['spawned_tasks'] = [retval.task_id, ]
task_status['result'] = None
task_status.save()
common_utils.delete_working_directory()
def on_failure(self, exc, task_id, args, kwargs, einfo):
"""
This overrides the error handler run by the worker when the task fails.
It updates state, finish_time and traceback of the relevant task status
for asynchronous tasks. Skip updating status for synchronous tasks.
:param exc: The exception raised by the task.
:param task_id: Unique id of the failed task.
:param args: Original arguments for the executed task.
:param kwargs: Original keyword arguments for the executed task.
:param einfo: celery's ExceptionInfo instance, containing serialized traceback.
"""
if isinstance(exc, PulpCodedException):
_logger.info(_('Task failed : [%(task_id)s] : %(msg)s') %
{'task_id': task_id, 'msg': str(exc)})
_logger.debug(traceback.format_exc())
else:
_logger.info(_('Task failed : [%s]') % task_id)
# celery will log the traceback
if 'scheduled_call_id' in kwargs:
utils.increment_failure_count(kwargs['scheduled_call_id'])
if not self.request.called_directly:
now = datetime.now(dateutils.utc_tz())
finish_time = dateutils.format_iso8601_datetime(now)
task_status = TaskStatus.objects.get(task_id=task_id)
task_status['state'] = constants.CALL_ERROR_STATE
task_status['finish_time'] = finish_time
task_status['traceback'] = einfo.traceback
if not isinstance(exc, PulpException):
exc = PulpException(str(exc))
task_status['error'] = exc.to_dict()
task_status.save()
common_utils.delete_working_directory()
def cancel(task_id):
"""
Cancel the task that is represented by the given task_id. This method cancels only the task
with given task_id, not the spawned tasks. This also updates task's state to 'canceled'.
:param task_id: The ID of the task you wish to cancel
:type task_id: basestring
:raises MissingResource: if a task with given task_id does not exist
:raises PulpCodedException: if given task is already in a complete state
"""
try:
task_status = TaskStatus.objects.get(task_id=task_id)
except DoesNotExist:
raise MissingResource(task_id)
if task_status['state'] in constants.CALL_COMPLETE_STATES:
# If the task is already done, just stop
msg = _('Task [%(task_id)s] already in a completed state: %(state)s')
_logger.info(msg % {'task_id': task_id, 'state': task_status['state']})
return
if task_status['worker_name'] == 'agent':
tag_dict = dict(
[
tags.parse_resource_tag(t) for t in task_status['tags'] if tags.is_resource_tag(t)
])
agent_manager = managers.consumer_agent_manager()
consumer_id = tag_dict.get(tags.RESOURCE_CONSUMER_TYPE)
agent_manager.cancel_request(consumer_id, task_id)
else:
controller.revoke(task_id, terminate=True)
qs = TaskStatus.objects(task_id=task_id, state__nin=constants.CALL_COMPLETE_STATES)
qs.update_one(set__state=constants.CALL_CANCELED_STATE)
msg = _('Task canceled: %(task_id)s.')
msg = msg % {'task_id': task_id}
_logger.info(msg)
def get_current_task_id():
""""
Get the current task id from celery. If this is called outside of a running
celery task it will return None
:return: The ID of the currently running celery task or None if not in a task
:rtype: str
"""
if current_task and current_task.request and current_task.request.id:
return current_task.request.id
return None
def register_sigterm_handler(f, handler):
"""
    register_sigterm_handler is a method or function decorator. It will register a special signal
handler for SIGTERM that will call handler() with no arguments if SIGTERM is received during the
operation of f. Once f has completed, the signal handler will be restored to the handler that
was in place before the method began.
:param f: The method or function that should be wrapped.
:type f: instancemethod or function
:param handler: The method or function that should be called when we receive SIGTERM.
handler will be called with no arguments.
:type handler: instancemethod or function
:return: A wrapped version of f that performs the signal registering and unregistering.
:rtype: instancemethod or function
"""
def sigterm_handler(signal_number, stack_frame):
"""
This is the signal handler that gets installed to handle SIGTERM. We don't wish to pass the
signal_number or the stack_frame on to handler, so its only purpose is to avoid
passing these arguments onward. It calls handler().
:param signal_number: The signal that is being handled. Since we have registered for
SIGTERM, this will be signal.SIGTERM.
:type signal_number: int
:param stack_frame: The current execution stack frame
:type stack_frame: None or frame
"""
handler()
def wrap_f(*args, **kwargs):
"""
This function is a wrapper around f. It replaces the signal handler for SIGTERM with
        sigterm_handler(), calls f, sets the SIGTERM handler back to what it was before, and then
returns the return value from f.
:param args: The positional arguments to be passed to f
:type args: tuple
:param kwargs: The keyword arguments to be passed to f
:type kwargs: dict
:return: The return value from calling f
:rtype: Could be anything!
"""
old_signal = signal.signal(signal.SIGTERM, sigterm_handler)
try:
return f(*args, **kwargs)
finally:
signal.signal(signal.SIGTERM, old_signal)
return wrap_f
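# Added illustration (not part of the original module): wrapping a hypothetical
# long-running publish() so that a made-up cleanup() runs if SIGTERM arrives mid-call.
#
#     def cleanup():
#         release_locks()  # assumed cleanup helper, not a real pulp API
#
#     safe_publish = register_sigterm_handler(publish, cleanup)
#     safe_publish(repo_id)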
| gpl-2.0 |
pillmuncher/hornet | hornet/examples/queens.py | 1 | 1111 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Mick Krippendorf <[email protected]>
__version__ = '0.2.3a'
__date__ = '2014-09-27'
__author__ = 'Mick Krippendorf <[email protected]>'
__license__ = 'MIT'
from hornet import Database, let, select, _, arithmetic_not_equal
from hornet.symbols import (
queens, solution, noattack, Rest, Ns, S, X, Y, X1, Y1, Xs, Ys, Y0s, Qs,
)
def main():
db = Database()
db.tell(
queens(S) <<
let(Ns, [i + 1 for i in range(9)]) &
solution(Ns, Ns, [], S),
solution([X|Xs], Y0s, Qs, [X/Y|S]) <<
select(Y, Y0s, Ys) &
noattack(X/Y, Qs) &
solution(Xs, Ys, [X/Y|Qs], S),
solution([], _, _, []),
noattack(X/Y, [X1/Y1|Rest]) <<
arithmetic_not_equal(Y, Y1) &
arithmetic_not_equal(Y1 - Y, X1 - X) &
arithmetic_not_equal(Y1 - Y, X - X1) &
noattack(X/Y, Rest),
noattack(_, []),
)
for subst in db.ask(queens(S)):
print(subst[S])
if __name__ == '__main__':
main()
| mit |
legalsylvain/OpenUpgrade | addons/hr_payroll_account/wizard/hr_payroll_payslips_by_employees.py | 44 | 1739 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
class hr_payslip_employees(osv.osv_memory):
_inherit ='hr.payslip.employees'
def compute_sheet(self, cr, uid, ids, context=None):
run_pool = self.pool.get('hr.payslip.run')
if context is None:
context = {}
if context and context.get('active_id', False):
run_data = run_pool.read(cr, uid, context['active_id'], ['journal_id'])
journal_id = run_data.get('journal_id', False)
journal_id = journal_id and journal_id[0] or False
if journal_id: context.update({'journal_id': journal_id})
return super(hr_payslip_employees, self).compute_sheet(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Oslandia/vizitown_plugin | zope/interface/common/tests/test_idatetime.py | 79 | 1775 | ##############################################################################
#
# Copyright (c) 2003 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test for datetime interfaces
"""
import unittest
from zope.interface.verify import verifyObject, verifyClass
from zope.interface.common.idatetime import ITimeDelta, ITimeDeltaClass
from zope.interface.common.idatetime import IDate, IDateClass
from zope.interface.common.idatetime import IDateTime, IDateTimeClass
from zope.interface.common.idatetime import ITime, ITimeClass, ITZInfo
from datetime import timedelta, date, datetime, time, tzinfo
class TestDateTimeInterfaces(unittest.TestCase):
def test_interfaces(self):
verifyObject(ITimeDelta, timedelta(minutes=20))
verifyObject(IDate, date(2000, 1, 2))
verifyObject(IDateTime, datetime(2000, 1, 2, 10, 20))
verifyObject(ITime, time(20, 30, 15, 1234))
verifyObject(ITZInfo, tzinfo())
verifyClass(ITimeDeltaClass, timedelta)
verifyClass(IDateClass, date)
verifyClass(IDateTimeClass, datetime)
verifyClass(ITimeClass, time)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TestDateTimeInterfaces))
return suite
if __name__ == '__main__':
unittest.main()
| gpl-2.0 |
MOA-2011/enigma2.pli4.0 | lib/python/Plugins/SystemPlugins/SatelliteEquipmentControl/plugin.py | 7 | 3459 | from Screens.Screen import Screen
from Screens.MessageBox import MessageBox
from Plugins.Plugin import PluginDescriptor
from Components.ConfigList import ConfigListScreen
from Components.ActionMap import ActionMap
from Components.config import config
from Components.NimManager import nimmanager as nimmgr
class SecParameterSetup(Screen, ConfigListScreen):
skin = """
<screen position="100,100" size="560,400" title="Satellite equipment setup" >
<widget name="config" position="10,10" size="540,390" />
</screen>"""
def __init__(self, session):
self.skin = SecParameterSetup.skin
self["actions"] = ActionMap(["SetupActions", "MenuActions"],
{
"ok": self.keySave,
"cancel": self.keyCancel,
"menu": self.closeRecursive,
}, -2)
Screen.__init__(self, session)
list = [
("Delay after diseqc reset command", config.sec.delay_after_diseqc_reset_cmd),
("Delay after diseqc peripherial poweron command", config.sec.delay_after_diseqc_peripherial_poweron_cmd),
("Delay after continuous tone disable before diseqc", config.sec.delay_after_continuous_tone_disable_before_diseqc),
("Delay after final continuous tone change", config.sec.delay_after_final_continuous_tone_change),
("Delay after last voltage change", config.sec.delay_after_final_voltage_change),
("Delay between diseqc commands", config.sec.delay_between_diseqc_repeats),
("Delay after last diseqc command", config.sec.delay_after_last_diseqc_command),
("Delay after toneburst", config.sec.delay_after_toneburst),
("Delay after change voltage before switch command", config.sec.delay_after_change_voltage_before_switch_command),
("Delay after enable voltage before switch command", config.sec.delay_after_enable_voltage_before_switch_command),
("Delay between switch and motor command", config.sec.delay_between_switch_and_motor_command),
("Delay after set voltage before measure motor power", config.sec.delay_after_voltage_change_before_measure_idle_inputpower),
("Delay after enable voltage before motor command", config.sec.delay_after_enable_voltage_before_motor_command),
("Delay after motor stop command", config.sec.delay_after_motor_stop_command),
("Delay after voltage change before motor command", config.sec.delay_after_voltage_change_before_motor_command),
("Delay before sequence repeat", config.sec.delay_before_sequence_repeat),
("Motor running timeout", config.sec.motor_running_timeout),
("Motor command retries", config.sec.motor_command_retries) ]
ConfigListScreen.__init__(self, list)
session = None
def confirmed(answer):
global session
if answer:
session.open(SecParameterSetup)
def SecSetupMain(Session, **kwargs):
global session
session = Session
session.openWithCallback(confirmed, MessageBox, _("Please do not change any values unless you know what you are doing!"), MessageBox.TYPE_INFO)
def SecSetupStart(menuid):
show = False
# other menu than "scan"?
if menuid != "scan":
return [ ]
# only show if DVB-S frontends are available
for slot in nimmgr.nim_slots:
if slot.isCompatible("DVB-S"):
return [(_("Satellite equipment setup"), SecSetupMain, "satellite_equipment_setup", None)]
return [ ]
def Plugins(**kwargs):
if (nimmgr.hasNimType("DVB-S")):
return PluginDescriptor(name=_("Satellite equipment setup"), description=_("Setup your satellite equipment"), where = PluginDescriptor.WHERE_MENU, needsRestart = False, fnc=SecSetupStart)
else:
return []
| gpl-2.0 |
seanwestfall/django | django/db/backends/base/schema.py | 339 | 43421 | import hashlib
import logging
from django.db.backends.utils import truncate_name
from django.db.transaction import atomic
from django.utils import six
from django.utils.encoding import force_bytes
logger = logging.getLogger('django.db.backends.schema')
def _related_non_m2m_objects(old_field, new_field):
# Filters out m2m objects from reverse relations.
# Returns (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
(obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
)
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
This class should be used by creating an instance for each set of schema
changes (e.g. a migration file), and by first calling start(),
then the relevant actions, and then commit(). This is necessary to allow
things like circular foreign key references - FKs will only be created once
commit() is called.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
)
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.connection.features.can_rollback_ddl:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.connection.features.can_rollback_ddl:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
@classmethod
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
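    # Added illustration (not part of the original class): _digest('author', 'name')
    # returns the first 8 hex characters of md5(b'author' + b'name'); callers use this
    # short stable suffix when constraint/index names would otherwise be too long.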
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
include_default = include_default and not self.skip_default(field)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null and not self.connection.features.implied_column_null:
sql += " NULL"
elif not null:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
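    # Added illustration (not part of the original class): for a hypothetical
    # CharField(max_length=50, null=False, unique=True) on PostgreSQL, column_sql()
    # would return roughly ('varchar(50) NOT NULL UNIQUE', []).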
def skip_default(self, field):
"""
        Some backends don't accept default values for certain column types
        (e.g. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError(
'subclasses of BaseDatabaseSchemaEditor for backends which have '
'requires_literal_defaults must provide a prepare_default() method'
)
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = six.binary_type()
else:
default = six.text_type()
else:
default = None
# If it's a callable, call it
if six.callable(default):
default = default()
# Run it through the field's get_db_prep_save method so we can send it
# to the database.
default = field.get_db_prep_save(default, self.connection)
return default
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers (always deferred, as some fields might be
# created afterwards, like geometry fields with some backends)
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
self.deferred_sql.append(self._create_unique_sql(model, columns))
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
if tablespace_sql:
sql += ' ' + tablespace_sql
# Prevent using [] as params, in the case a literal '%' is used in the definition
self.execute(sql, params or None)
# Add any field index and index_together's (deferred as SQLite3 _remake_table needs it)
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field(field).column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(model, columns, **constraint_kwargs)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
if old_db_table == new_db_table:
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if not self.skip_default(field) and field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(self._create_index_sql(model, [field]))
# Add any FK constraints later
if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if ((old_type is None and old_field.remote_field is None) or
(new_type is None and new_field.remote_field is None)):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)" %
(old_field, new_field),
)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
old_field.remote_field.through._meta.auto_created and
new_field.remote_field.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
not old_field.remote_field.through._meta.auto_created and
not new_field.remote_field.through._meta.auto_created):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.remote_field and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name))
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
# '_meta.related_field' also contains M2M reverse fields, these
# will be filtered out
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
if (old_field.db_index and not new_field.db_index and
not old_field.unique and not
(not new_field.unique and old_field.unique)):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
for index_name in index_names:
self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(
model._meta.db_table, old_field, new_field, new_type
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
needs_database_default = (
old_default != new_default and
new_default is not None and
not self.skip_default(new_field)
)
if needs_database_default:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if (self.connection.features.interprets_empty_strings_as_nulls and
new_field.get_internal_type() in ("CharField", "TextField")):
# The field is nullable in the database anyway, leave it alone
pass
elif new_field.null:
null_actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
null_actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = (
new_field.has_default() and
(old_field.null and not new_field.null)
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
# Since we didn't run a NOT NULL change before we need to do it
# now
for sql, params in null_actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if (not old_field.unique and new_field.unique) or (
old_field.primary_key and not new_field.primary_key and new_field.unique
):
self.execute(self._create_unique_sql(model, [new_field.column]))
# Added an index?
if (not old_field.db_index and new_field.db_index and
not new_field.unique and not
(not old_field.unique and new_field.unique)):
self.execute(self._create_index_sql(model, [new_field], suffix="_uniq"))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model._meta.db_table, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column % {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.remote_field and
(fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
}
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:
self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)
self.alter_field(
new_field.remote_field.through,
# for self-referential models we need to alter field from the other end too
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, self._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%s' % self._digest(table_name, *column_names)
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (
table_name, column_names[0], index_unique_name, suffix,
)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _create_index_sql(self, model, fields, suffix="", sql=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
return sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": tablespace_sql,
}
def _model_indexes_sql(self, model):
"""
Return all index SQL statements (field indexes, index_together) for the
specified model, as a list.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
if field.db_index and not field.unique:
output.append(self._create_index_sql(model, [field], suffix=""))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
return output
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
from_table = model._meta.db_table
from_column = field.column
to_table = field.target_field.model._meta.db_table
to_column = field.target_field.column
suffix = suffix % {
"to_table": to_table,
"to_column": to_column,
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
def _create_unique_sql(self, model, columns):
return self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")),
"columns": ", ".join(self.quote_name(column) for column in columns),
}
def _delete_constraint_sql(self, template, model, name):
return template % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(name),
}
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
| bsd-3-clause |
meghana1995/sympy | sympy/series/tests/test_kauers.py | 102 | 1032 | from sympy.series.kauers import finite_diff
from sympy.series.kauers import finite_diff_kauers
from sympy.abc import x, y, z, m, n, w
from sympy import sin, cos
from sympy import pi
from sympy import Sum
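# finite_diff(f, x, step=1) returns the forward difference f(x + step) - f(x);
# finite_diff_kauers(Sum(...)) returns the summand with each summation variable
# replaced by its upper limit + 1, as the assertions below illustrate.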
def test_finite_diff():
assert finite_diff(x**2 + 2*x + 1, x) == 2*x + 3
assert finite_diff(y**3 + 2*y**2 + 3*y + 5, y) == 3*y**2 + 7*y + 6
assert finite_diff(z**2 - 2*z + 3, z) == 2*z - 1
assert finite_diff(w**2 + 3*w - 2, w) == 2*w + 4
assert finite_diff(sin(x), x, pi/6) == -sin(x) + sin(x + pi/6)
assert finite_diff(cos(y), y, pi/3) == -cos(y) + cos(y + pi/3)
assert finite_diff(x**2 - 2*x + 3, x, 2) == 4*x
assert finite_diff(n**2 - 2*n + 3, n, 3) == 6*n + 3
def test_finite_diff_kauers():
assert finite_diff_kauers(Sum(x**2, (x, 1, n))) == (n + 1)**2
assert finite_diff_kauers(Sum(y, (y, 1, m))) == (m + 1)
assert finite_diff_kauers(Sum((x*y), (x, 1, m), (y, 1, n))) == (m + 1)*(n + 1)
assert finite_diff_kauers(Sum((x*y**2), (x, 1, m), (y, 1, n))) == (n + 1)**2*(m + 1)
| bsd-3-clause |
dsavransky/plandb.sioslab.com | getDataFromIPAC.py | 1 | 15203 | import requests
import pandas
from StringIO import StringIO
import astropy.units as u
import astropy.constants as const
import EXOSIMS.PlanetPhysicalModel.Forecaster
from sqlalchemy import create_engine
import getpass,keyring
import numpy as np
#grab the data
query = """https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?table=exoplanets&select=*&format=csv"""
r = requests.get(query)
data = pandas.read_csv(StringIO(r.content))
# we need:
# distance AND
# (sma OR (period AND stellar mass)) AND
# (radius OR mass (either true or m\sin(i)))
keep = ~np.isnan(data['st_dist']) & (~np.isnan(data['pl_orbsmax'].values) | \
(~np.isnan(data['pl_orbper'].values) & ~np.isnan(data['st_mass'].values))) & \
(~np.isnan(data['pl_bmassj'].values) | ~np.isnan(data['pl_radj'].values))
data = data[keep]
#fill in missing smas from period & star mass
nosma = np.isnan(data['pl_orbsmax'].values)
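# Kepler's third law: a = (mu*T**2/(4*pi**2))**(1/3), with mu = G*M_star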
p2sma = lambda mu,T: ((mu*T**2/(4*np.pi**2))**(1/3.)).to('AU')
GMs = const.G*(data['st_mass'][nosma].values*u.solMass) # units of solar mass
T = data['pl_orbper'][nosma].values*u.day
tmpsma = p2sma(GMs,T)
data['pl_orbsmax'][nosma] = tmpsma
data = data.assign(smax_from_orbper=nosma)
#update all WAs based on sma
WA = np.arctan((data['pl_orbsmax'].values*u.AU)/(data['st_dist'].values*u.pc)).to('mas')
data['pl_angsep'] = WA.value
##populate max WA based on available eccentricity data (otherwise maxWA = WA)
hase = ~np.isnan(data['pl_orbeccen'].values)
maxWA = WA[:]
maxWA[hase] = np.arctan((data['pl_orbsmax'][hase].values*(1 + data['pl_orbeccen'][hase].values)*u.AU)/(data['st_dist'][hase].values*u.pc)).to('mas')
data = data.assign(pl_maxangsep=maxWA.value)
#populate min WA based on eccentricity & inclination data (otherwise minWA = WA)
hasI = ~np.isnan(data['pl_orbincl'].values)
s = data['pl_orbsmax'].values*u.AU
s[hase] *= (1 - data['pl_orbeccen'][hase].values)
s[hasI] *= np.cos(data['pl_orbincl'][hasI].values*u.deg)
s[~hasI] = 0
minWA = np.arctan(s/(data['st_dist'].values*u.pc)).to('mas')
data = data.assign(pl_minangsep=minWA.value)
##fill in radius based on median values generated by forecaster from best mass
#noR = np.isnan(data['pl_radj'].values)
#fcstr = EXOSIMS.PlanetPhysicalModel.Forecaster.Forecaster()
#ms = data['pl_bmassj'][noR].values
#planradii = [np.median(fcstr.calc_radius_from_mass(np.array([v]*1000)*u.M_jupiter).to(u.R_jupiter).value) for v in ms]
#data['pl_radj'][noR] = planradii
#data = data.assign(rad_from_mass=noR)
#forecaster original
#S = np.array([0.2790,0.589,-0.044,0.881]) #orig coeffs
#C0 = np.log10(1.008)
#T = np.array([2.04,((0.414*u.M_jupiter).to(u.M_earth)).value,((0.0800*u.M_sun).to(u.M_earth)).value])
#C = np.hstack((C0, C0 + np.cumsum(-np.diff(S)*np.log10(T))))
#modify neptune and jupiter leg with new transition point at saturn mass and then flat leg past jupiter mass
S = np.array([0.2790,0,0,0,0.881])
C = np.array([np.log10(1.008), 0, 0, 0, 0])
T = np.array([2.04,95.16,(u.M_jupiter).to(u.M_earth),((0.0800*u.M_sun).to(u.M_earth)).value])
Rj = u.R_jupiter.to(u.R_earth)
Rs = 8.522 #saturn radius
S[1] = (np.log10(Rs) - (C[0] + np.log10(T[0])*S[0]))/(np.log10(T[1]) - np.log10(T[0]))
C[1] = np.log10(Rs) - np.log10(T[1])*S[1]
S[2] = (np.log10(Rj) - np.log10(Rs))/(np.log10(T[2]) - np.log10(T[1]))
C[2] = np.log10(Rj) - np.log10(T[2])*S[2]
C[3] = np.log10(Rj)
C[4] = np.log10(Rj) - np.log10(T[3])*S[4]
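# Resulting piecewise fit (R in Earth radii, M in Earth masses): original Forecaster terran leg
# below 2.04, a leg rising to Saturn's radius at Saturn's mass, a leg rising to Jupiter's radius
# at Jupiter's mass, flat at Jupiter's radius up to 0.08 M_sun, then the stellar-leg slope (0.881).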
##forecaster sanity check:
m1 = np.array([1e-3,T[0]])
r1 = 10.**(C[0] + np.log10(m1)*S[0])
m2 = T[0:2]
r2 = 10.**(C[1] + np.log10(m2)*S[1])
m3 = T[1:3]
r3 = 10.**(C[2] + np.log10(m3)*S[2])
m4 = T[2:4]
r4 = 10.**(C[3] + np.log10(m4)*S[3])
m5 = np.array([T[3],1e6])
r5 = 10.**(C[4] + np.log10(m5)*S[4])
#fill in radius based on mass
noR = np.isnan(data['pl_radj'].values)
m = ((data['pl_bmassj'][noR].values*u.M_jupiter).to(u.M_earth)).value
def RfromM(m):
m = np.array(m,ndmin=1)
R = np.zeros(m.shape)
S = np.array([0.2790,0,0,0,0.881])
C = np.array([np.log10(1.008), 0, 0, 0, 0])
T = np.array([2.04,95.16,(u.M_jupiter).to(u.M_earth),((0.0800*u.M_sun).to(u.M_earth)).value])
Rj = u.R_jupiter.to(u.R_earth)
Rs = 8.522 #saturn radius
S[1] = (np.log10(Rs) - (C[0] + np.log10(T[0])*S[0]))/(np.log10(T[1]) - np.log10(T[0]))
C[1] = np.log10(Rs) - np.log10(T[1])*S[1]
S[2] = (np.log10(Rj) - np.log10(Rs))/(np.log10(T[2]) - np.log10(T[1]))
C[2] = np.log10(Rj) - np.log10(T[2])*S[2]
C[3] = np.log10(Rj)
C[4] = np.log10(Rj) - np.log10(T[3])*S[4]
inds = np.digitize(m,np.hstack((0,T,np.inf)))
for j in range(1,inds.max()+1):
R[inds == j] = 10.**(C[j-1] + np.log10(m[inds == j])*S[j-1])
return R
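# Quick check of the fit: RfromM(1.0) is ~1.01 Earth radii, and at ~1 Jupiter mass (~318 M_earth)
# it returns ~11.2 Earth radii (~1 R_jup).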
R = RfromM(m)
data['pl_radj'][noR] = ((R*u.R_earth).to(u.R_jupiter)).value
data = data.assign(rad_from_mass=noR)
##fill in effective temperatures
#noteff = np.isnan(data['st_teff'].values)
#bmv = data['st_bmvj'][noteff].values
#nobv = np.isnan(bmv)
#Teff = 4600.0*u.K * (1.0/(0.92*self.BV[sInds] + 1.7) + 1.0/(0.92*self.BV[sInds] + 0.62))
#theta_eff = 0.5379 + 0.3981*(V-I) + 4.432e-2*(V-I)**2 - 2.693e-2*(V-I)**3
#orbit info
from EXOSIMS.util.eccanom import eccanom
from EXOSIMS.util.deltaMag import deltaMag
import EXOSIMS.Prototypes.PlanetPhysicalModel
PPMod = EXOSIMS.Prototypes.PlanetPhysicalModel.PlanetPhysicalModel()
M = np.linspace(0,2*np.pi,100)
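# M: grid of 100 mean anomalies covering one full orbit; each planet below is sampled at these phases.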
#plannames = data['pl_hostname'].values+' '+data['pl_letter'].values
plannames = data['pl_name'].values
orbdata = None
#row = data.iloc[71]
for j in range(len(plannames)):
row = data.iloc[j]
a = row['pl_orbsmax']
e = row['pl_orbeccen']
if np.isnan(e): e = 0.0
I = row['pl_orbincl']*np.pi/180.0
if np.isnan(I): I = np.pi/2.0
w = row['pl_orblper']*np.pi/180.0
if np.isnan(w): w = 0.0
E = eccanom(M, e)
Rp = row['pl_radj']
dist = row['st_dist']
a1 = np.cos(w)
a2 = np.cos(I)*np.sin(w)
a3 = np.sin(I)*np.sin(w)
A = a*np.vstack((a1, a2, a3))
b1 = -np.sqrt(1 - e**2)*np.sin(w)
b2 = np.sqrt(1 - e**2)*np.cos(I)*np.cos(w)
b3 = np.sqrt(1 - e**2)*np.sin(I)*np.cos(w)
B = a*np.vstack((b1, b2, b3))
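# Position from eccentric anomaly: r = A*(cos(E) - e) + B*sin(E), where A and B carry the
# semi-major axis and the orbit orientation (I, w) projected onto the plane of the sky.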
r1 = np.cos(E) - e
r2 = np.sin(E)
r = (A*r1 + B*r2).T
d = np.linalg.norm(r, axis=1)
s = np.linalg.norm(r[:,0:2], axis=1)
phi = PPMod.calc_Phi(np.arccos(r[:,2]/d)*u.rad)
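# First argument of deltaMag is the geometric albedo, assumed here to be 0.5.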
dMag = deltaMag(0.5, Rp*u.R_jupiter, d*u.AU, phi)
WA = np.arctan((s*u.AU)/(dist*u.pc)).to('mas').value
print(j,plannames[j],WA.min() - minWA[j].value, WA.max() - maxWA[j].value)
out = pandas.DataFrame({'Name': [plannames[j]]*len(M),
'M': M,
'r': d,
's': s,
'phi': phi,
'dMag': dMag,
'WA': WA})
if orbdata is None:
orbdata = out.copy()
else:
orbdata = orbdata.append(out)
#------write to db------------
import sqlalchemy.types
namemxchar = np.array([len(n) for n in plannames]).max()
#testdb
engine = create_engine('mysql+pymysql://[email protected]/dsavrans_plandb',echo=False)
#proddb#################################################################################################
username = 'dsavrans_admin'
passwd = keyring.get_password('plandb_sql_login', username)
if passwd is None:
passwd = getpass.getpass("Password for mysql user %s:\n"%username)
keyring.set_password('plandb_sql_login', username, passwd)
engine = create_engine('mysql+pymysql://'+username+':'+passwd+'@sioslab.com/dsavrans_plandb',echo=False)
#proddb#################################################################################################
data.to_sql('KnownPlanets',engine,chunksize=100,if_exists='replace',
dtype={'pl_name':sqlalchemy.types.String(namemxchar),
'pl_hostname':sqlalchemy.types.String(namemxchar-2),
'pl_letter':sqlalchemy.types.CHAR(1)})
result = engine.execute("ALTER TABLE KnownPlanets ENGINE=InnoDB")
result = engine.execute("ALTER TABLE KnownPlanets ADD INDEX (pl_name)")
result = engine.execute("ALTER TABLE KnownPlanets ADD INDEX (pl_hostname)")
orbdata.to_sql('PlanetOrbits',engine,chunksize=100,if_exists='replace',dtype={'Name':sqlalchemy.types.String(namemxchar)})
result = engine.execute("ALTER TABLE PlanetOrbits ENGINE=InnoDB")
result = engine.execute("ALTER TABLE PlanetOrbits ADD INDEX (Name)")
result = engine.execute("ALTER TABLE PlanetOrbits ADD FOREIGN KEY (Name) REFERENCES KnownPlanets(pl_name) ON DELETE NO ACTION ON UPDATE NO ACTION");
#---------------------------------------------
inds = np.where((data['pl_maxangsep'].values > 150) & (data['pl_minangsep'].values < 450))[0]
WAbins0 = np.arange(100,501,1)
WAbins = np.hstack((0, WAbins0, np.inf))
dMagbins0 = np.arange(0,26.1,0.1)
dMagbins = np.hstack((dMagbins0,np.inf))
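# Histogram grid: 1 mas separation bins from 100 to 500 mas and 0.1 mag dMag bins from 0 to 26,
# with catch-all underflow/overflow bins outside those ranges.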
WAc,dMagc = np.meshgrid(WAbins0[:-1]+np.diff(WAbins0)/2.0,dMagbins0[:-1]+np.diff(dMagbins0)/2.0)
WAc = WAc.T
dMagc = dMagc.T
WAinds = np.arange(WAbins0.size-1)
dMaginds = np.arange(dMagbins0.size-1)
WAinds,dMaginds = np.meshgrid(WAinds,dMaginds)
WAinds = WAinds.T
dMaginds = dMaginds.T
names = []
WAcs = []
dMagcs = []
iinds = []
jinds = []
hs = []
cs = []
goodinds = []
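# Monte Carlo completeness: for each planet whose projected separation range overlaps 150-450 mas,
# draw orbital elements (and mass/radius where needed) from the reported values and uncertainties,
# accumulate a 2D (separation, dMag) histogram, and repeat 1e6-sample batches until the
# completeness estimate converges.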
for j in inds:
row = data.iloc[j]
print row['pl_name']
amu = row['pl_orbsmax']
astd = (row['pl_orbsmaxerr1'] - row['pl_orbsmaxerr2'])/2.
if np.isnan(astd): astd = 0.01*amu
gena = lambda n: np.clip(np.random.randn(n)*astd + amu,0,np.inf)
emu = row['pl_orbeccen']
if np.isnan(emu):
gene = lambda n: 0.175/np.sqrt(np.pi/2.)*np.sqrt(-2.*np.log(1 - np.random.uniform(size=n)))
else:
estd = (row['pl_orbeccenerr1'] - row['pl_orbeccenerr2'])/2.
if np.isnan(estd):
estd = 0.01*emu
gene = lambda n: np.clip(np.random.randn(n)*estd + emu,0,0.99)
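# Inclination sampling: if unknown and the mass is Msini, restrict I so the true mass Msini/sin(I)
# stays below 0.08 M_sun (the hydrogen-burning limit); if unknown otherwise, draw isotropically;
# if measured, draw from a normal about the reported value.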
Imu = row['pl_orbincl']*np.pi/180.0
if np.isnan(Imu):
if row['pl_bmassprov'] == 'Msini':
Icrit = np.arcsin( ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value/((0.0800*u.M_sun).to(u.M_earth)).value )
Irange = [Icrit, np.pi - Icrit]
C = 0.5*(np.cos(Irange[0])-np.cos(Irange[1]))
genI = lambda n: np.arccos(np.cos(Irange[0]) - 2.*C*np.random.uniform(size=n))
else:
genI = lambda n: np.arccos(1 - 2.*np.random.uniform(size=n))
else:
Istd = (row['pl_orbinclerr1'] - row['pl_orbinclerr2'])/2.*np.pi/180.0
if np.isnan(Istd):
Istd = Imu*0.01
genI = lambda n: np.random.randn(n)*Istd + Imu
wbarmu = row['pl_orblper']*np.pi/180.0
if np.isnan(wbarmu):
genwbar = lambda n: np.random.uniform(size=n,low=0.0,high=2*np.pi)
else:
wbarstd = (row['pl_orblpererr1'] - row['pl_orblpererr2'])/2.*np.pi/180.0
if np.isnan(wbarstd):
wbarstd = wbarmu*0.01
genwbar = lambda n: np.random.randn(n)*wbarstd + wbarmu
n = int(1e6)
c = 0.
h = np.zeros((len(WAbins)-3, len(dMagbins)-2))
k = 0.0
cprev = 0.0
pdiff = 1.0
while (pdiff > 0.0001) | (k <3):
for blah in range(100):
print k,pdiff
a = gena(n)
e = gene(n)
I = genI(n)
O = np.random.uniform(size=n,low=0.0,high=2*np.pi)
wbar = genwbar(n)
w = O - wbar
if row['rad_from_mass']:
if row['pl_bmassprov'] == 'Msini':
Mp = ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value
Mp = Mp/np.sin(I)
else:
Mstd = (((row['pl_bmassjerr1'] - row['pl_bmassjerr2'])*u.M_jupiter).to(u.M_earth)).value
if np.isnan(Mstd):
Mstd = ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value * 0.1
Mp = np.random.randn(n)*Mstd + ((row['pl_bmassj']*u.M_jupiter).to(u.M_earth)).value
R = (RfromM(Mp)*u.R_earth).to(u.R_jupiter).value
R[R > 1.0] = 1.0
else:
Rmu = row['pl_radj']
Rstd = (row['pl_radjerr1'] - row['pl_radjerr2'])/2.
if np.isnan(Rstd): Rstd = Rmu*0.1
R = np.random.randn(n)*Rstd + Rmu
M0 = np.random.uniform(size=n,low=0.0,high=2*np.pi)
E = eccanom(M0, e)
a1 = np.cos(O)*np.cos(w) - np.sin(O)*np.cos(I)*np.sin(w)
a2 = np.sin(O)*np.cos(w) + np.cos(O)*np.cos(I)*np.sin(w)
a3 = np.sin(I)*np.sin(w)
A = a*np.vstack((a1, a2, a3))
b1 = -np.sqrt(1 - e**2)*(np.cos(O)*np.sin(w) + np.sin(O)*np.cos(I)*np.cos(w))
b2 = np.sqrt(1 - e**2)*(-np.sin(O)*np.sin(w) + np.cos(O)*np.cos(I)*np.cos(w))
b3 = np.sqrt(1 - e**2)*np.sin(I)*np.cos(w)
B = a*np.vstack((b1, b2, b3))
r1 = np.cos(E) - e
r2 = np.sin(E)
rvec = (A*r1 + B*r2).T
rnorm = np.linalg.norm(rvec, axis=1)
s = np.linalg.norm(rvec[:,0:2], axis=1)
phi = PPMod.calc_Phi(np.arccos(rvec[:,2]/rnorm)*u.rad) # planet phase
dMag = deltaMag(0.5, R*u.R_jupiter, rnorm*u.AU, phi) # delta magnitude
WA = np.arctan((s*u.AU)/(row['st_dist']*u.pc)).to('mas').value # working angle
h += np.histogram2d(WA,dMag,bins=(WAbins,dMagbins))[0][1:-1,0:-1]
k += 1.0
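# currc: fraction of this batch falling in 150-430 mas with dMag <= 22.5; c is its running mean
# over batches, and the loop exits once the relative change in c drops below 1e-4.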
currc = float(len(np.where((WA >= 150) & (WA <= 430) & (dMag <= 22.5))[0]))/n
cprev = c
if k == 1.0:
c = currc
else:
c = ((k-1)*c + currc)/k
if c == 0:
pdiff = 1.0
else:
pdiff = np.abs(c - cprev)/c
if (c == 0.0) & (k > 2):
break
if (c < 1e-5) & (k > 25):
break
if c != 0.0:
h = h/float(n*k)
names.append(np.array([row['pl_name']]*h.size))
WAcs.append(WAc.flatten())
dMagcs.append(dMagc.flatten())
hs.append(h.flatten())
iinds.append(WAinds.flatten())
jinds.append(dMaginds.flatten())
cs.append(c)
goodinds.append(j)
cs = np.array(cs)
goodinds = np.array(goodinds)
result = engine.execute("ALTER TABLE KnownPlanets ADD completeness double COMMENT 'WFIRST completeness'")
result = engine.execute("UPDATE KnownPlanets SET completeness=NULL where completeness is not NULL")
for ind,c in zip(goodinds,cs):
result = engine.execute("UPDATE KnownPlanets SET completeness=%f where pl_name = '%s'"%(c,plannames[ind]))
out2 = pandas.DataFrame({'Name': np.hstack(names),
'alpha': np.hstack(WAcs),
'dMag': np.hstack(dMagcs),
'H': np.hstack(hs),
'iind': np.hstack(iinds),
'jind': np.hstack(jinds)
})
out2 = out2[out2['H'].values != 0.]
out2['H'] = np.log10(out2['H'].values)
out2.to_sql('Completeness',engine,chunksize=100,if_exists='replace',dtype={'Name':sqlalchemy.types.String(namemxchar)})
result = engine.execute("ALTER TABLE Completeness ENGINE=InnoDB")
result = engine.execute("ALTER TABLE Completeness ADD INDEX (Name)")
result = engine.execute("ALTER TABLE Completeness ADD FOREIGN KEY (Name) REFERENCES KnownPlanets(pl_name) ON DELETE NO ACTION ON UPDATE NO ACTION");
| mit |
Jc2k/libcloud | libcloud/compute/drivers/bluebox.py | 1 | 7490 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libcloud driver for the Blue Box Blocks API
This driver implements all libcloud functionality for the Blue Box Blocks API.
Blue Box home page http://bluebox.net
Blue Box API documentation https://boxpanel.bluebox.net/public/the_vault/index.php/Blocks_API
"""
import copy
import base64
from libcloud.utils.py3 import urlencode
from libcloud.utils.py3 import b
from libcloud.common.base import JsonResponse, ConnectionUserAndKey
from libcloud.compute.providers import Provider
from libcloud.compute.types import NodeState, InvalidCredsError
from libcloud.compute.base import Node, NodeDriver
from libcloud.compute.base import NodeSize, NodeImage, NodeLocation
from libcloud.compute.base import NodeAuthPassword, NodeAuthSSHKey
# Current end point for Blue Box API.
BLUEBOX_API_HOST = "boxpanel.bluebox.net"
# The API doesn't currently expose all of the required values for libcloud,
# so we simply list what's available right now, along with all of the various
# attributes that are needed by libcloud.
BLUEBOX_INSTANCE_TYPES = {
'1gb': {
'id': '94fd37a7-2606-47f7-84d5-9000deda52ae',
'name': 'Block 1GB Virtual Server',
'ram': 1024,
'disk': 20,
'cpu': 0.5
},
'2gb': {
'id': 'b412f354-5056-4bf0-a42f-6ddd998aa092',
'name': 'Block 2GB Virtual Server',
'ram': 2048,
'disk': 25,
'cpu': 1
},
'4gb': {
'id': '0cd183d3-0287-4b1a-8288-b3ea8302ed58',
'name': 'Block 4GB Virtual Server',
'ram': 4096,
'disk': 50,
'cpu': 2
},
'8gb': {
'id': 'b9b87a5b-2885-4a2e-b434-44a163ca6251',
'name': 'Block 8GB Virtual Server',
'ram': 8192,
'disk': 100,
'cpu': 4
}
}
RAM_PER_CPU = 2048
NODE_STATE_MAP = {'queued': NodeState.PENDING,
'building': NodeState.PENDING,
'running': NodeState.RUNNING,
'error': NodeState.TERMINATED,
'unknown': NodeState.UNKNOWN}
class BlueboxResponse(JsonResponse):
def parse_error(self):
if int(self.status) == 401:
if not self.body:
raise InvalidCredsError(str(self.status) + ': ' + self.error)
else:
raise InvalidCredsError(self.body)
return self.body
class BlueboxNodeSize(NodeSize):
def __init__(self, id, name, cpu, ram, disk, price, driver):
self.id = id
self.name = name
self.cpu = cpu
self.ram = ram
self.disk = disk
self.price = price
self.driver = driver
def __repr__(self):
return ((
'<NodeSize: id=%s, name=%s, cpu=%s, ram=%s, disk=%s, '
'price=%s, driver=%s ...>')
% (self.id, self.name, self.cpu, self.ram, self.disk,
self.price, self.driver.name))
class BlueboxConnection(ConnectionUserAndKey):
"""
Connection class for the Bluebox driver
"""
host = BLUEBOX_API_HOST
secure = True
responseCls = BlueboxResponse
def add_default_headers(self, headers):
user_b64 = base64.b64encode(b('%s:%s' % (self.user_id, self.key)))
headers['Authorization'] = 'Basic %s' % (user_b64)
return headers
class BlueboxNodeDriver(NodeDriver):
"""
Bluebox Blocks node driver
"""
connectionCls = BlueboxConnection
type = Provider.BLUEBOX
api_name = 'bluebox'
name = 'Bluebox Blocks'
website = 'http://bluebox.net'
features = {'create_node': ['ssh_key', 'password']}
def list_nodes(self):
result = self.connection.request('/api/blocks.json')
return [self._to_node(i) for i in result.object]
def list_sizes(self, location=None):
sizes = []
for key, values in list(BLUEBOX_INSTANCE_TYPES.items()):
attributes = copy.deepcopy(values)
attributes.update({'price': self._get_size_price(size_id=key)})
sizes.append(BlueboxNodeSize(driver=self.connection.driver,
**attributes))
return sizes
def list_images(self, location=None):
result = self.connection.request('/api/block_templates.json')
images = []
for image in result.object:
images.extend([self._to_image(image)])
return images
def create_node(self, **kwargs):
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
size = kwargs["size"]
name = kwargs['name']
image = kwargs['image']
size = kwargs['size']
auth = self._get_and_check_auth(kwargs.get('auth'))
data = {
'hostname': name,
'product': size.id,
'template': image.id
}
ssh = None
password = None
if isinstance(auth, NodeAuthSSHKey):
ssh = auth.pubkey
data.update(ssh_public_key=ssh)
elif isinstance(auth, NodeAuthPassword):
password = auth.password
data.update(password=password)
if "ex_username" in kwargs:
data.update(username=kwargs["ex_username"])
if not ssh and not password:
raise Exception("SSH public key or password required.")
params = urlencode(data)
result = self.connection.request('/api/blocks.json', headers=headers,
data=params, method='POST')
node = self._to_node(result.object)
if getattr(auth, "generated", False):
node.extra['password'] = auth.password
return node
def destroy_node(self, node):
url = '/api/blocks/%s.json' % (node.id)
result = self.connection.request(url, method='DELETE')
return result.status == 200
def list_locations(self):
return [NodeLocation(0, "Blue Box Seattle US", 'US', self)]
def reboot_node(self, node):
url = '/api/blocks/%s/reboot.json' % (node.id)
result = self.connection.request(url, method="PUT")
return result.status == 200
def _to_node(self, vm):
state = NODE_STATE_MAP[vm.get('status', NodeState.UNKNOWN)]
n = Node(id=vm['id'],
name=vm['hostname'],
state=state,
public_ips=[ip['address'] for ip in vm['ips']],
private_ips=[],
extra={'storage': vm['storage'], 'cpu': vm['cpu']},
driver=self.connection.driver)
return n
def _to_image(self, image):
image = NodeImage(id=image['id'],
name=image['description'],
driver=self.connection.driver)
return image
| apache-2.0 |
jcpowermac/ansible | lib/ansible/modules/network/radware/vdirect_runnable.py | 22 | 13185 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Radware LTD.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
module: vdirect_runnable
author: Evgeny Fedoruk @ Radware LTD (@evgenyfedoruk)
short_description: Runs templates and workflow actions in Radware vDirect server
description:
- Runs configuration templates, creates workflows and runs workflow actions in Radware vDirect server.
notes:
- Requires the Radware vdirect-client Python package on the host. This is as easy as
C(pip install vdirect-client)
version_added: "2.5"
options:
vdirect_ip:
description:
- Primary vDirect server IP address, may be set as C(VDIRECT_IP) environment variable.
required: true
vdirect_user:
description:
- vDirect server username, may be set as C(VDIRECT_USER) environment variable.
required: true
default: None
vdirect_password:
description:
- vDirect server password, may be set as C(VDIRECT_PASSWORD) environment variable.
required: true
default: None
vdirect_secondary_ip:
description:
- Secondary vDirect server IP address, may be set as C(VDIRECT_SECONDARY_IP) environment variable.
required: false
default: None
vdirect_wait:
description:
- Wait for async operation to complete, may be set as C(VDIRECT_WAIT) environment variable.
required: false
type: bool
default: 'yes'
vdirect_https_port:
description:
- vDirect server HTTPS port number, may be set as C(VDIRECT_HTTPS_PORT) environment variable.
required: false
default: 2189
vdirect_http_port:
description:
- vDirect server HTTP port number, may be set as C(VDIRECT_HTTP_PORT) environment variable.
required: false
default: 2188
vdirect_timeout:
description:
- Amount of time to wait for async operation completion [seconds],
- may be set as C(VDIRECT_TIMEOUT) environment variable.
required: false
default: 60
vdirect_use_ssl:
description:
- If C(no), an HTTP connection will be used instead of the default HTTPS connection,
- may be set as C(VDIRECT_HTTPS) or C(VDIRECT_USE_SSL) environment variable.
required: false
type: bool
default: 'yes'
vdirect_validate_certs:
description:
- If C(no), SSL certificates will not be validated,
- may be set as C(VDIRECT_VALIDATE_CERTS) or C(VDIRECT_VERIFY) environment variable.
- This should only be set to C(no) on personally controlled sites using self-signed certificates.
required: false
type: bool
default: 'yes'
runnable_type:
description:
- vDirect runnable type.
- May be ConfigurationTemplate, WorkflowTemplate or a Workflow.
required: true
runnable_name:
description:
- vDirect runnable name to run.
- May be configuration template name, workflow template name or workflow instance name.
required: true
action_name:
description:
- Workflow action name to run.
- Required if I(runnable_type=Workflow).
required: false
parameters:
description:
- Action parameters dictionary. In case of ConfigurationTemplate runnable type,
- the device connection details should always be passed as a parameter.
required: false
requirements:
- "vdirect-client >= 4.1.1"
'''
EXAMPLES = '''
- name: vdirect_runnable
vdirect_runnable:
vdirect_ip: 10.10.10.10
vdirect_user: vDirect
vdirect_password: radware
runnable_type: ConfigurationTemplate
runnable_name: get_vlans
parameters: {'vlans_needed':1,'adc':[{'type':'Adc','name':'adc-1'}]}
'''
RETURN = '''
result:
description: Message detailing run result
returned: success
type: string
sample: "Workflow action run completed."
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from vdirect_client import rest_client
HAS_REST_CLIENT = True
except ImportError:
HAS_REST_CLIENT = False
CONFIGURATION_TEMPLATE_RUNNABLE_TYPE = 'ConfigurationTemplate'
WORKFLOW_TEMPLATE_RUNNABLE_TYPE = 'WorkflowTemplate'
WORKFLOW_RUNNABLE_TYPE = 'Workflow'
TEMPLATE_SUCCESS = 'Configuration template run completed.'
WORKFLOW_CREATION_SUCCESS = 'Workflow created.'
WORKFLOW_ACTION_SUCCESS = 'Workflow action run completed.'
meta_args = dict(
vdirect_ip=dict(
required=True, fallback=(env_fallback, ['VDIRECT_IP']),
default=None),
vdirect_user=dict(
required=True, fallback=(env_fallback, ['VDIRECT_USER']),
default=None),
vdirect_password=dict(
required=True, fallback=(env_fallback, ['VDIRECT_PASSWORD']),
default=None, no_log=True, type='str'),
vdirect_secondary_ip=dict(
required=False, fallback=(env_fallback, ['VDIRECT_SECONDARY_IP']),
default=None),
vdirect_use_ssl=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS', 'VDIRECT_USE_SSL']),
default=True, type='bool'),
vdirect_wait=dict(
required=False, fallback=(env_fallback, ['VDIRECT_WAIT']),
default=True, type='bool'),
vdirect_timeout=dict(
required=False, fallback=(env_fallback, ['VDIRECT_TIMEOUT']),
default=60, type='int'),
vdirect_validate_certs=dict(
required=False, fallback=(env_fallback, ['VDIRECT_VERIFY', 'VDIRECT_VALIDATE_CERTS']),
default=True, type='bool'),
vdirect_https_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTPS_PORT']),
default=2189, type='int'),
vdirect_http_port=dict(
required=False, fallback=(env_fallback, ['VDIRECT_HTTP_PORT']),
default=2188, type='int'),
runnable_type=dict(
required=True,
choices=[CONFIGURATION_TEMPLATE_RUNNABLE_TYPE, WORKFLOW_TEMPLATE_RUNNABLE_TYPE, WORKFLOW_RUNNABLE_TYPE]),
runnable_name=dict(required=True, default=None),
action_name=dict(required=False, default=None),
parameters=dict(required=False, type='dict', default={})
)
class RunnableException(Exception):
def __init__(self, reason, details):
self.reason = reason
self.details = details
def __str__(self):
return 'Reason: {0}. Details:{1}.'.format(self.reason, self.details)
class WrongActionNameException(RunnableException):
def __init__(self, action, available_actions):
super(WrongActionNameException, self).__init__('Wrong action name ' + repr(action),
'Available actions are: ' + repr(available_actions))
class MissingActionParametersException(RunnableException):
def __init__(self, required_parameters):
super(MissingActionParametersException, self).__init__(
'Action parameters missing',
'Required parameters are: ' + repr(required_parameters))
class MissingRunnableException(RunnableException):
def __init__(self, name):
super(MissingRunnableException, self).__init__(
'Runnable missing',
'Runnable ' + name + ' is missing')
class VdirectRunnable(object):
CREATE_WORKFLOW_ACTION = 'createWorkflow'
RUN_ACTION = 'run'
def __init__(self, params):
self.client = rest_client.RestClient(params['vdirect_ip'],
params['vdirect_user'],
params['vdirect_password'],
wait=params['vdirect_wait'],
secondary_vdirect_ip=params['vdirect_secondary_ip'],
https_port=params['vdirect_https_port'],
http_port=params['vdirect_http_port'],
timeout=params['vdirect_timeout'],
https=params['vdirect_use_ssl'],
verify=params['vdirect_validate_certs'])
self.params = params
self.type = self.params['runnable_type']
self.name = self.params['runnable_name']
if 'parameters' in self.params:
self.action_params = self.params['parameters']
else:
self.action_params = []
def _validate_runnable_exists(self):
res = self.client.runnable.get_runnable_objects(self.type)
runnable_names = res[rest_client.RESP_DATA]['names']
if self.name not in runnable_names:
raise MissingRunnableException(self.name)
def _validate_action_name(self):
if self.type == WORKFLOW_TEMPLATE_RUNNABLE_TYPE:
self.action_name = VdirectRunnable.CREATE_WORKFLOW_ACTION
elif self.type == CONFIGURATION_TEMPLATE_RUNNABLE_TYPE:
self.action_name = VdirectRunnable.RUN_ACTION
else:
self.action_name = self.params['action_name']
res = self.client.runnable.get_available_actions(self.type, self.name)
available_actions = res[rest_client.RESP_DATA]['names']
if self.action_name not in available_actions:
raise WrongActionNameException(self.action_name, available_actions)
def _validate_required_action_params(self):
action_params_names = [n for n in self.action_params]
res = self.client.runnable.get_action_info(self.type, self.name, self.action_name)
if 'parameters' in res[rest_client.RESP_DATA]:
action_params_spec = res[rest_client.RESP_DATA]['parameters']
else:
action_params_spec = []
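# A parameter is treated as required when it is a device parameter (alteon, defensePro, appWall)
# or any parameter whose direction is not 'out'.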
required_action_params_dict = [{'name': p['name'], 'type': p['type']} for p in action_params_spec
if p['type'] == 'alteon' or
p['type'] == 'defensePro' or
p['type'] == 'appWall' or
p['direction'] != 'out']
required_action_params_names = [n['name'] for n in required_action_params_dict]
if set(required_action_params_names) & set(action_params_names) != set(required_action_params_names):
raise MissingActionParametersException(required_action_params_dict)
def run(self):
self._validate_runnable_exists()
self._validate_action_name()
self._validate_required_action_params()
data = self.action_params
result = self.client.runnable.run(data, self.type, self.name, self.action_name)
result_to_return = {'msg': ''}
if result[rest_client.RESP_STATUS] == 200:
if result[rest_client.RESP_DATA]['success']:
if self.type == WORKFLOW_TEMPLATE_RUNNABLE_TYPE:
result_to_return['msg'] = WORKFLOW_CREATION_SUCCESS
elif self.type == CONFIGURATION_TEMPLATE_RUNNABLE_TYPE:
result_to_return['msg'] = TEMPLATE_SUCCESS
else:
result_to_return['msg'] = WORKFLOW_ACTION_SUCCESS
if 'parameters' in result[rest_client.RESP_DATA]:
result_to_return['parameters'] = result[rest_client.RESP_DATA]['parameters']
else:
if 'exception' in result[rest_client.RESP_DATA]:
raise RunnableException(result[rest_client.RESP_DATA]['exception']['message'],
result[rest_client.RESP_STR])
else:
raise RunnableException('The status returned ' + str(result[rest_client.RESP_DATA]['status']),
result[rest_client.RESP_STR])
else:
raise RunnableException(result[rest_client.RESP_REASON],
result[rest_client.RESP_STR])
return result_to_return
def main():
if not HAS_REST_CLIENT:
raise ImportError("The python vdirect-client module is required")
module = AnsibleModule(argument_spec=meta_args,
required_if=[['runnable_type', WORKFLOW_RUNNABLE_TYPE, ['action_name']]])
try:
vdirect_runnable = VdirectRunnable(module.params)
result = vdirect_runnable.run()
result = dict(result=result)
module.exit_json(**result)
except Exception as e:
module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
CLVsol/odoo_addons | clv_insured_mng/clv_tag/clv_tag.py | 1 | 2057 | # -*- encoding: utf-8 -*-
################################################################################
# #
# Copyright (C) 2013-Today Carlos Eduardo Vercelino - CLVsol #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
################################################################################
from openerp import models, fields
class clv_insured_mng(models.Model):
_inherit = 'clv_insured_mng'
tag_ids = fields.Many2many('clv_tag',
'clv_insured_mng_tag_rel',
'insured_id',
'tag_id',
'Tags')
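# The reverse field on clv_tag below uses the same clv_insured_mng_tag_rel table, so this is a
# single many-to-many relation viewed from either model.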
class clv_tag(models.Model):
_inherit = 'clv_tag'
insured_ids = fields.Many2many('clv_insured_mng',
'clv_insured_mng_tag_rel',
'tag_id',
'insured_id',
'Insureds')
| agpl-3.0 |