repo_name | path | copies | size | content | license
stringlengths 5-100 | stringlengths 4-375 | stringclasses 991 values | stringlengths 4-7 | stringlengths 666-1M | stringclasses 15 values
---|---|---|---|---|---|
x303597316/hue | apps/beeswax/src/beeswax/data_export.py | 25 | 2985 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import time

from django.utils.translation import ugettext as _

from desktop.lib import export_csvxls
from beeswax import common, conf


LOG = logging.getLogger(__name__)

_DATA_WAIT_SLEEP = 0.1  # Sleep 0.1 sec before checking for data availability
FETCH_SIZE = 1000


def download(handle, format, db):
  """
  download(query_model, format) -> HttpResponse
  Retrieve the query result in the format specified. Return an HttpResponse object.
  """
  if format not in common.DL_FORMATS:
    LOG.error('Unknown download format "%s"' % (format,))
    return

  content_generator = HS2DataAdapter(handle, db, conf.DOWNLOAD_ROW_LIMIT.get())
  generator = export_csvxls.create_generator(content_generator, format)
  return export_csvxls.make_response(generator, format, 'query_result')


def upload(path, handle, user, db, fs):
  """
  upload(query_model, path, user, db, fs) -> None
  Retrieve the query result in the format specified and upload to hdfs.
  """
  if fs.do_as_user(user.username, fs.exists, path):
    raise Exception(_("%s already exists.") % path)
  else:
    fs.do_as_user(user.username, fs.create, path)

  content_generator = HS2DataAdapter(handle, db, -1, start_over=True)
  for header, data in content_generator:
    dataset = export_csvxls.dataset(None, data)
    fs.do_as_user(user.username, fs.append, path, dataset.csv)


def HS2DataAdapter(handle, db, max_rows=0, start_over=True):
  """
  HS2DataAdapter(query_model, db) -> headers, 2D array of data.
  """
  results = db.fetch(handle, start_over=start_over, rows=FETCH_SIZE)
  while not results.ready:
    time.sleep(_DATA_WAIT_SLEEP)
    results = db.fetch(handle, start_over=start_over, rows=FETCH_SIZE)

  headers = results.cols()
  num_rows_seen = 0
  limit_rows = max_rows > -1

  while results is not None:
    data = []
    for row in results.rows():
      num_rows_seen += 1
      if limit_rows and num_rows_seen > max_rows:
        break
      data.append(row)
    yield headers, data

    if limit_rows and num_rows_seen > max_rows:
      break

    if results.has_more:
      results = db.fetch(handle, start_over=False, rows=FETCH_SIZE)
    else:
      results = None
| apache-2.0 |
molebot/brython | www/src/Lib/test/test_shutil.py | 23 | 67715 | # Copyright (C) 2003 Python Software Foundation
import unittest
import shutil
import tempfile
import sys
import stat
import os
import os.path
import errno
import functools
import subprocess
from test import support
from test.support import TESTFN
from os.path import splitdrive
from distutils.spawn import find_executable, spawn
from shutil import (_make_tarball, _make_zipfile, make_archive,
register_archive_format, unregister_archive_format,
get_archive_formats, Error, unpack_archive,
register_unpack_format, RegistryError,
unregister_unpack_format, get_unpack_formats)
import tarfile
import warnings
from test import support
from test.support import TESTFN, check_warnings, captured_stdout, requires_zlib
try:
import bz2
BZ2_SUPPORTED = True
except ImportError:
BZ2_SUPPORTED = False
TESTFN2 = TESTFN + "2"
try:
import grp
import pwd
UID_GID_SUPPORT = True
except ImportError:
UID_GID_SUPPORT = False
try:
import zipfile
ZIP_SUPPORT = True
except ImportError:
ZIP_SUPPORT = find_executable('zip')
def _fake_rename(*args, **kwargs):
# Pretend the destination path is on a different filesystem.
raise OSError(getattr(errno, 'EXDEV', 18), "Invalid cross-device link")
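# mock_rename temporarily swaps os.rename for _fake_rename, so a decorated
# test exercises shutil.move's copy-and-delete fallback as if the source and
# destination lived on different filesystems (see the *_other_fs tests below,
# which simply re-run their same-filesystem counterparts under this decorator).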
def mock_rename(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
builtin_rename = os.rename
os.rename = _fake_rename
return func(*args, **kwargs)
finally:
os.rename = builtin_rename
return wrap
def write_file(path, content, binary=False):
"""Write *content* to a file located at *path*.
If *path* is a tuple instead of a string, os.path.join will be used to
make a path. If *binary* is true, the file will be opened in binary
mode.
"""
if isinstance(path, tuple):
path = os.path.join(*path)
with open(path, 'wb' if binary else 'w') as fp:
fp.write(content)
def read_file(path, binary=False):
"""Return contents from a file located at *path*.
If *path* is a tuple instead of a string, os.path.join will be used to
make a path. If *binary* is true, the file will be opened in binary
mode.
"""
if isinstance(path, tuple):
path = os.path.join(*path)
with open(path, 'rb' if binary else 'r') as fp:
return fp.read()
class TestShutil(unittest.TestCase):
def setUp(self):
super(TestShutil, self).setUp()
self.tempdirs = []
def tearDown(self):
super(TestShutil, self).tearDown()
while self.tempdirs:
d = self.tempdirs.pop()
shutil.rmtree(d, os.name in ('nt', 'cygwin'))
def mkdtemp(self):
"""Create a temporary directory that will be cleaned up.
Returns the path of the directory.
"""
d = tempfile.mkdtemp()
self.tempdirs.append(d)
return d
def test_rmtree_works_on_bytes(self):
tmp = self.mkdtemp()
victim = os.path.join(tmp, 'killme')
os.mkdir(victim)
write_file(os.path.join(victim, 'somefile'), 'foo')
victim = os.fsencode(victim)
self.assertIsInstance(victim, bytes)
shutil.rmtree(victim)
@support.skip_unless_symlink
def test_rmtree_fails_on_symlink(self):
tmp = self.mkdtemp()
dir_ = os.path.join(tmp, 'dir')
os.mkdir(dir_)
link = os.path.join(tmp, 'link')
os.symlink(dir_, link)
self.assertRaises(OSError, shutil.rmtree, link)
self.assertTrue(os.path.exists(dir_))
self.assertTrue(os.path.lexists(link))
errors = []
def onerror(*args):
errors.append(args)
shutil.rmtree(link, onerror=onerror)
self.assertEqual(len(errors), 1)
self.assertIs(errors[0][0], os.path.islink)
self.assertEqual(errors[0][1], link)
self.assertIsInstance(errors[0][2][1], OSError)
@support.skip_unless_symlink
def test_rmtree_works_on_symlinks(self):
tmp = self.mkdtemp()
dir1 = os.path.join(tmp, 'dir1')
dir2 = os.path.join(dir1, 'dir2')
dir3 = os.path.join(tmp, 'dir3')
for d in dir1, dir2, dir3:
os.mkdir(d)
file1 = os.path.join(tmp, 'file1')
write_file(file1, 'foo')
link1 = os.path.join(dir1, 'link1')
os.symlink(dir2, link1)
link2 = os.path.join(dir1, 'link2')
os.symlink(dir3, link2)
link3 = os.path.join(dir1, 'link3')
os.symlink(file1, link3)
# make sure symlinks are removed but not followed
shutil.rmtree(dir1)
self.assertFalse(os.path.exists(dir1))
self.assertTrue(os.path.exists(dir3))
self.assertTrue(os.path.exists(file1))
def test_rmtree_errors(self):
# filename is guaranteed not to exist
filename = tempfile.mktemp()
self.assertRaises(FileNotFoundError, shutil.rmtree, filename)
# test that ignore_errors option is honored
shutil.rmtree(filename, ignore_errors=True)
# existing file
tmpdir = self.mkdtemp()
write_file((tmpdir, "tstfile"), "")
filename = os.path.join(tmpdir, "tstfile")
with self.assertRaises(NotADirectoryError) as cm:
shutil.rmtree(filename)
# The reason for this rather odd construct is that Windows sprinkles
# a \*.* at the end of file names. But only sometimes on some buildbots
possible_args = [filename, os.path.join(filename, '*.*')]
self.assertIn(cm.exception.filename, possible_args)
self.assertTrue(os.path.exists(filename))
# test that ignore_errors option is honored
shutil.rmtree(filename, ignore_errors=True)
self.assertTrue(os.path.exists(filename))
errors = []
def onerror(*args):
errors.append(args)
shutil.rmtree(filename, onerror=onerror)
self.assertEqual(len(errors), 2)
self.assertIs(errors[0][0], os.listdir)
self.assertEqual(errors[0][1], filename)
self.assertIsInstance(errors[0][2][1], NotADirectoryError)
self.assertIn(errors[0][2][1].filename, possible_args)
self.assertIs(errors[1][0], os.rmdir)
self.assertEqual(errors[1][1], filename)
self.assertIsInstance(errors[1][2][1], NotADirectoryError)
self.assertIn(errors[1][2][1].filename, possible_args)
# See bug #1071513 for why we don't run this on cygwin
# and bug #1076467 for why we don't run this as root.
if (hasattr(os, 'chmod') and sys.platform[:6] != 'cygwin'
and not (hasattr(os, 'geteuid') and os.geteuid() == 0)):
def test_on_error(self):
self.errorState = 0
os.mkdir(TESTFN)
self.addCleanup(shutil.rmtree, TESTFN)
self.child_file_path = os.path.join(TESTFN, 'a')
self.child_dir_path = os.path.join(TESTFN, 'b')
support.create_empty_file(self.child_file_path)
os.mkdir(self.child_dir_path)
old_dir_mode = os.stat(TESTFN).st_mode
old_child_file_mode = os.stat(self.child_file_path).st_mode
old_child_dir_mode = os.stat(self.child_dir_path).st_mode
# Make unwritable.
new_mode = stat.S_IREAD|stat.S_IEXEC
os.chmod(self.child_file_path, new_mode)
os.chmod(self.child_dir_path, new_mode)
os.chmod(TESTFN, new_mode)
self.addCleanup(os.chmod, TESTFN, old_dir_mode)
self.addCleanup(os.chmod, self.child_file_path, old_child_file_mode)
self.addCleanup(os.chmod, self.child_dir_path, old_child_dir_mode)
shutil.rmtree(TESTFN, onerror=self.check_args_to_onerror)
# Test whether onerror has actually been called.
self.assertEqual(self.errorState, 3,
"Expected call to onerror function did not "
"happen.")
def check_args_to_onerror(self, func, arg, exc):
# test_rmtree_errors deliberately runs rmtree
# on a directory that is chmod 500, which will fail.
# This function is run when shutil.rmtree fails.
# 99.9% of the time it initially fails to remove
# a file in the directory, so the first time through
# func is os.remove.
# However, some Linux machines running ZFS on
# FUSE experienced a failure earlier in the process
# at os.listdir. The first failure may legally
# be either.
if self.errorState < 2:
if func is os.unlink:
self.assertEqual(arg, self.child_file_path)
elif func is os.rmdir:
self.assertEqual(arg, self.child_dir_path)
else:
self.assertIs(func, os.listdir)
self.assertIn(arg, [TESTFN, self.child_dir_path])
self.assertTrue(issubclass(exc[0], OSError))
self.errorState += 1
else:
self.assertEqual(func, os.rmdir)
self.assertEqual(arg, TESTFN)
self.assertTrue(issubclass(exc[0], OSError))
self.errorState = 3
def test_rmtree_does_not_choke_on_failing_lstat(self):
try:
orig_lstat = os.lstat
def raiser(fn, *args, **kwargs):
if fn != TESTFN:
raise OSError()
else:
return orig_lstat(fn)
os.lstat = raiser
os.mkdir(TESTFN)
write_file((TESTFN, 'foo'), 'foo')
shutil.rmtree(TESTFN)
finally:
os.lstat = orig_lstat
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
@support.skip_unless_symlink
def test_copymode_follow_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
os.chmod(src, stat.S_IRWXU|stat.S_IRWXG)
# file to file
os.chmod(dst, stat.S_IRWXO)
self.assertNotEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
shutil.copymode(src, dst)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# follow src link
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src_link, dst)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# follow dst link
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src, dst_link)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# follow both links
os.chmod(dst, stat.S_IRWXO)
shutil.copymode(src_link, dst)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
@unittest.skipUnless(hasattr(os, 'lchmod'), 'requires os.lchmod')
@support.skip_unless_symlink
def test_copymode_symlink_to_symlink(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
os.chmod(src, stat.S_IRWXU|stat.S_IRWXG)
os.chmod(dst, stat.S_IRWXU)
os.lchmod(src_link, stat.S_IRWXO|stat.S_IRWXG)
# link to link
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src_link, dst_link, follow_symlinks=False)
self.assertEqual(os.lstat(src_link).st_mode,
os.lstat(dst_link).st_mode)
self.assertNotEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# src link - use chmod
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src_link, dst, follow_symlinks=False)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
# dst link - use chmod
os.lchmod(dst_link, stat.S_IRWXO)
shutil.copymode(src, dst_link, follow_symlinks=False)
self.assertEqual(os.stat(src).st_mode, os.stat(dst).st_mode)
@unittest.skipIf(hasattr(os, 'lchmod'), 'requires os.lchmod to be missing')
@support.skip_unless_symlink
def test_copymode_symlink_to_symlink_wo_lchmod(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'quux')
write_file(src, 'foo')
write_file(dst, 'foo')
os.symlink(src, src_link)
os.symlink(dst, dst_link)
shutil.copymode(src_link, dst_link, follow_symlinks=False) # silent fail
@support.skip_unless_symlink
def test_copystat_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
dst_link = os.path.join(tmp_dir, 'qux')
write_file(src, 'foo')
src_stat = os.stat(src)
os.utime(src, (src_stat.st_atime,
src_stat.st_mtime - 42.0)) # ensure different mtimes
write_file(dst, 'bar')
self.assertNotEqual(os.stat(src).st_mtime, os.stat(dst).st_mtime)
os.symlink(src, src_link)
os.symlink(dst, dst_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_link_stat = os.lstat(src_link)
# follow
if hasattr(os, 'lchmod'):
shutil.copystat(src_link, dst_link, follow_symlinks=True)
self.assertNotEqual(src_link_stat.st_mode, os.stat(dst).st_mode)
# don't follow
shutil.copystat(src_link, dst_link, follow_symlinks=False)
dst_link_stat = os.lstat(dst_link)
if os.utime in os.supports_follow_symlinks:
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(src_link_stat, attr),
getattr(dst_link_stat, attr) + 1)
if hasattr(os, 'lchmod'):
self.assertEqual(src_link_stat.st_mode, dst_link_stat.st_mode)
if hasattr(os, 'lchflags') and hasattr(src_link_stat, 'st_flags'):
self.assertEqual(src_link_stat.st_flags, dst_link_stat.st_flags)
# tell to follow but dst is not a link
shutil.copystat(src_link, dst, follow_symlinks=False)
self.assertTrue(abs(os.stat(src).st_mtime - os.stat(dst).st_mtime) <
00000.1)
@unittest.skipUnless(hasattr(os, 'chflags') and
hasattr(errno, 'EOPNOTSUPP') and
hasattr(errno, 'ENOTSUP'),
"requires os.chflags, EOPNOTSUPP & ENOTSUP")
def test_copystat_handles_harmless_chflags_errors(self):
tmpdir = self.mkdtemp()
file1 = os.path.join(tmpdir, 'file1')
file2 = os.path.join(tmpdir, 'file2')
write_file(file1, 'xxx')
write_file(file2, 'xxx')
def make_chflags_raiser(err):
ex = OSError()
def _chflags_raiser(path, flags, *, follow_symlinks=True):
ex.errno = err
raise ex
return _chflags_raiser
old_chflags = os.chflags
try:
for err in errno.EOPNOTSUPP, errno.ENOTSUP:
os.chflags = make_chflags_raiser(err)
shutil.copystat(file1, file2)
# assert others errors break it
os.chflags = make_chflags_raiser(errno.EOPNOTSUPP + errno.ENOTSUP)
self.assertRaises(OSError, shutil.copystat, file1, file2)
finally:
os.chflags = old_chflags
@support.skip_unless_xattr
def test_copyxattr(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
write_file(src, 'foo')
dst = os.path.join(tmp_dir, 'bar')
write_file(dst, 'bar')
# no xattr == no problem
shutil._copyxattr(src, dst)
# common case
os.setxattr(src, 'user.foo', b'42')
os.setxattr(src, 'user.bar', b'43')
shutil._copyxattr(src, dst)
self.assertEqual(os.listxattr(src), os.listxattr(dst))
self.assertEqual(
os.getxattr(src, 'user.foo'),
os.getxattr(dst, 'user.foo'))
# check errors don't affect other attrs
os.remove(dst)
write_file(dst, 'bar')
os_error = OSError(errno.EPERM, 'EPERM')
def _raise_on_user_foo(fname, attr, val, **kwargs):
if attr == 'user.foo':
raise os_error
else:
orig_setxattr(fname, attr, val, **kwargs)
try:
orig_setxattr = os.setxattr
os.setxattr = _raise_on_user_foo
shutil._copyxattr(src, dst)
self.assertIn('user.bar', os.listxattr(dst))
finally:
os.setxattr = orig_setxattr
# the source filesystem not supporting xattrs should be ok, too.
def _raise_on_src(fname, *, follow_symlinks=True):
if fname == src:
raise OSError(errno.ENOTSUP, 'Operation not supported')
return orig_listxattr(fname, follow_symlinks=follow_symlinks)
try:
orig_listxattr = os.listxattr
os.listxattr = _raise_on_src
shutil._copyxattr(src, dst)
finally:
os.listxattr = orig_listxattr
# test that shutil.copystat copies xattrs
src = os.path.join(tmp_dir, 'the_original')
write_file(src, src)
os.setxattr(src, 'user.the_value', b'fiddly')
dst = os.path.join(tmp_dir, 'the_copy')
write_file(dst, dst)
shutil.copystat(src, dst)
self.assertEqual(os.getxattr(dst, 'user.the_value'), b'fiddly')
@support.skip_unless_symlink
@support.skip_unless_xattr
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() == 0,
'root privileges required')
def test_copyxattr_symlinks(self):
# On Linux, it's only possible to access non-user xattr for symlinks;
# which in turn require root privileges. This test should be expanded
# as soon as other platforms gain support for extended attributes.
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
os.setxattr(src, 'trusted.foo', b'42')
os.setxattr(src_link, 'trusted.foo', b'43', follow_symlinks=False)
dst = os.path.join(tmp_dir, 'bar')
dst_link = os.path.join(tmp_dir, 'qux')
write_file(dst, 'bar')
os.symlink(dst, dst_link)
shutil._copyxattr(src_link, dst_link, follow_symlinks=False)
self.assertEqual(os.getxattr(dst_link, 'trusted.foo', follow_symlinks=False), b'43')
self.assertRaises(OSError, os.getxattr, dst, 'trusted.foo')
shutil._copyxattr(src_link, dst, follow_symlinks=False)
self.assertEqual(os.getxattr(dst, 'trusted.foo'), b'43')
@support.skip_unless_symlink
def test_copy_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
# don't follow
shutil.copy(src_link, dst, follow_symlinks=True)
self.assertFalse(os.path.islink(dst))
self.assertEqual(read_file(src), read_file(dst))
os.remove(dst)
# follow
shutil.copy(src_link, dst, follow_symlinks=False)
self.assertTrue(os.path.islink(dst))
self.assertEqual(os.readlink(dst), os.readlink(src_link))
if hasattr(os, 'lchmod'):
self.assertEqual(os.lstat(src_link).st_mode,
os.lstat(dst).st_mode)
@support.skip_unless_symlink
def test_copy2_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
src_link = os.path.join(tmp_dir, 'baz')
write_file(src, 'foo')
os.symlink(src, src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_stat = os.stat(src)
src_link_stat = os.lstat(src_link)
# follow
shutil.copy2(src_link, dst, follow_symlinks=True)
self.assertFalse(os.path.islink(dst))
self.assertEqual(read_file(src), read_file(dst))
os.remove(dst)
# don't follow
shutil.copy2(src_link, dst, follow_symlinks=False)
self.assertTrue(os.path.islink(dst))
self.assertEqual(os.readlink(dst), os.readlink(src_link))
dst_stat = os.lstat(dst)
if os.utime in os.supports_follow_symlinks:
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(src_link_stat, attr),
getattr(dst_stat, attr) + 1)
if hasattr(os, 'lchmod'):
self.assertEqual(src_link_stat.st_mode, dst_stat.st_mode)
self.assertNotEqual(src_stat.st_mode, dst_stat.st_mode)
if hasattr(os, 'lchflags') and hasattr(src_link_stat, 'st_flags'):
self.assertEqual(src_link_stat.st_flags, dst_stat.st_flags)
@support.skip_unless_xattr
def test_copy2_xattr(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'foo')
dst = os.path.join(tmp_dir, 'bar')
write_file(src, 'foo')
os.setxattr(src, 'user.foo', b'42')
shutil.copy2(src, dst)
self.assertEqual(
os.getxattr(src, 'user.foo'),
os.getxattr(dst, 'user.foo'))
os.remove(dst)
@support.skip_unless_symlink
def test_copyfile_symlinks(self):
tmp_dir = self.mkdtemp()
src = os.path.join(tmp_dir, 'src')
dst = os.path.join(tmp_dir, 'dst')
dst_link = os.path.join(tmp_dir, 'dst_link')
link = os.path.join(tmp_dir, 'link')
write_file(src, 'foo')
os.symlink(src, link)
# don't follow
shutil.copyfile(link, dst_link, follow_symlinks=False)
self.assertTrue(os.path.islink(dst_link))
self.assertEqual(os.readlink(link), os.readlink(dst_link))
# follow
shutil.copyfile(link, dst)
self.assertFalse(os.path.islink(dst))
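# Recompute the same capability check that shutil's _use_fd_functions performs,
# to decide whether the fd-based (symlink-attack resistant) rmtree is expected.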
def test_rmtree_uses_safe_fd_version_if_available(self):
_use_fd_functions = ({os.open, os.stat, os.unlink, os.rmdir} <=
os.supports_dir_fd and
os.listdir in os.supports_fd and
os.stat in os.supports_follow_symlinks)
if _use_fd_functions:
self.assertTrue(shutil._use_fd_functions)
self.assertTrue(shutil.rmtree.avoids_symlink_attacks)
tmp_dir = self.mkdtemp()
d = os.path.join(tmp_dir, 'a')
os.mkdir(d)
try:
real_rmtree = shutil._rmtree_safe_fd
class Called(Exception): pass
def _raiser(*args, **kwargs):
raise Called
shutil._rmtree_safe_fd = _raiser
self.assertRaises(Called, shutil.rmtree, d)
finally:
shutil._rmtree_safe_fd = real_rmtree
else:
self.assertFalse(shutil._use_fd_functions)
self.assertFalse(shutil.rmtree.avoids_symlink_attacks)
def test_rmtree_dont_delete_file(self):
# When called on a file instead of a directory, don't delete it.
handle, path = tempfile.mkstemp()
os.close(handle)
self.assertRaises(NotADirectoryError, shutil.rmtree, path)
os.remove(path)
def test_copytree_simple(self):
src_dir = tempfile.mkdtemp()
dst_dir = os.path.join(tempfile.mkdtemp(), 'destination')
self.addCleanup(shutil.rmtree, src_dir)
self.addCleanup(shutil.rmtree, os.path.dirname(dst_dir))
write_file((src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
shutil.copytree(src_dir, dst_dir)
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test.txt')))
self.assertTrue(os.path.isdir(os.path.join(dst_dir, 'test_dir')))
self.assertTrue(os.path.isfile(os.path.join(dst_dir, 'test_dir',
'test.txt')))
actual = read_file((dst_dir, 'test.txt'))
self.assertEqual(actual, '123')
actual = read_file((dst_dir, 'test_dir', 'test.txt'))
self.assertEqual(actual, '456')
@support.skip_unless_symlink
def test_copytree_symlinks(self):
tmp_dir = self.mkdtemp()
src_dir = os.path.join(tmp_dir, 'src')
dst_dir = os.path.join(tmp_dir, 'dst')
sub_dir = os.path.join(src_dir, 'sub')
os.mkdir(src_dir)
os.mkdir(sub_dir)
write_file((src_dir, 'file.txt'), 'foo')
src_link = os.path.join(sub_dir, 'link')
dst_link = os.path.join(dst_dir, 'sub/link')
os.symlink(os.path.join(src_dir, 'file.txt'),
src_link)
if hasattr(os, 'lchmod'):
os.lchmod(src_link, stat.S_IRWXU | stat.S_IRWXO)
if hasattr(os, 'lchflags') and hasattr(stat, 'UF_NODUMP'):
os.lchflags(src_link, stat.UF_NODUMP)
src_stat = os.lstat(src_link)
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertTrue(os.path.islink(os.path.join(dst_dir, 'sub', 'link')))
self.assertEqual(os.readlink(os.path.join(dst_dir, 'sub', 'link')),
os.path.join(src_dir, 'file.txt'))
dst_stat = os.lstat(dst_link)
if hasattr(os, 'lchmod'):
self.assertEqual(dst_stat.st_mode, src_stat.st_mode)
if hasattr(os, 'lchflags'):
self.assertEqual(dst_stat.st_flags, src_stat.st_flags)
def test_copytree_with_exclude(self):
# creating data
join = os.path.join
exists = os.path.exists
src_dir = tempfile.mkdtemp()
try:
dst_dir = join(tempfile.mkdtemp(), 'destination')
write_file((src_dir, 'test.txt'), '123')
write_file((src_dir, 'test.tmp'), '123')
os.mkdir(join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2'))
write_file((src_dir, 'test_dir2', 'test.txt'), '456')
os.mkdir(join(src_dir, 'test_dir2', 'subdir'))
os.mkdir(join(src_dir, 'test_dir2', 'subdir2'))
write_file((src_dir, 'test_dir2', 'subdir', 'test.txt'), '456')
write_file((src_dir, 'test_dir2', 'subdir2', 'test.py'), '456')
# testing glob-like patterns
try:
patterns = shutil.ignore_patterns('*.tmp', 'test_dir2')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertTrue(exists(join(dst_dir, 'test.txt')))
self.assertFalse(exists(join(dst_dir, 'test.tmp')))
self.assertFalse(exists(join(dst_dir, 'test_dir2')))
finally:
shutil.rmtree(dst_dir)
try:
patterns = shutil.ignore_patterns('*.tmp', 'subdir*')
shutil.copytree(src_dir, dst_dir, ignore=patterns)
# checking the result: some elements should not be copied
self.assertFalse(exists(join(dst_dir, 'test.tmp')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir2')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
shutil.rmtree(dst_dir)
# testing callable-style
try:
def _filter(src, names):
res = []
for name in names:
path = os.path.join(src, name)
if (os.path.isdir(path) and
os.path.basename(path) == 'subdir'):
res.append(name)
elif os.path.splitext(path)[-1] in ('.py',):
res.append(name)
return res
shutil.copytree(src_dir, dst_dir, ignore=_filter)
# checking the result: some elements should not be copied
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir2',
'test.py')))
self.assertFalse(exists(join(dst_dir, 'test_dir2', 'subdir')))
finally:
shutil.rmtree(dst_dir)
finally:
shutil.rmtree(src_dir)
shutil.rmtree(os.path.dirname(dst_dir))
def test_copytree_retains_permissions(self):
tmp_dir = tempfile.mkdtemp()
src_dir = os.path.join(tmp_dir, 'source')
os.mkdir(src_dir)
dst_dir = os.path.join(tmp_dir, 'destination')
self.addCleanup(shutil.rmtree, tmp_dir)
os.chmod(src_dir, 0o777)
write_file((src_dir, 'permissive.txt'), '123')
os.chmod(os.path.join(src_dir, 'permissive.txt'), 0o777)
write_file((src_dir, 'restrictive.txt'), '456')
os.chmod(os.path.join(src_dir, 'restrictive.txt'), 0o600)
restrictive_subdir = tempfile.mkdtemp(dir=src_dir)
os.chmod(restrictive_subdir, 0o600)
shutil.copytree(src_dir, dst_dir)
self.assertEqual(os.stat(src_dir).st_mode, os.stat(dst_dir).st_mode)
self.assertEqual(os.stat(os.path.join(src_dir, 'permissive.txt')).st_mode,
os.stat(os.path.join(dst_dir, 'permissive.txt')).st_mode)
self.assertEqual(os.stat(os.path.join(src_dir, 'restrictive.txt')).st_mode,
os.stat(os.path.join(dst_dir, 'restrictive.txt')).st_mode)
restrictive_subdir_dst = os.path.join(dst_dir,
os.path.split(restrictive_subdir)[1])
self.assertEqual(os.stat(restrictive_subdir).st_mode,
os.stat(restrictive_subdir_dst).st_mode)
@unittest.skipUnless(hasattr(os, 'link'), 'requires os.link')
def test_dont_copy_file_onto_link_to_itself(self):
# Temporarily disable test on Windows.
if os.name == 'nt':
return
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
with open(src, 'w') as f:
f.write('cheddar')
os.link(src, dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
def test_dont_copy_file_onto_symlink_to_itself(self):
# bug 851123.
os.mkdir(TESTFN)
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
try:
with open(src, 'w') as f:
f.write('cheddar')
# Using `src` here would mean we end up with a symlink pointing
# to TESTFN/TESTFN/cheese, while it should point at
# TESTFN/cheese.
os.symlink('cheese', dst)
self.assertRaises(shutil.Error, shutil.copyfile, src, dst)
with open(src, 'r') as f:
self.assertEqual(f.read(), 'cheddar')
os.remove(dst)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
def test_rmtree_on_symlink(self):
# bug 1669.
os.mkdir(TESTFN)
try:
src = os.path.join(TESTFN, 'cheese')
dst = os.path.join(TESTFN, 'shop')
os.mkdir(src)
os.symlink(src, dst)
self.assertRaises(OSError, shutil.rmtree, dst)
shutil.rmtree(dst, ignore_errors=True)
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
if hasattr(os, "mkfifo"):
# Issue #3002: copyfile and copytree block indefinitely on named pipes
def test_copyfile_named_pipe(self):
os.mkfifo(TESTFN)
try:
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, TESTFN, TESTFN2)
self.assertRaises(shutil.SpecialFileError,
shutil.copyfile, __file__, TESTFN)
finally:
os.remove(TESTFN)
@support.skip_unless_symlink
def test_copytree_named_pipe(self):
os.mkdir(TESTFN)
try:
subdir = os.path.join(TESTFN, "subdir")
os.mkdir(subdir)
pipe = os.path.join(subdir, "mypipe")
os.mkfifo(pipe)
try:
shutil.copytree(TESTFN, TESTFN2)
except shutil.Error as e:
errors = e.args[0]
self.assertEqual(len(errors), 1)
src, dst, error_msg = errors[0]
self.assertEqual("`%s` is a named pipe" % pipe, error_msg)
else:
self.fail("shutil.Error should have been raised")
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
shutil.rmtree(TESTFN2, ignore_errors=True)
def test_copytree_special_func(self):
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
write_file((src_dir, 'test.txt'), '123')
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
copied = []
def _copy(src, dst):
copied.append((src, dst))
shutil.copytree(src_dir, dst_dir, copy_function=_copy)
self.assertEqual(len(copied), 2)
@support.skip_unless_symlink
def test_copytree_dangling_symlinks(self):
# a dangling symlink raises an error at the end
src_dir = self.mkdtemp()
dst_dir = os.path.join(self.mkdtemp(), 'destination')
os.symlink('IDONTEXIST', os.path.join(src_dir, 'test.txt'))
os.mkdir(os.path.join(src_dir, 'test_dir'))
write_file((src_dir, 'test_dir', 'test.txt'), '456')
self.assertRaises(Error, shutil.copytree, src_dir, dst_dir)
# a dangling symlink is ignored with the proper flag
dst_dir = os.path.join(self.mkdtemp(), 'destination2')
shutil.copytree(src_dir, dst_dir, ignore_dangling_symlinks=True)
self.assertNotIn('test.txt', os.listdir(dst_dir))
# a dangling symlink is copied if symlinks=True
dst_dir = os.path.join(self.mkdtemp(), 'destination3')
shutil.copytree(src_dir, dst_dir, symlinks=True)
self.assertIn('test.txt', os.listdir(dst_dir))
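# Helper for the copy/copy2 tests: create a small file in a fresh temp dir,
# copy it into a second temp dir with *method*, and return both paths.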
def _copy_file(self, method):
fname = 'test.txt'
tmpdir = self.mkdtemp()
write_file((tmpdir, fname), 'xxx')
file1 = os.path.join(tmpdir, fname)
tmpdir2 = self.mkdtemp()
method(file1, tmpdir2)
file2 = os.path.join(tmpdir2, fname)
return (file1, file2)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
def test_copy(self):
# Ensure that the copied file exists and has the same mode bits.
file1, file2 = self._copy_file(shutil.copy)
self.assertTrue(os.path.exists(file2))
self.assertEqual(os.stat(file1).st_mode, os.stat(file2).st_mode)
@unittest.skipUnless(hasattr(os, 'chmod'), 'requires os.chmod')
@unittest.skipUnless(hasattr(os, 'utime'), 'requires os.utime')
def test_copy2(self):
# Ensure that the copied file exists and has the same mode and
# modification time bits.
file1, file2 = self._copy_file(shutil.copy2)
self.assertTrue(os.path.exists(file2))
file1_stat = os.stat(file1)
file2_stat = os.stat(file2)
self.assertEqual(file1_stat.st_mode, file2_stat.st_mode)
for attr in 'st_atime', 'st_mtime':
# The modification times may be truncated in the new file.
self.assertLessEqual(getattr(file1_stat, attr),
getattr(file2_stat, attr) + 1)
if hasattr(os, 'chflags') and hasattr(file1_stat, 'st_flags'):
self.assertEqual(getattr(file1_stat, 'st_flags'),
getattr(file2_stat, 'st_flags'))
@requires_zlib
def test_make_tarball(self):
# creating something to tar
tmpdir = self.mkdtemp()
write_file((tmpdir, 'file1'), 'xxx')
write_file((tmpdir, 'file2'), 'xxx')
os.mkdir(os.path.join(tmpdir, 'sub'))
write_file((tmpdir, 'sub', 'file3'), 'xxx')
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
if splitdrive(tmpdir)[0] != splitdrive(tmpdir2)[0]:
self.skipTest("source and target should be on same drive")
base_name = os.path.join(tmpdir2, 'archive')
# working with relative paths to avoid tar warnings
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(splitdrive(base_name)[1], '.', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
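# Return the sorted member names of the tar archive at *path*; used to compare
# archives produced by _make_tarball against ones produced by the tar command.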
def _tarinfo(self, path):
tar = tarfile.open(path)
try:
names = tar.getnames()
names.sort()
return tuple(names)
finally:
tar.close()
def _create_files(self):
# creating something to tar
tmpdir = self.mkdtemp()
dist = os.path.join(tmpdir, 'dist')
os.mkdir(dist)
write_file((dist, 'file1'), 'xxx')
write_file((dist, 'file2'), 'xxx')
os.mkdir(os.path.join(dist, 'sub'))
write_file((dist, 'sub', 'file3'), 'xxx')
os.mkdir(os.path.join(dist, 'sub2'))
tmpdir2 = self.mkdtemp()
base_name = os.path.join(tmpdir2, 'archive')
return tmpdir, tmpdir2, base_name
@requires_zlib
@unittest.skipUnless(find_executable('tar') and find_executable('gzip'),
'Need the tar command to run')
def test_tarfile_vs_tar(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist')
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
tarball = base_name + '.tar.gz'
self.assertTrue(os.path.exists(tarball))
# now create another tarball using `tar`
tarball2 = os.path.join(tmpdir, 'archive2.tar.gz')
tar_cmd = ['tar', '-cf', 'archive2.tar', 'dist']
gzip_cmd = ['gzip', '-f9', 'archive2.tar']
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
with captured_stdout() as s:
spawn(tar_cmd)
spawn(gzip_cmd)
finally:
os.chdir(old_dir)
self.assertTrue(os.path.exists(tarball2))
# let's compare both tarballs
self.assertEqual(self._tarinfo(tarball), self._tarinfo(tarball2))
# trying an uncompressed one
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
# now for a dry_run
base_name = os.path.join(tmpdir2, 'archive')
old_dir = os.getcwd()
os.chdir(tmpdir)
try:
_make_tarball(base_name, 'dist', compress=None, dry_run=True)
finally:
os.chdir(old_dir)
tarball = base_name + '.tar'
self.assertTrue(os.path.exists(tarball))
@requires_zlib
@unittest.skipUnless(ZIP_SUPPORT, 'Need zip support to run')
def test_make_zipfile(self):
# creating something to tar
tmpdir = self.mkdtemp()
write_file((tmpdir, 'file1'), 'xxx')
write_file((tmpdir, 'file2'), 'xxx')
tmpdir2 = self.mkdtemp()
# force shutil to create the directory
os.rmdir(tmpdir2)
base_name = os.path.join(tmpdir2, 'archive')
_make_zipfile(base_name, tmpdir)
# check if the compressed tarball was created
tarball = base_name + '.zip'
self.assertTrue(os.path.exists(tarball))
def test_make_archive(self):
tmpdir = self.mkdtemp()
base_name = os.path.join(tmpdir, 'archive')
self.assertRaises(ValueError, make_archive, base_name, 'xxx')
@requires_zlib
def test_make_archive_owner_group(self):
# testing make_archive with owner and group, with various combinations
# this works even if there's not gid/uid support
if UID_GID_SUPPORT:
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
else:
group = owner = 'root'
base_dir, root_dir, base_name = self._create_files()
base_name = os.path.join(self.mkdtemp() , 'archive')
res = make_archive(base_name, 'zip', root_dir, base_dir, owner=owner,
group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'zip', root_dir, base_dir)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner=owner, group=group)
self.assertTrue(os.path.exists(res))
res = make_archive(base_name, 'tar', root_dir, base_dir,
owner='kjhkjhkjg', group='oihohoh')
self.assertTrue(os.path.exists(res))
@requires_zlib
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
def test_tarfile_root_owner(self):
tmpdir, tmpdir2, base_name = self._create_files()
old_dir = os.getcwd()
os.chdir(tmpdir)
group = grp.getgrgid(0)[0]
owner = pwd.getpwuid(0)[0]
try:
archive_name = _make_tarball(base_name, 'dist', compress=None,
owner=owner, group=group)
finally:
os.chdir(old_dir)
# check if the compressed tarball was created
self.assertTrue(os.path.exists(archive_name))
# now checks the rights
archive = tarfile.open(archive_name)
try:
for member in archive.getmembers():
self.assertEqual(member.uid, 0)
self.assertEqual(member.gid, 0)
finally:
archive.close()
def test_make_archive_cwd(self):
current_dir = os.getcwd()
def _breaks(*args, **kw):
raise RuntimeError()
register_archive_format('xxx', _breaks, [], 'xxx file')
try:
try:
make_archive('xxx', 'xxx', root_dir=self.mkdtemp())
except Exception:
pass
self.assertEqual(os.getcwd(), current_dir)
finally:
unregister_archive_format('xxx')
def test_register_archive_format(self):
self.assertRaises(TypeError, register_archive_format, 'xxx', 1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
1)
self.assertRaises(TypeError, register_archive_format, 'xxx', lambda: x,
[(1, 2), (1, 2, 3)])
register_archive_format('xxx', lambda: x, [(1, 2)], 'xxx file')
formats = [name for name, params in get_archive_formats()]
self.assertIn('xxx', formats)
unregister_archive_format('xxx')
formats = [name for name, params in get_archive_formats()]
self.assertNotIn('xxx', formats)
def _compare_dirs(self, dir1, dir2):
# check that dir1 and dir2 are equivalent,
# return the diff
diff = []
for root, dirs, files in os.walk(dir1):
for file_ in files:
path = os.path.join(root, file_)
target_path = os.path.join(dir2, os.path.split(path)[-1])
if not os.path.exists(target_path):
diff.append(file_)
return diff
@requires_zlib
def test_unpack_archive(self):
formats = ['tar', 'gztar', 'zip']
if BZ2_SUPPORTED:
formats.append('bztar')
for format in formats:
tmpdir = self.mkdtemp()
base_dir, root_dir, base_name = self._create_files()
tmpdir2 = self.mkdtemp()
filename = make_archive(base_name, format, root_dir, base_dir)
# let's try to unpack it now
unpack_archive(filename, tmpdir2)
diff = self._compare_dirs(tmpdir, tmpdir2)
self.assertEqual(diff, [])
# and again, this time with the format specified
tmpdir3 = self.mkdtemp()
unpack_archive(filename, tmpdir3, format=format)
diff = self._compare_dirs(tmpdir, tmpdir3)
self.assertEqual(diff, [])
self.assertRaises(shutil.ReadError, unpack_archive, TESTFN)
self.assertRaises(ValueError, unpack_archive, TESTFN, format='xxx')
def test_unpack_registry(self):
formats = get_unpack_formats()
def _boo(filename, extract_dir, extra):
self.assertEqual(extra, 1)
self.assertEqual(filename, 'stuff.boo')
self.assertEqual(extract_dir, 'xx')
register_unpack_format('Boo', ['.boo', '.b2'], _boo, [('extra', 1)])
unpack_archive('stuff.boo', 'xx')
# trying to register a .boo unpacker again
self.assertRaises(RegistryError, register_unpack_format, 'Boo2',
['.boo'], _boo)
# should work now
unregister_unpack_format('Boo')
register_unpack_format('Boo2', ['.boo'], _boo)
self.assertIn(('Boo2', ['.boo'], ''), get_unpack_formats())
self.assertNotIn(('Boo', ['.boo'], ''), get_unpack_formats())
# let's leave a clean state
unregister_unpack_format('Boo2')
self.assertEqual(get_unpack_formats(), formats)
@unittest.skipUnless(hasattr(shutil, 'disk_usage'),
"disk_usage not available on this platform")
def test_disk_usage(self):
usage = shutil.disk_usage(os.getcwd())
self.assertGreater(usage.total, 0)
self.assertGreater(usage.used, 0)
self.assertGreaterEqual(usage.free, 0)
self.assertGreaterEqual(usage.total, usage.used)
self.assertGreater(usage.total, usage.free)
@unittest.skipUnless(UID_GID_SUPPORT, "Requires grp and pwd support")
@unittest.skipUnless(hasattr(os, 'chown'), 'requires os.chown')
def test_chown(self):
# cleaned-up automatically by TestShutil.tearDown method
dirname = self.mkdtemp()
filename = tempfile.mktemp(dir=dirname)
write_file(filename, 'testing chown function')
with self.assertRaises(ValueError):
shutil.chown(filename)
with self.assertRaises(LookupError):
shutil.chown(filename, user='non-existing username')
with self.assertRaises(LookupError):
shutil.chown(filename, group='non-existing groupname')
with self.assertRaises(TypeError):
shutil.chown(filename, b'spam')
with self.assertRaises(TypeError):
shutil.chown(filename, 3.14)
uid = os.getuid()
gid = os.getgid()
def check_chown(path, uid=None, gid=None):
s = os.stat(filename)
if uid is not None:
self.assertEqual(uid, s.st_uid)
if gid is not None:
self.assertEqual(gid, s.st_gid)
shutil.chown(filename, uid, gid)
check_chown(filename, uid, gid)
shutil.chown(filename, uid)
check_chown(filename, uid)
shutil.chown(filename, user=uid)
check_chown(filename, uid)
shutil.chown(filename, group=gid)
check_chown(filename, gid=gid)
shutil.chown(dirname, uid, gid)
check_chown(dirname, uid, gid)
shutil.chown(dirname, uid)
check_chown(dirname, uid)
shutil.chown(dirname, user=uid)
check_chown(dirname, uid)
shutil.chown(dirname, group=gid)
check_chown(dirname, gid=gid)
user = pwd.getpwuid(uid)[0]
group = grp.getgrgid(gid)[0]
shutil.chown(filename, user, group)
check_chown(filename, uid, gid)
shutil.chown(dirname, user, group)
check_chown(dirname, uid, gid)
def test_copy_return_value(self):
# copy and copy2 both return their destination path.
for fn in (shutil.copy, shutil.copy2):
src_dir = self.mkdtemp()
dst_dir = self.mkdtemp()
src = os.path.join(src_dir, 'foo')
write_file(src, 'foo')
rv = fn(src, dst_dir)
self.assertEqual(rv, os.path.join(dst_dir, 'foo'))
rv = fn(src, os.path.join(dst_dir, 'bar'))
self.assertEqual(rv, os.path.join(dst_dir, 'bar'))
def test_copyfile_return_value(self):
# copytree returns its destination path.
src_dir = self.mkdtemp()
dst_dir = self.mkdtemp()
dst_file = os.path.join(dst_dir, 'bar')
src_file = os.path.join(src_dir, 'foo')
write_file(src_file, 'foo')
rv = shutil.copyfile(src_file, dst_file)
self.assertTrue(os.path.exists(rv))
self.assertEqual(read_file(src_file), read_file(dst_file))
def test_copytree_return_value(self):
# copytree returns its destination path.
src_dir = self.mkdtemp()
dst_dir = src_dir + "dest"
self.addCleanup(shutil.rmtree, dst_dir, True)
src = os.path.join(src_dir, 'foo')
write_file(src, 'foo')
rv = shutil.copytree(src_dir, dst_dir)
self.assertEqual(['foo'], os.listdir(rv))
class TestWhich(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.mkdtemp(prefix="Tmp")
self.addCleanup(shutil.rmtree, self.temp_dir, True)
# Give the temp_file an ".exe" suffix for all.
# It's needed on Windows and not harmful on other platforms.
self.temp_file = tempfile.NamedTemporaryFile(dir=self.temp_dir,
prefix="Tmp",
suffix=".Exe")
os.chmod(self.temp_file.name, stat.S_IXUSR)
self.addCleanup(self.temp_file.close)
self.dir, self.file = os.path.split(self.temp_file.name)
def test_basic(self):
# Given an EXE in a directory, it should be returned.
rv = shutil.which(self.file, path=self.dir)
self.assertEqual(rv, self.temp_file.name)
def test_absolute_cmd(self):
# When given the fully qualified path to an executable that exists,
# it should be returned.
rv = shutil.which(self.temp_file.name, path=self.temp_dir)
self.assertEqual(rv, self.temp_file.name)
def test_relative_cmd(self):
# When given the relative path with a directory part to an executable
# that exists, it should be returned.
base_dir, tail_dir = os.path.split(self.dir)
relpath = os.path.join(tail_dir, self.file)
with support.change_cwd(path=base_dir):
rv = shutil.which(relpath, path=self.temp_dir)
self.assertEqual(rv, relpath)
# But it shouldn't be searched in PATH directories (issue #16957).
with support.change_cwd(path=self.dir):
rv = shutil.which(relpath, path=base_dir)
self.assertIsNone(rv)
def test_cwd(self):
# Issue #16957
base_dir = os.path.dirname(self.dir)
with support.change_cwd(path=self.dir):
rv = shutil.which(self.file, path=base_dir)
if sys.platform == "win32":
# Windows: current directory implicitly on PATH
self.assertEqual(rv, os.path.join(os.curdir, self.file))
else:
# Other platforms: shouldn't match in the current directory.
self.assertIsNone(rv)
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
'non-root user required')
def test_non_matching_mode(self):
# Set the file read-only and ask for writeable files.
os.chmod(self.temp_file.name, stat.S_IREAD)
if os.access(self.temp_file.name, os.W_OK):
self.skipTest("can't set the file read-only")
rv = shutil.which(self.file, path=self.dir, mode=os.W_OK)
self.assertIsNone(rv)
def test_relative_path(self):
base_dir, tail_dir = os.path.split(self.dir)
with support.change_cwd(path=base_dir):
rv = shutil.which(self.file, path=tail_dir)
self.assertEqual(rv, os.path.join(tail_dir, self.file))
def test_nonexistent_file(self):
# Return None when no matching executable file is found on the path.
rv = shutil.which("foo.exe", path=self.dir)
self.assertIsNone(rv)
@unittest.skipUnless(sys.platform == "win32",
"pathext check is Windows-only")
def test_pathext_checking(self):
# Ask for the file without the ".exe" extension, then ensure that
# it gets found properly with the extension.
rv = shutil.which(self.file[:-4], path=self.dir)
self.assertEqual(rv, self.temp_file.name[:-4] + ".EXE")
def test_environ_path(self):
with support.EnvironmentVarGuard() as env:
env['PATH'] = self.dir
rv = shutil.which(self.file)
self.assertEqual(rv, self.temp_file.name)
def test_empty_path(self):
base_dir = os.path.dirname(self.dir)
with support.change_cwd(path=self.dir), \
support.EnvironmentVarGuard() as env:
env['PATH'] = self.dir
rv = shutil.which(self.file, path='')
self.assertIsNone(rv)
def test_empty_path_no_PATH(self):
with support.EnvironmentVarGuard() as env:
env.pop('PATH', None)
rv = shutil.which(self.file)
self.assertIsNone(rv)
class TestMove(unittest.TestCase):
def setUp(self):
filename = "foo"
self.src_dir = tempfile.mkdtemp()
self.dst_dir = tempfile.mkdtemp()
self.src_file = os.path.join(self.src_dir, filename)
self.dst_file = os.path.join(self.dst_dir, filename)
with open(self.src_file, "wb") as f:
f.write(b"spam")
def tearDown(self):
for d in (self.src_dir, self.dst_dir):
try:
if d:
shutil.rmtree(d)
except:
pass
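# Move src to dst with shutil.move, then verify the content shows up at
# real_dst and that the source no longer exists.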
def _check_move_file(self, src, dst, real_dst):
with open(src, "rb") as f:
contents = f.read()
shutil.move(src, dst)
with open(real_dst, "rb") as f:
self.assertEqual(contents, f.read())
self.assertFalse(os.path.exists(src))
def _check_move_dir(self, src, dst, real_dst):
contents = sorted(os.listdir(src))
shutil.move(src, dst)
self.assertEqual(contents, sorted(os.listdir(real_dst)))
self.assertFalse(os.path.exists(src))
def test_move_file(self):
# Move a file to another location on the same filesystem.
self._check_move_file(self.src_file, self.dst_file, self.dst_file)
def test_move_file_to_dir(self):
# Move a file inside an existing dir on the same filesystem.
self._check_move_file(self.src_file, self.dst_dir, self.dst_file)
@mock_rename
def test_move_file_other_fs(self):
# Move a file to an existing dir on another filesystem.
self.test_move_file()
@mock_rename
def test_move_file_to_dir_other_fs(self):
# Move a file to another location on another filesystem.
self.test_move_file_to_dir()
def test_move_dir(self):
# Move a dir to another location on the same filesystem.
dst_dir = tempfile.mktemp()
try:
self._check_move_dir(self.src_dir, dst_dir, dst_dir)
finally:
try:
shutil.rmtree(dst_dir)
except:
pass
@mock_rename
def test_move_dir_other_fs(self):
# Move a dir to another location on another filesystem.
self.test_move_dir()
def test_move_dir_to_dir(self):
# Move a dir inside an existing dir on the same filesystem.
self._check_move_dir(self.src_dir, self.dst_dir,
os.path.join(self.dst_dir, os.path.basename(self.src_dir)))
@mock_rename
def test_move_dir_to_dir_other_fs(self):
# Move a dir inside an existing dir on another filesystem.
self.test_move_dir_to_dir()
def test_existing_file_inside_dest_dir(self):
# A file with the same name inside the destination dir already exists.
with open(self.dst_file, "wb"):
pass
self.assertRaises(shutil.Error, shutil.move, self.src_file, self.dst_dir)
def test_dont_move_dir_in_itself(self):
# Moving a dir inside itself raises an Error.
dst = os.path.join(self.src_dir, "bar")
self.assertRaises(shutil.Error, shutil.move, self.src_dir, dst)
def test_destinsrc_false_negative(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'srcdir/dest')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertTrue(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is not in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
def test_destinsrc_false_positive(self):
os.mkdir(TESTFN)
try:
for src, dst in [('srcdir', 'src/dest'), ('srcdir', 'srcdir.new')]:
src = os.path.join(TESTFN, src)
dst = os.path.join(TESTFN, dst)
self.assertFalse(shutil._destinsrc(src, dst),
msg='_destinsrc() wrongly concluded that '
'dst (%s) is in src (%s)' % (dst, src))
finally:
shutil.rmtree(TESTFN, ignore_errors=True)
@support.skip_unless_symlink
@mock_rename
def test_move_file_symlink(self):
dst = os.path.join(self.src_dir, 'bar')
os.symlink(self.src_file, dst)
shutil.move(dst, self.dst_file)
self.assertTrue(os.path.islink(self.dst_file))
self.assertTrue(os.path.samefile(self.src_file, self.dst_file))
@support.skip_unless_symlink
@mock_rename
def test_move_file_symlink_to_dir(self):
filename = "bar"
dst = os.path.join(self.src_dir, filename)
os.symlink(self.src_file, dst)
shutil.move(dst, self.dst_dir)
final_link = os.path.join(self.dst_dir, filename)
self.assertTrue(os.path.islink(final_link))
self.assertTrue(os.path.samefile(self.src_file, final_link))
@support.skip_unless_symlink
@mock_rename
def test_move_dangling_symlink(self):
src = os.path.join(self.src_dir, 'baz')
dst = os.path.join(self.src_dir, 'bar')
os.symlink(src, dst)
dst_link = os.path.join(self.dst_dir, 'quux')
shutil.move(dst, dst_link)
self.assertTrue(os.path.islink(dst_link))
self.assertEqual(os.path.realpath(src), os.path.realpath(dst_link))
@support.skip_unless_symlink
@mock_rename
def test_move_dir_symlink(self):
src = os.path.join(self.src_dir, 'baz')
dst = os.path.join(self.src_dir, 'bar')
os.mkdir(src)
os.symlink(src, dst)
dst_link = os.path.join(self.dst_dir, 'quux')
shutil.move(dst, dst_link)
self.assertTrue(os.path.islink(dst_link))
self.assertTrue(os.path.samefile(src, dst_link))
def test_move_return_value(self):
rv = shutil.move(self.src_file, self.dst_dir)
self.assertEqual(rv,
os.path.join(self.dst_dir, os.path.basename(self.src_file)))
def test_move_as_rename_return_value(self):
rv = shutil.move(self.src_file, os.path.join(self.dst_dir, 'bar'))
self.assertEqual(rv, os.path.join(self.dst_dir, 'bar'))
class TestCopyFile(unittest.TestCase):
_delete = False
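# Faux is a minimal stand-in for a file object used as a context manager: it
# records whether it was entered and which exception it was exited with, can
# raise on close, and by default suppresses exceptions raised in the with block.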
class Faux(object):
_entered = False
_exited_with = None
_raised = False
def __init__(self, raise_in_exit=False, suppress_at_exit=True):
self._raise_in_exit = raise_in_exit
self._suppress_at_exit = suppress_at_exit
def read(self, *args):
return ''
def __enter__(self):
self._entered = True
def __exit__(self, exc_type, exc_val, exc_tb):
self._exited_with = exc_type, exc_val, exc_tb
if self._raise_in_exit:
self._raised = True
raise IOError("Cannot close")
return self._suppress_at_exit
def tearDown(self):
if self._delete:
del shutil.open
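# Shadow the open() looked up inside the shutil module with a fake, so that
# copyfile operates on the Faux objects above instead of the real filesystem.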
def _set_shutil_open(self, func):
shutil.open = func
self._delete = True
def test_w_source_open_fails(self):
def _open(filename, mode='r'):
if filename == 'srcfile':
raise IOError('Cannot open "srcfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError, shutil.copyfile, 'srcfile', 'destfile')
def test_w_dest_open_fails(self):
srcfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
raise IOError('Cannot open "destfile"')
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot open "destfile"',))
def test_w_dest_close_fails(self):
srcfile = self.Faux()
destfile = self.Faux(True)
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
shutil.copyfile('srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertTrue(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is IOError)
self.assertEqual(srcfile._exited_with[1].args,
('Cannot close',))
def test_w_source_close_fails(self):
srcfile = self.Faux(True)
destfile = self.Faux()
def _open(filename, mode='r'):
if filename == 'srcfile':
return srcfile
if filename == 'destfile':
return destfile
assert 0 # shouldn't reach here.
self._set_shutil_open(_open)
self.assertRaises(IOError,
shutil.copyfile, 'srcfile', 'destfile')
self.assertTrue(srcfile._entered)
self.assertTrue(destfile._entered)
self.assertFalse(destfile._raised)
self.assertTrue(srcfile._exited_with[0] is None)
self.assertTrue(srcfile._raised)
def test_move_dir_caseinsensitive(self):
# Renames a folder to the same name
# but a different case.
self.src_dir = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.src_dir, True)
dst_dir = os.path.join(
os.path.dirname(self.src_dir),
os.path.basename(self.src_dir).upper())
self.assertNotEqual(self.src_dir, dst_dir)
try:
shutil.move(self.src_dir, dst_dir)
self.assertTrue(os.path.isdir(dst_dir))
finally:
os.rmdir(dst_dir)
class TermsizeTests(unittest.TestCase):
def test_does_not_crash(self):
"""Check if get_terminal_size() returns a meaningful value.
There's no easy portable way to actually check the size of the
terminal, so let's check if it returns something sensible instead.
"""
size = shutil.get_terminal_size()
self.assertGreaterEqual(size.columns, 0)
self.assertGreaterEqual(size.lines, 0)
def test_os_environ_first(self):
"Check if environment variables have precedence"
with support.EnvironmentVarGuard() as env:
env['COLUMNS'] = '777'
size = shutil.get_terminal_size()
self.assertEqual(size.columns, 777)
with support.EnvironmentVarGuard() as env:
env['LINES'] = '888'
size = shutil.get_terminal_size()
self.assertEqual(size.lines, 888)
@unittest.skipUnless(os.isatty(sys.__stdout__.fileno()), "not on tty")
def test_stty_match(self):
"""Check if stty returns the same results ignoring env
This test will fail if stdin and stdout are connected to
different terminals with different sizes. Nevertheless, such
situations should be pretty rare.
"""
try:
size = subprocess.check_output(['stty', 'size']).decode().split()
except (FileNotFoundError, subprocess.CalledProcessError):
self.skipTest("stty invocation failed")
expected = (int(size[1]), int(size[0])) # reversed order
with support.EnvironmentVarGuard() as env:
del env['LINES']
del env['COLUMNS']
actual = shutil.get_terminal_size()
self.assertEqual(expected, actual)
def test_main():
support.run_unittest(TestShutil, TestMove, TestCopyFile,
TermsizeTests, TestWhich)
if __name__ == '__main__':
test_main()
| bsd-3-clause |
Xprima-ERP/odoo_addons | xpr_xis_connector/__init__.py | 1 | 1040 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Savoir-faire Linux
# (<http://www.savoirfairelinux.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from . import models
| gpl-3.0 |
FNST-OpenStack/horizon | openstack_dashboard/contrib/trove/content/databases/forms.py | 17 | 3881 | # Copyright 2014 Tesora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.forms import ValidationError # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.contrib.trove import api
class ResizeVolumeForm(forms.SelfHandlingForm):
instance_id = forms.CharField(widget=forms.HiddenInput())
orig_size = forms.IntegerField(
label=_("Current Size (GB)"),
widget=forms.TextInput(attrs={'readonly': 'readonly'}),
required=False,
)
new_size = forms.IntegerField(label=_("New Size (GB)"))
def clean(self):
cleaned_data = super(ResizeVolumeForm, self).clean()
new_size = cleaned_data.get('new_size')
if new_size <= self.initial['orig_size']:
raise ValidationError(
_("New size for volume must be greater than current size."))
return cleaned_data
def handle(self, request, data):
instance = data.get('instance_id')
try:
api.trove.instance_resize_volume(request,
instance,
data['new_size'])
messages.success(request, _('Resizing volume "%s"') % instance)
except Exception as e:
redirect = reverse("horizon:project:databases:index")
exceptions.handle(request, _('Unable to resize volume. %s') %
e.message, redirect=redirect)
return True
class ResizeInstanceForm(forms.SelfHandlingForm):
instance_id = forms.CharField(widget=forms.HiddenInput())
old_flavor_name = forms.CharField(label=_("Old Flavor"),
required=False,
widget=forms.TextInput(
attrs={'readonly': 'readonly'}))
new_flavor = forms.ChoiceField(label=_("New Flavor"),
help_text=_("Choose a new instance "
"flavor."))
def __init__(self, request, *args, **kwargs):
super(ResizeInstanceForm, self).__init__(request, *args, **kwargs)
old_flavor_id = kwargs.get('initial', {}).get('old_flavor_id')
choices = kwargs.get('initial', {}).get('flavors')
# Remove current flavor from the list of flavor choices
choices = [(flavor_id, name) for (flavor_id, name) in choices
if flavor_id != old_flavor_id]
if choices:
choices.insert(0, ("", _("Select a new flavor")))
else:
choices.insert(0, ("", _("No flavors available")))
self.fields['new_flavor'].choices = choices
def handle(self, request, data):
instance = data.get('instance_id')
flavor = data.get('new_flavor')
try:
api.trove.instance_resize(request, instance, flavor)
messages.success(request, _('Resizing instance "%s"') % instance)
except Exception as e:
redirect = reverse("horizon:project:databases:index")
exceptions.handle(request, _('Unable to resize instance. %s') %
e.message, redirect=redirect)
return True
| apache-2.0 |
ocefpaf/system-test | pelican-plugins/extract_toc/extract_toc.py | 9 | 1391 | # -*- coding: utf-8 -*-
"""
Extract Table of Content
========================
A Pelican plugin to extract table of contents (ToC) from `article.content` and
place it in its own `article.toc` variable for use in templates.
"""
from os import path
from bs4 import BeautifulSoup
from pelican import signals, readers, contents
def extract_toc(content):
if isinstance(content, contents.Static):
return
soup = BeautifulSoup(content._content,'html.parser')
filename = content.source_path
extension = path.splitext(filename)[1][1:]
toc = None
# if it is a Markdown file
if extension in readers.MarkdownReader.file_extensions:
toc = soup.find('div', class_='toc')
if toc: toc.extract()
# else if it is a reST file
elif extension in readers.RstReader.file_extensions:
toc = soup.find('div', class_='contents topic')
if toc: toc.extract()
if toc:
        tag = BeautifulSoup(str(toc), 'html.parser')
        tag.div['class'] = 'toc'
        tag.div['id'] = ''
        p = tag.find('p', class_='topic-title first')
        if p:
            p.extract()
        toc = tag
elif not toc: # Pandoc reader
toc = soup.find('nav', id='TOC')
if toc:
toc.extract()
content._content = soup.decode()
content.toc = toc.decode()
def register():
signals.content_object_init.connect(extract_toc)
| unlicense |
kerneltask/RIOT | tests/gnrc_sock_udp/tests/01-run.py | 24 | 3816 | #!/usr/bin/env python3
# Copyright (C) 2016 Kaspar Schleiser <[email protected]>
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
from datetime import datetime
sys.path.append(os.path.join(os.environ['RIOTBASE'], 'dist/tools/testrunner'))
import testrunner
class InvalidTimeout(Exception):
pass
def testfunc(child):
child.expect_exact(u"Calling test_sock_udp_create__EADDRINUSE()")
child.expect_exact(u"Calling test_sock_udp_create__EAFNOSUPPORT()")
child.expect_exact(u"Calling test_sock_udp_create__EINVAL_addr()")
child.expect_exact(u"Calling test_sock_udp_create__EINVAL_netif()")
child.expect_exact(u"Calling test_sock_udp_create__no_endpoints()")
child.expect_exact(u"Calling test_sock_udp_create__only_local()")
child.expect_exact(u"Calling test_sock_udp_create__only_local_reuse_ep()")
child.expect_exact(u"Calling test_sock_udp_create__only_remote()")
child.expect_exact(u"Calling test_sock_udp_create__full()")
child.expect_exact(u"Calling test_sock_udp_recv__EADDRNOTAVAIL()")
child.expect_exact(u"Calling test_sock_udp_recv__EAGAIN()")
child.expect_exact(u"Calling test_sock_udp_recv__ENOBUFS()")
child.expect_exact(u"Calling test_sock_udp_recv__EPROTO()")
child.expect_exact(u"Calling test_sock_udp_recv__ETIMEDOUT()")
child.match # get to ensure program reached that point
start = datetime.now()
child.expect_exact(u" * Calling sock_udp_recv()")
child.expect(u" \\* \\(timed out with timeout (\\d+)\\)")
exp_diff = int(child.match.group(1))
stop = datetime.now()
diff = (stop - start)
diff = (diff.seconds * 1000000) + diff.microseconds
# fail within 5% of expected
if diff > (exp_diff + (exp_diff * 0.05)) or \
diff < (exp_diff - (exp_diff * 0.05)):
        raise InvalidTimeout("Invalid timeout %d (expected %d)" % (diff, exp_diff))
else:
print("Timed out correctly: %d (expected %d)" % (diff, exp_diff))
child.expect_exact(u"Calling test_sock_udp_recv__socketed()")
child.expect_exact(u"Calling test_sock_udp_recv__socketed_with_remote()")
child.expect_exact(u"Calling test_sock_udp_recv__unsocketed()")
child.expect_exact(u"Calling test_sock_udp_recv__unsocketed_with_remote()")
child.expect_exact(u"Calling test_sock_udp_recv__with_timeout()")
child.expect_exact(u"Calling test_sock_udp_recv__non_blocking()")
child.expect_exact(u"Calling test_sock_udp_send__EAFNOSUPPORT()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_addr()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_netif()")
child.expect_exact(u"Calling test_sock_udp_send__EINVAL_port()")
child.expect_exact(u"Calling test_sock_udp_send__ENOTCONN()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_local_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_no_local()")
child.expect_exact(u"Calling test_sock_udp_send__socketed()")
child.expect_exact(u"Calling test_sock_udp_send__socketed_other_remote()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_local_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed_no_local()")
child.expect_exact(u"Calling test_sock_udp_send__unsocketed()")
child.expect_exact(u"Calling test_sock_udp_send__no_sock_no_netif()")
child.expect_exact(u"Calling test_sock_udp_send__no_sock()")
child.expect_exact(u"ALL TESTS SUCCESSFUL")
if __name__ == "__main__":
sys.exit(testrunner.run(testfunc))
| lgpl-2.1 |
ImageEngine/gaffer | python/GafferUI/PathVectorDataPlugValueWidget.py | 8 | 4045 | ##########################################################################
#
# Copyright (c) 2011-2012, Image Engine Design Inc. All rights reserved.
# Copyright (c) 2012, John Haddon. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
## Supported plug metadata - used to provide arguments to a
# PathChooserDialogue :
#
# - "path:leaf"
# - "path:valid"
# - "path:bookmarks"
class PathVectorDataPlugValueWidget( GafferUI.PlugValueWidget ) :
## path should be an instance of Gaffer.Path, optionally with
# filters applied. It will be used to convert string values to
# paths for the path uis to edit.
#
# \deprecated The pathChooserDialogueKeywords argument will be removed
# in a future version - use metadata instead.
def __init__( self, plug, path, pathChooserDialogueKeywords={}, **kw ) :
self.__dataWidget = GafferUI.PathVectorDataWidget( path=path, pathChooserDialogueKeywords=Gaffer.WeakMethod( self.__pathChooserDialogueKeywords ) )
GafferUI.PlugValueWidget.__init__( self, self.__dataWidget, plug, **kw )
self.__dataWidget.dataChangedSignal().connect( Gaffer.WeakMethod( self.__dataChanged ), scoped = False )
self.__deprecatedPathChooserDialogueKeywords = pathChooserDialogueKeywords
self._updateFromPlug()
def path( self ) :
return self.__dataWidget.path()
def _updateFromPlug( self ) :
if self.getPlug() is not None :
self.__dataWidget.setData( self.getPlug().getValue() )
self.__dataWidget.setEditable( self._editable() )
def __dataChanged( self, widget ) :
assert( widget is self.__dataWidget )
with Gaffer.UndoScope( self.getPlug().ancestor( Gaffer.ScriptNode ) ) :
with Gaffer.BlockedConnection( self._plugConnections() ) :
self.getPlug().setValue( self.__dataWidget.getData()[0] )
def __pathChooserDialogueKeywords( self ) :
result = {}
result["leaf"] = Gaffer.Metadata.value( self.getPlug(), "path:leaf" )
result["valid"] = Gaffer.Metadata.value( self.getPlug(), "path:valid" )
bookmarks = Gaffer.Metadata.value( self.getPlug(), "path:bookmarks" )
if bookmarks is not None :
result["bookmarks"] = GafferUI.Bookmarks.acquire( self.getPlug(), type( self.path() ), bookmarks )
if callable( self.__deprecatedPathChooserDialogueKeywords ) :
result.update( self.__deprecatedPathChooserDialogueKeywords() )
else :
result.update( self.__deprecatedPathChooserDialogueKeywords )
return result
| bsd-3-clause |
oVirt/vdsm | lib/vdsm/storage/sdm/api/add_bitmap.py | 2 | 2642 | #
# Copyright 2020 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Refer to the README and COPYING files for full details of the license
#
from vdsm.common import errors
from vdsm.storage import bitmaps
from vdsm.storage import constants as sc
from vdsm.storage import guarded
from vdsm.storage import qemuimg
from vdsm.storage.sdm.volume_info import VolumeInfo
from . import base
class Error(errors.Base):
msg = ("Cannot add bitmap {self.bitmap} to "
"volume {self.vol_id}: {self.reason}")
def __init__(self, vol_id, bitmap, reason):
self.vol_id = vol_id
self.reason = reason
self.bitmap = bitmap
class Job(base.Job):
def __init__(self, job_id, host_id, vol_info, bitmap):
super(Job, self).__init__(job_id, 'add_bitmap', host_id)
self._vol_info = VolumeInfo(vol_info, host_id)
self.bitmap = bitmap
def _validate(self):
if self._vol_info.volume.getFormat() != sc.COW_FORMAT:
raise Error(
self._vol_info.vol_id,
self.bitmap,
"volume is not in COW format")
        # validate that the bitmap doesn't exist on any volume in the chain
for info in qemuimg.info(self._vol_info.path, backing_chain=True):
if "format-specific" in info:
                vol_bitmaps = info["format-specific"]["data"].get("bitmaps", [])
                bitmap_names = {bitmap["name"] for bitmap in vol_bitmaps}
                if self.bitmap in bitmap_names:
raise Error(
self._vol_info.vol_id,
self.bitmap,
"Volume already contains the requested bitmap")
def _run(self):
with guarded.context(self._vol_info.locks):
with self._vol_info.prepare():
self._validate()
with self._vol_info.volume_operation():
bitmaps.add_bitmap(self._vol_info.path, self.bitmap)
| gpl-2.0 |
jonyroda97/redbot-amigosprovaveis | lib/youtube_dl/extractor/tele13.py | 90 | 3345 | # coding: utf-8
from __future__ import unicode_literals
from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
js_to_json,
qualities,
determine_ext,
)
class Tele13IE(InfoExtractor):
_VALID_URL = r'^https?://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)'
_TESTS = [
{
'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
'md5': '4cb1fa38adcad8fea88487a078831755',
'info_dict': {
'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
'ext': 'mp4',
'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda',
},
'params': {
# HTTP Error 404: Not Found
'skip_download': True,
},
},
{
'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok',
'md5': '867adf6a3b3fef932c68a71d70b70946',
'info_dict': {
'id': 'rOoKv2OMpOw',
'ext': 'mp4',
'title': 'Shooting star seen on 7-Sep-2015',
'description': 'md5:7292ff2a34b2f673da77da222ae77e1e',
'uploader': 'Porjai Jaturongkhakun',
'upload_date': '20150906',
'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw',
},
'add_ie': ['Youtube'],
}
]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
setup_js = self._search_regex(
r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)",
webpage, 'setup code')
sources = self._parse_json(self._search_regex(
r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'),
display_id, js_to_json)
preference = qualities(['Móvil', 'SD', 'HD'])
formats = []
urls = []
for f in sources:
format_url = f['file']
if format_url and format_url not in urls:
ext = determine_ext(format_url)
if ext == 'm3u8':
formats.extend(self._extract_m3u8_formats(
format_url, display_id, 'mp4', 'm3u8_native',
m3u8_id='hls', fatal=False))
elif YoutubeIE.suitable(format_url):
return self.url_result(format_url, 'Youtube')
else:
formats.append({
'url': format_url,
'format_id': f.get('label'),
'preference': preference(f.get('label')),
'ext': ext,
})
urls.append(format_url)
self._sort_formats(formats)
return {
'id': display_id,
'title': self._search_regex(
r'title\s*:\s*"([^"]+)"', setup_js, 'title'),
'description': self._html_search_meta(
'description', webpage, 'description'),
'thumbnail': self._search_regex(
r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None),
'formats': formats,
}
| gpl-3.0 |
b4oshany/Localstrorage-audio | server/lib/werkzeug/routing.py | 313 | 62520 | # -*- coding: utf-8 -*-
"""
werkzeug.routing
~~~~~~~~~~~~~~~~
When it comes to combining multiple controller or view functions (however
you want to call them) you need a dispatcher. A simple way would be
applying regular expression tests on the ``PATH_INFO`` and calling
registered callback functions that return the value then.
This module implements a much more powerful system than simple regular
expression matching because it can also convert values in the URLs and
build URLs.
Here is a simple example that creates a URL map for an application with
two subdomains (www and kb) and some URL rules:
>>> m = Map([
... # Static URLs
... Rule('/', endpoint='static/index'),
... Rule('/about', endpoint='static/about'),
... Rule('/help', endpoint='static/help'),
... # Knowledge Base
... Subdomain('kb', [
... Rule('/', endpoint='kb/index'),
... Rule('/browse/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/', endpoint='kb/browse'),
... Rule('/browse/<int:id>/<int:page>', endpoint='kb/browse')
... ])
... ], default_subdomain='www')
If the application doesn't use subdomains it's perfectly fine to not set
the default subdomain and not use the `Subdomain` rule factory. The endpoint
in the rules can be anything, for example import paths or unique
identifiers. The WSGI application can use those endpoints to get the
handler for that URL. It doesn't have to be a string at all but it's
recommended.
Now it's possible to create a URL adapter for one of the subdomains and
build URLs:
>>> c = m.bind('example.com')
>>> c.build("kb/browse", dict(id=42))
'http://kb.example.com/browse/42/'
>>> c.build("kb/browse", dict())
'http://kb.example.com/browse/'
>>> c.build("kb/browse", dict(id=42, page=3))
'http://kb.example.com/browse/42/3'
>>> c.build("static/about")
'/about'
>>> c.build("static/index", force_external=True)
'http://www.example.com/'
>>> c = m.bind('example.com', subdomain='kb')
>>> c.build("static/about")
'http://www.example.com/about'
The first argument to bind is the server name *without* the subdomain.
Per default it will assume that the script is mounted on the root, but
often that's not the case so you can provide the real mount point as
second argument:
>>> c = m.bind('example.com', '/applications/example')
The third argument can be the subdomain, if not given the default
subdomain is used. For more details about binding have a look at the
documentation of the `MapAdapter`.
And here is how you can match URLs:
>>> c = m.bind('example.com')
>>> c.match("/")
('static/index', {})
>>> c.match("/about")
('static/about', {})
>>> c = m.bind('example.com', '/', 'kb')
>>> c.match("/")
('kb/index', {})
>>> c.match("/browse/42/23")
('kb/browse', {'id': 42, 'page': 23})
If matching fails you get a `NotFound` exception, if the rule thinks
it's a good idea to redirect (for example because the URL was defined
to have a slash at the end but the request was missing that slash) it
will raise a `RequestRedirect` exception. Both are subclasses of the
`HTTPException` so you can use those errors as responses in the
application.
If matching succeeded but the URL rule was incompatible to the given
method (for example there were only rules for `GET` and `HEAD` and
the routing system tried to match a `POST` request) a `MethodNotAllowed`
exception is raised.
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import posixpath
from pprint import pformat
try:
from urlparse import urljoin
except ImportError:
from urllib.parse import urljoin
from werkzeug.urls import url_encode, url_quote
from werkzeug.utils import redirect, format_string
from werkzeug.exceptions import HTTPException, NotFound, MethodNotAllowed
from werkzeug._internal import _get_environ, _encode_idna
from werkzeug._compat import itervalues, iteritems, to_unicode, to_bytes, \
text_type, string_types, native_string_result, \
implements_to_string, wsgi_decoding_dance
from werkzeug.datastructures import ImmutableDict, MultiDict
_rule_re = re.compile(r'''
(?P<static>[^<]*) # static rule data
<
(?:
(?P<converter>[a-zA-Z_][a-zA-Z0-9_]*) # converter name
(?:\((?P<args>.*?)\))? # converter arguments
\: # variable delimiter
)?
(?P<variable>[a-zA-Z_][a-zA-Z0-9_]*) # variable name
>
''', re.VERBOSE)
_simple_rule_re = re.compile(r'<([^>]+)>')
_converter_args_re = re.compile(r'''
((?P<name>\w+)\s*=\s*)?
(?P<value>
True|False|
\d+.\d+|
\d+.|
\d+|
\w+|
[urUR]?(?P<stringval>"[^"]*?"|'[^']*')
)\s*,
''', re.VERBOSE|re.UNICODE)
_PYTHON_CONSTANTS = {
'None': None,
'True': True,
'False': False
}
def _pythonize(value):
if value in _PYTHON_CONSTANTS:
return _PYTHON_CONSTANTS[value]
for convert in int, float:
try:
return convert(value)
except ValueError:
pass
if value[:1] == value[-1:] and value[0] in '"\'':
value = value[1:-1]
return text_type(value)
def parse_converter_args(argstr):
argstr += ','
args = []
kwargs = {}
for item in _converter_args_re.finditer(argstr):
value = item.group('stringval')
if value is None:
value = item.group('value')
value = _pythonize(value)
if not item.group('name'):
args.append(value)
else:
name = item.group('name')
kwargs[name] = value
return tuple(args), kwargs
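# For illustration (a sketch, not part of the original module), the helper above
# parses converter argument strings like so:
#     parse_converter_args('length=2')            # -> ((), {'length': 2})
#     parse_converter_args('"foo", minlength=1')  # -> (('foo',), {'minlength': 1})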
def parse_rule(rule):
"""Parse a rule and return it as generator. Each iteration yields tuples
in the form ``(converter, arguments, variable)``. If the converter is
`None` it's a static url part, otherwise it's a dynamic one.
:internal:
"""
pos = 0
end = len(rule)
do_match = _rule_re.match
used_names = set()
while pos < end:
m = do_match(rule, pos)
if m is None:
break
data = m.groupdict()
if data['static']:
yield None, None, data['static']
variable = data['variable']
converter = data['converter'] or 'default'
if variable in used_names:
raise ValueError('variable name %r used twice.' % variable)
used_names.add(variable)
yield converter, data['args'] or None, variable
pos = m.end()
if pos < end:
remaining = rule[pos:]
if '>' in remaining or '<' in remaining:
raise ValueError('malformed url rule: %r' % rule)
yield None, None, remaining
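# For illustration (not part of the original module), a rule string decomposes
# into static and dynamic parts:
#     list(parse_rule('/users/<int:user_id>/edit'))
#     # -> [(None, None, '/users/'), ('int', None, 'user_id'), (None, None, '/edit')]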
class RoutingException(Exception):
"""Special exceptions that require the application to redirect, notifying
about missing urls, etc.
:internal:
"""
class RequestRedirect(HTTPException, RoutingException):
"""Raise if the map requests a redirect. This is for example the case if
    `strict_slashes` are activated and a URL that requires a trailing slash
    was requested without one.
The attribute `new_url` contains the absolute destination url.
"""
code = 301
def __init__(self, new_url):
RoutingException.__init__(self, new_url)
self.new_url = new_url
def get_response(self, environ):
return redirect(self.new_url, self.code)
class RequestSlash(RoutingException):
"""Internal exception."""
class RequestAliasRedirect(RoutingException):
"""This rule is an alias and wants to redirect to the canonical URL."""
def __init__(self, matched_values):
self.matched_values = matched_values
class BuildError(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(self, endpoint, values, method):
LookupError.__init__(self, endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
class ValidationError(ValueError):
"""Validation error. If a rule converter raises this exception the rule
does not match the current URL and the next URL is tried.
"""
class RuleFactory(object):
"""As soon as you have more complex URL setups it's a good idea to use rule
factories to avoid repetitive tasks. Some of them are builtin, others can
be added by subclassing `RuleFactory` and overriding `get_rules`.
"""
def get_rules(self, map):
"""Subclasses of `RuleFactory` have to override this method and return
an iterable of rules."""
raise NotImplementedError()
class Subdomain(RuleFactory):
"""All URLs provided by this factory have the subdomain set to a
specific domain. For example if you want to use the subdomain for
the current language this can be a good setup::
url_map = Map([
Rule('/', endpoint='#select_language'),
Subdomain('<string(length=2):lang_code>', [
Rule('/', endpoint='index'),
Rule('/about', endpoint='about'),
Rule('/help', endpoint='help')
])
])
All the rules except for the ``'#select_language'`` endpoint will now
listen on a two letter long subdomain that holds the language code
for the current request.
"""
def __init__(self, subdomain, rules):
self.subdomain = subdomain
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.subdomain = self.subdomain
yield rule
class Submount(RuleFactory):
"""Like `Subdomain` but prefixes the URL rule with a given string::
url_map = Map([
Rule('/', endpoint='index'),
Submount('/blog', [
Rule('/', endpoint='blog/index'),
Rule('/entry/<entry_slug>', endpoint='blog/show')
])
])
Now the rule ``'blog/show'`` matches ``/blog/entry/<entry_slug>``.
"""
def __init__(self, path, rules):
self.path = path.rstrip('/')
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.rule = self.path + rule.rule
yield rule
class EndpointPrefix(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix, rules):
self.prefix = prefix
self.rules = rules
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
class RuleTemplate(object):
"""Returns copies of the rules wrapped and expands string templates in
the endpoint, rule, defaults or subdomain sections.
    Here is a small example of such a rule template::
from werkzeug.routing import Map, Rule, RuleTemplate
resource = RuleTemplate([
Rule('/$name/', endpoint='$name.list'),
Rule('/$name/<int:id>', endpoint='$name.show')
])
url_map = Map([resource(name='user'), resource(name='page')])
When a rule template is called the keyword arguments are used to
replace the placeholders in all the string parameters.
"""
def __init__(self, rules):
self.rules = list(rules)
def __call__(self, *args, **kwargs):
return RuleTemplateFactory(self.rules, dict(*args, **kwargs))
class RuleTemplateFactory(RuleFactory):
"""A factory that fills in template variables into rules. Used by
`RuleTemplate` internally.
:internal:
"""
def __init__(self, rules, context):
self.rules = rules
self.context = context
def get_rules(self, map):
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
new_defaults = subdomain = None
if rule.defaults:
new_defaults = {}
for key, value in iteritems(rule.defaults):
if isinstance(value, string_types):
value = format_string(value, self.context)
new_defaults[key] = value
if rule.subdomain is not None:
subdomain = format_string(rule.subdomain, self.context)
new_endpoint = rule.endpoint
if isinstance(new_endpoint, string_types):
new_endpoint = format_string(new_endpoint, self.context)
yield Rule(
format_string(rule.rule, self.context),
new_defaults,
subdomain,
rule.methods,
rule.build_only,
new_endpoint,
rule.strict_slashes
)
@implements_to_string
class Rule(RuleFactory):
"""A Rule represents one URL pattern. There are some options for `Rule`
that change the way it behaves and are passed to the `Rule` constructor.
Note that besides the rule-string all arguments *must* be keyword arguments
in order to not break the application on Werkzeug upgrades.
`string`
Rule strings basically are just normal URL paths with placeholders in
the format ``<converter(arguments):name>`` where the converter and the
arguments are optional. If no converter is defined the `default`
converter is used which means `string` in the normal configuration.
URL rules that end with a slash are branch URLs, others are leaves.
If you have `strict_slashes` enabled (which is the default), all
branch URLs that are matched without a trailing slash will trigger a
redirect to the same URL with the missing slash appended.
The converters are defined on the `Map`.
`endpoint`
The endpoint for this rule. This can be anything. A reference to a
function, a string, a number etc. The preferred way is using a string
because the endpoint is used for URL generation.
`defaults`
An optional dict with defaults for other rules with the same endpoint.
This is a bit tricky but useful if you want to have unique URLs::
url_map = Map([
Rule('/all/', defaults={'page': 1}, endpoint='all_entries'),
Rule('/all/page/<int:page>', endpoint='all_entries')
])
If a user now visits ``http://example.com/all/page/1`` he will be
redirected to ``http://example.com/all/``. If `redirect_defaults` is
disabled on the `Map` instance this will only affect the URL
generation.
`subdomain`
The subdomain rule string for this rule. If not specified the rule
only matches for the `default_subdomain` of the map. If the map is
not bound to a subdomain this feature is disabled.
Can be useful if you want to have user profiles on different subdomains
and all subdomains are forwarded to your application::
url_map = Map([
Rule('/', subdomain='<username>', endpoint='user/homepage'),
Rule('/stats', subdomain='<username>', endpoint='user/stats')
])
`methods`
A sequence of http methods this rule applies to. If not specified, all
methods are allowed. For example this can be useful if you want different
endpoints for `POST` and `GET`. If methods are defined and the path
matches but the method matched against is not in this list or in the
list of another rule for that path the error raised is of the type
`MethodNotAllowed` rather than `NotFound`. If `GET` is present in the
list of methods and `HEAD` is not, `HEAD` is added automatically.
.. versionchanged:: 0.6.1
`HEAD` is now automatically added to the methods if `GET` is
present. The reason for this is that existing code often did not
work properly in servers not rewriting `HEAD` to `GET`
automatically and it was not documented how `HEAD` should be
treated. This was considered a bug in Werkzeug because of that.
`strict_slashes`
Override the `Map` setting for `strict_slashes` only for this rule. If
not specified the `Map` setting is used.
`build_only`
Set this to True and the rule will never match but will create a URL
        that can be built. This is useful if you have resources on a subdomain
or folder that are not handled by the WSGI application (like static data)
`redirect_to`
If given this must be either a string or callable. In case of a
callable it's called with the url adapter that triggered the match and
the values of the URL as keyword arguments and has to return the target
for the redirect, otherwise it has to be a string with placeholders in
rule syntax::
def foo_with_slug(adapter, id):
# ask the database for the slug for the old id. this of
# course has nothing to do with werkzeug.
return 'foo/' + Foo.get_slug_for_id(id)
url_map = Map([
Rule('/foo/<slug>', endpoint='foo'),
Rule('/some/old/url/<slug>', redirect_to='foo/<slug>'),
Rule('/other/old/url/<int:id>', redirect_to=foo_with_slug)
])
When the rule is matched the routing system will raise a
`RequestRedirect` exception with the target for the redirect.
Keep in mind that the URL will be joined against the URL root of the
script so don't use a leading slash on the target URL unless you
really mean root of that domain.
`alias`
If enabled this rule serves as an alias for another rule with the same
endpoint and arguments.
`host`
If provided and the URL map has host matching enabled this can be
used to provide a match rule for the whole host. This also means
that the subdomain feature is disabled.
.. versionadded:: 0.7
The `alias` and `host` parameters were added.
"""
def __init__(self, string, defaults=None, subdomain=None, methods=None,
build_only=False, endpoint=None, strict_slashes=None,
redirect_to=None, alias=False, host=None):
if not string.startswith('/'):
raise ValueError('urls must start with a leading slash')
self.rule = string
self.is_leaf = not string.endswith('/')
self.map = None
self.strict_slashes = strict_slashes
self.subdomain = subdomain
self.host = host
self.defaults = defaults
self.build_only = build_only
self.alias = alias
if methods is None:
self.methods = None
else:
self.methods = set([x.upper() for x in methods])
if 'HEAD' not in self.methods and 'GET' in self.methods:
self.methods.add('HEAD')
self.endpoint = endpoint
self.redirect_to = redirect_to
if defaults:
self.arguments = set(map(str, defaults))
else:
self.arguments = set()
self._trace = self._converters = self._regex = self._weights = None
def empty(self):
"""Return an unbound copy of this rule. This can be useful if you
want to reuse an already bound URL for another map."""
defaults = None
if self.defaults:
defaults = dict(self.defaults)
return Rule(self.rule, defaults, self.subdomain, self.methods,
self.build_only, self.endpoint, self.strict_slashes,
self.redirect_to, self.alias, self.host)
def get_rules(self, map):
yield self
def refresh(self):
"""Rebinds and refreshes the URL. Call this if you modified the
rule in place.
:internal:
"""
self.bind(self.map, rebind=True)
def bind(self, map, rebind=False):
"""Bind the url to a map and create a regular expression based on
the information from the rule itself and the defaults from the map.
:internal:
"""
if self.map is not None and not rebind:
raise RuntimeError('url rule %r already bound to map %r' %
(self, self.map))
self.map = map
if self.strict_slashes is None:
self.strict_slashes = map.strict_slashes
if self.subdomain is None:
self.subdomain = map.default_subdomain
self.compile()
def get_converter(self, variable_name, converter_name, args, kwargs):
"""Looks up the converter for the given parameter.
.. versionadded:: 0.9
"""
        if converter_name not in self.map.converters:
raise LookupError('the converter %r does not exist' % converter_name)
return self.map.converters[converter_name](self.map, *args, **kwargs)
def compile(self):
"""Compiles the regular expression and stores it."""
assert self.map is not None, 'rule not bound'
if self.map.host_matching:
domain_rule = self.host or ''
else:
domain_rule = self.subdomain or ''
self._trace = []
self._converters = {}
self._weights = []
regex_parts = []
def _build_regex(rule):
for converter, arguments, variable in parse_rule(rule):
if converter is None:
regex_parts.append(re.escape(variable))
self._trace.append((False, variable))
for part in variable.split('/'):
if part:
self._weights.append((0, -len(part)))
else:
if arguments:
c_args, c_kwargs = parse_converter_args(arguments)
else:
c_args = ()
c_kwargs = {}
convobj = self.get_converter(
variable, converter, c_args, c_kwargs)
regex_parts.append('(?P<%s>%s)' % (variable, convobj.regex))
self._converters[variable] = convobj
self._trace.append((True, variable))
self._weights.append((1, convobj.weight))
self.arguments.add(str(variable))
_build_regex(domain_rule)
regex_parts.append('\\|')
self._trace.append((False, '|'))
_build_regex(self.is_leaf and self.rule or self.rule.rstrip('/'))
if not self.is_leaf:
self._trace.append((False, '/'))
if self.build_only:
return
regex = r'^%s%s$' % (
u''.join(regex_parts),
(not self.is_leaf or not self.strict_slashes) and \
'(?<!/)(?P<__suffix__>/?)' or ''
)
self._regex = re.compile(regex, re.UNICODE)
def match(self, path):
"""Check if the rule matches a given path. Path is a string in the
form ``"subdomain|/path(method)"`` and is assembled by the map. If
the map is doing host matching the subdomain part will be the host
instead.
If the rule matches a dict with the converted values is returned,
otherwise the return value is `None`.
:internal:
"""
if not self.build_only:
m = self._regex.search(path)
if m is not None:
groups = m.groupdict()
# we have a folder like part of the url without a trailing
# slash and strict slashes enabled. raise an exception that
# tells the map to redirect to the same url but with a
# trailing slash
if self.strict_slashes and not self.is_leaf and \
not groups.pop('__suffix__'):
raise RequestSlash()
# if we are not in strict slashes mode we have to remove
# a __suffix__
elif not self.strict_slashes:
del groups['__suffix__']
result = {}
for name, value in iteritems(groups):
try:
value = self._converters[name].to_python(value)
except ValidationError:
return
result[str(name)] = value
if self.defaults:
result.update(self.defaults)
if self.alias and self.map.redirect_defaults:
raise RequestAliasRedirect(result)
return result
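    # Illustrative sketch (not part of the original module): once a rule is bound
    # to a map with the default '' subdomain, it matches the assembled path string:
    #     m = Map([Rule('/page/<int:id>', endpoint='page')])
    #     r = list(m.iter_rules())[0]
    #     r.match(u'|/page/42')    # -> {'id': 42}
    #     r.match(u'|/page/abc')   # -> None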
def build(self, values, append_unknown=True):
"""Assembles the relative url for that rule and the subdomain.
If building doesn't work for some reasons `None` is returned.
:internal:
"""
tmp = []
add = tmp.append
processed = set(self.arguments)
for is_dynamic, data in self._trace:
if is_dynamic:
try:
add(self._converters[data].to_url(values[data]))
except ValidationError:
return
processed.add(data)
else:
add(url_quote(to_bytes(data, self.map.charset), safe='/:|+'))
domain_part, url = (u''.join(tmp)).split(u'|', 1)
if append_unknown:
query_vars = MultiDict(values)
for key in processed:
if key in query_vars:
del query_vars[key]
if query_vars:
url += u'?' + url_encode(query_vars, charset=self.map.charset,
sort=self.map.sort_parameters,
key=self.map.sort_key)
return domain_part, url
def provides_defaults_for(self, rule):
"""Check if this rule has defaults for a given rule.
:internal:
"""
return not self.build_only and self.defaults and \
self.endpoint == rule.endpoint and self != rule and \
self.arguments == rule.arguments
def suitable_for(self, values, method=None):
"""Check if the dict of values has enough data for url generation.
:internal:
"""
# if a method was given explicitly and that method is not supported
# by this rule, this rule is not suitable.
if method is not None and self.methods is not None \
and method not in self.methods:
return False
defaults = self.defaults or ()
# all arguments required must be either in the defaults dict or
# the value dictionary otherwise it's not suitable
for key in self.arguments:
if key not in defaults and key not in values:
return False
        # in case defaults are given we ensure that either the value was
# skipped or the value is the same as the default value.
if defaults:
for key, value in iteritems(defaults):
if key in values and value != values[key]:
return False
return True
def match_compare_key(self):
"""The match compare key for sorting.
Current implementation:
1. rules without any arguments come first for performance
reasons only as we expect them to match faster and some
common ones usually don't have any arguments (index pages etc.)
2. The more complex rules come first so the second argument is the
negative length of the number of weights.
3. lastly we order by the actual weights.
:internal:
"""
return bool(self.arguments), -len(self._weights), self._weights
def build_compare_key(self):
"""The build compare key for sorting.
:internal:
"""
return self.alias and 1 or 0, -len(self.arguments), \
-len(self.defaults or ())
def __eq__(self, other):
return self.__class__ is other.__class__ and \
self._trace == other._trace
def __ne__(self, other):
return not self.__eq__(other)
def __str__(self):
return self.rule
@native_string_result
def __repr__(self):
if self.map is None:
return u'<%s (unbound)>' % self.__class__.__name__
tmp = []
for is_dynamic, data in self._trace:
if is_dynamic:
tmp.append(u'<%s>' % data)
else:
tmp.append(data)
return u'<%s %s%s -> %s>' % (
self.__class__.__name__,
repr((u''.join(tmp)).lstrip(u'|')).lstrip(u'u'),
self.methods is not None and u' (%s)' % \
u', '.join(self.methods) or u'',
self.endpoint
)
class BaseConverter(object):
"""Base class for all converters."""
regex = '[^/]+'
weight = 100
def __init__(self, map):
self.map = map
def to_python(self, value):
return value
def to_url(self, value):
return url_quote(value, charset=self.map.charset)
class UnicodeConverter(BaseConverter):
"""This converter is the default converter and accepts any string but
only one path segment. Thus the string can not include a slash.
This is the default validator.
Example::
Rule('/pages/<page>'),
Rule('/<string(length=2):lang_code>')
:param map: the :class:`Map`.
:param minlength: the minimum length of the string. Must be greater
                      than or equal to 1.
:param maxlength: the maximum length of the string.
:param length: the exact length of the string.
"""
def __init__(self, map, minlength=1, maxlength=None, length=None):
BaseConverter.__init__(self, map)
if length is not None:
length = '{%d}' % int(length)
else:
if maxlength is None:
maxlength = ''
else:
maxlength = int(maxlength)
length = '{%s,%s}' % (
int(minlength),
maxlength
)
self.regex = '[^/]' + length
class AnyConverter(BaseConverter):
"""Matches one of the items provided. Items can either be Python
identifiers or strings::
Rule('/<any(about, help, imprint, class, "foo,bar"):page_name>')
:param map: the :class:`Map`.
:param items: this function accepts the possible items as positional
arguments.
"""
def __init__(self, map, *items):
BaseConverter.__init__(self, map)
self.regex = '(?:%s)' % '|'.join([re.escape(x) for x in items])
class PathConverter(BaseConverter):
"""Like the default :class:`UnicodeConverter`, but it also matches
slashes. This is useful for wikis and similar applications::
Rule('/<path:wikipage>')
Rule('/<path:wikipage>/edit')
:param map: the :class:`Map`.
"""
regex = '[^/].*?'
weight = 200
class NumberConverter(BaseConverter):
"""Baseclass for `IntegerConverter` and `FloatConverter`.
:internal:
"""
weight = 50
def __init__(self, map, fixed_digits=0, min=None, max=None):
BaseConverter.__init__(self, map)
self.fixed_digits = fixed_digits
self.min = min
self.max = max
def to_python(self, value):
if (self.fixed_digits and len(value) != self.fixed_digits):
raise ValidationError()
value = self.num_convert(value)
if (self.min is not None and value < self.min) or \
(self.max is not None and value > self.max):
raise ValidationError()
return value
def to_url(self, value):
value = self.num_convert(value)
if self.fixed_digits:
value = ('%%0%sd' % self.fixed_digits) % value
return str(value)
class IntegerConverter(NumberConverter):
"""This converter only accepts integer values::
Rule('/page/<int:page>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param fixed_digits: the number of fixed digits in the URL. If you set
this to ``4`` for example, the application will
only match if the url looks like ``/0001/``. The
default is variable length.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+'
num_convert = int
class FloatConverter(NumberConverter):
"""This converter only accepts floating point values::
Rule('/probability/<float:probability>')
This converter does not support negative values.
:param map: the :class:`Map`.
:param min: the minimal value.
:param max: the maximal value.
"""
regex = r'\d+\.\d+'
num_convert = float
def __init__(self, map, min=None, max=None):
NumberConverter.__init__(self, map, 0, min, max)
#: the default converter mapping for the map.
DEFAULT_CONVERTERS = {
'default': UnicodeConverter,
'string': UnicodeConverter,
'any': AnyConverter,
'path': PathConverter,
'int': IntegerConverter,
'float': FloatConverter
}
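# Illustrative sketch (not part of the original module): extra converters can be
# written by subclassing BaseConverter and passed to ``Map(converters=...)``:
#
#     class ListConverter(BaseConverter):
#         regex = r'\w+(?:,\w+)*'
#         def to_python(self, value):
#             return value.split(',')
#         def to_url(self, values):
#             return ','.join(super(ListConverter, self).to_url(v) for v in values)
#
#     url_map = Map([Rule('/tags/<list:tags>', endpoint='show_tags')],
#                   converters={'list': ListConverter})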
class Map(object):
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param charset: charset of the url. defaults to ``"utf-8"``
:param strict_slashes: Take care of trailing slashes.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param encoding_errors: the error method to use for decoding
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
.. versionadded:: 0.5
       `sort_parameters` and `sort_key` were added.
.. versionadded:: 0.7
       `encoding_errors` and `host_matching` were added.
"""
#: .. versionadded:: 0.6
#: a dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
strict_slashes=True, redirect_defaults=True,
converters=None, sort_parameters=False, sort_key=None,
encoding_errors='replace', host_matching=False):
self._rules = []
self._rules_by_endpoint = {}
self._remap = True
self.default_subdomain = default_subdomain
self.charset = charset
self.encoding_errors = encoding_errors
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
def is_endpoint_expecting(self, endpoint, *arguments):
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments.issubset(rule.arguments):
return True
return False
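    # For example (illustrative, using the rules from the ``defaults`` docs above),
    # with ``Rule('/all/', defaults={'page': 1}, endpoint='all_entries')`` and
    # ``Rule('/all/page/<int:page>', endpoint='all_entries')`` in a map ``m``:
    #     m.is_endpoint_expecting('all_entries', 'page')  # -> True
    #     m.is_endpoint_expecting('all_entries', 'lang')  # -> False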
def iter_rules(self, endpoint=None):
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory):
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
self._rules.append(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(self, server_name, script_name=None, subdomain=None,
url_scheme='http', default_method='GET', path_info=None,
query_args=None):
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
        not defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionadded:: 0.7
`query_args` added
.. versionadded:: 0.8
`query_args` can now also be a string.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError('host matching enabled and a '
'subdomain was provided')
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = '/'
server_name = _encode_idna(server_name)
return MapAdapter(self, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args)
def bind_to_environ(self, environ, server_name=None, subdomain=None):
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
`HTTP_HOST` if provided) as used `server_name` with disabled subdomain
feature.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
        objects. Additionally `PATH_INFO` is added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
environ = _get_environ(environ)
if server_name is None:
if 'HTTP_HOST' in environ:
server_name = environ['HTTP_HOST']
else:
server_name = environ['SERVER_NAME']
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
server_name += ':' + environ['SERVER_PORT']
elif subdomain is None and not self.host_matching:
server_name = server_name.lower()
if 'HTTP_HOST' in environ:
wsgi_server_name = environ.get('HTTP_HOST')
else:
wsgi_server_name = environ.get('SERVER_NAME')
if (environ['wsgi.url_scheme'], environ['SERVER_PORT']) not \
in (('https', '443'), ('http', '80')):
wsgi_server_name += ':' + environ['SERVER_PORT']
wsgi_server_name = wsgi_server_name.lower()
cur_server_name = wsgi_server_name.split('.')
real_server_name = server_name.split('.')
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
                # accessed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
subdomain = '<invalid>'
else:
subdomain = '.'.join(filter(None, cur_server_name[:offset]))
def _get_wsgi_string(name):
val = environ.get(name)
if val is not None:
return wsgi_decoding_dance(val, self.charset)
script_name = _get_wsgi_string('SCRIPT_NAME')
path_info = _get_wsgi_string('PATH_INFO')
query_args = _get_wsgi_string('QUERY_STRING')
return Map.bind(self, server_name, script_name,
subdomain, environ['wsgi.url_scheme'],
environ['REQUEST_METHOD'], path_info,
query_args=query_args)
def update(self):
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if self._remap:
self._rules.sort(key=lambda x: x.match_compare_key())
for rules in itervalues(self._rules_by_endpoint):
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self):
rules = self.iter_rules()
return '%s(%s)' % (self.__class__.__name__, pformat(list(rules)))
class MapAdapter(object):
"""Returned by :meth:`Map.bind` or :meth:`Map.bind_to_environ` and does
the URL matching and building based on runtime information.
"""
def __init__(self, map, server_name, script_name, subdomain,
url_scheme, path_info, default_method, query_args=None):
self.map = map
self.server_name = to_unicode(server_name)
script_name = to_unicode(script_name)
if not script_name.endswith(u'/'):
script_name += u'/'
self.script_name = script_name
self.subdomain = to_unicode(subdomain)
self.url_scheme = to_unicode(url_scheme)
self.path_info = to_unicode(path_info)
self.default_method = to_unicode(default_method)
self.query_args = query_args
def dispatch(self, view_func, path_info=None, method=None,
catch_http_exceptions=False):
"""Does the complete dispatching process. `view_func` is called with
the endpoint and a dict with the values for the view. It should
look up the view function, call it, and return a response object
or WSGI application. http exceptions are not caught by default
so that applications can display nicer error messages by just
catching them by hand. If you want to stick with the default
error messages you can pass it ``catch_http_exceptions=True`` and
it will catch the http exceptions.
        Here is a small example of dispatch usage::
from werkzeug.wrappers import Request, Response
from werkzeug.wsgi import responder
from werkzeug.routing import Map, Rule
def on_index(request):
return Response('Hello from the index')
url_map = Map([Rule('/', endpoint='index')])
views = {'index': on_index}
@responder
def application(environ, start_response):
request = Request(environ)
urls = url_map.bind_to_environ(environ)
return urls.dispatch(lambda e, v: views[e](request, **v),
catch_http_exceptions=True)
Keep in mind that this method might return exception objects, too, so
use :class:`Response.force_type` to get a response object.
:param view_func: a function that is called with the endpoint as
first argument and the value dict as second. Has
to dispatch to the actual view function with this
information. (see above)
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param catch_http_exceptions: set to `True` to catch any of the
werkzeug :class:`HTTPException`\s.
"""
try:
try:
endpoint, args = self.match(path_info, method)
except RequestRedirect as e:
return e
return view_func(endpoint, args)
except HTTPException as e:
if catch_http_exceptions:
return e
raise
def match(self, path_info=None, method=None, return_rule=False,
query_args=None):
"""The usage is simple: you just pass the match method the current
path info as well as the method (which defaults to `GET`). The
following things can then happen:
- you receive a `NotFound` exception that indicates that no URL is
matching. A `NotFound` exception is also a WSGI application you
can call to get a default page not found page (happens to be the
same object as `werkzeug.exceptions.NotFound`)
- you receive a `MethodNotAllowed` exception that indicates that there
is a match for this URL but not for the current request method.
This is useful for RESTful applications.
- you receive a `RequestRedirect` exception with a `new_url`
          attribute. This exception is used to notify you about a redirect
          that Werkzeug requests from your WSGI application. This is for example
          the case if you request ``/foo`` although the correct URL is ``/foo/``.
You can use the `RequestRedirect` instance as response-like object
similar to all other subclasses of `HTTPException`.
- you get a tuple in the form ``(endpoint, arguments)`` if there is
a match (unless `return_rule` is True, in which case you get a tuple
in the form ``(rule, arguments)``)
If the path info is not passed to the match method the default path
info of the map is used (defaults to the root URL if not defined
explicitly).
All of the exceptions raised are subclasses of `HTTPException` so they
        can be used as WSGI responses. They will all render generic error or
redirect pages.
Here is a small example for matching:
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.match("/", "GET")
('index', {})
>>> urls.match("/downloads/42")
('downloads/show', {'id': 42})
And here is what happens on redirect and missing URLs:
>>> urls.match("/downloads")
Traceback (most recent call last):
...
RequestRedirect: http://example.com/downloads/
>>> urls.match("/missing")
Traceback (most recent call last):
...
NotFound: 404 Not Found
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
:param return_rule: return the rule that matched instead of just the
endpoint (defaults to `False`).
:param query_args: optional query arguments that are used for
automatic redirects as string or dictionary. It's
currently not possible to use the query arguments
for URL matching.
.. versionadded:: 0.6
`return_rule` was added.
.. versionadded:: 0.7
`query_args` was added.
.. versionchanged:: 0.8
`query_args` can now also be a string.
"""
self.map.update()
if path_info is None:
path_info = self.path_info
else:
path_info = to_unicode(path_info, self.map.charset)
if query_args is None:
query_args = self.query_args
method = (method or self.default_method).upper()
path = u'%s|/%s' % (self.map.host_matching and self.server_name or
self.subdomain, path_info.lstrip('/'))
have_match_for = set()
for rule in self.map._rules:
try:
rv = rule.match(path)
except RequestSlash:
raise RequestRedirect(self.make_redirect_url(
path_info + '/', query_args))
except RequestAliasRedirect as e:
raise RequestRedirect(self.make_alias_redirect_url(
path, rule.endpoint, e.matched_values, method, query_args))
if rv is None:
continue
if rule.methods is not None and method not in rule.methods:
have_match_for.update(rule.methods)
continue
if self.map.redirect_defaults:
redirect_url = self.get_default_redirect(rule, method, rv,
query_args)
if redirect_url is not None:
raise RequestRedirect(redirect_url)
if rule.redirect_to is not None:
if isinstance(rule.redirect_to, string_types):
def _handle_match(match):
value = rv[match.group(1)]
return rule._converters[match.group(1)].to_url(value)
redirect_url = _simple_rule_re.sub(_handle_match,
rule.redirect_to)
else:
redirect_url = rule.redirect_to(self, **rv)
raise RequestRedirect(str(urljoin('%s://%s%s%s' % (
self.url_scheme,
self.subdomain and self.subdomain + '.' or '',
self.server_name,
self.script_name
), redirect_url)))
if return_rule:
return rule, rv
else:
return rule.endpoint, rv
if have_match_for:
raise MethodNotAllowed(valid_methods=list(have_match_for))
raise NotFound()
def test(self, path_info=None, method=None):
"""Test if a rule would match. Works like `match` but returns `True`
if the URL matches, or `False` if it does not exist.
:param path_info: the path info to use for matching. Overrides the
path info specified on binding.
:param method: the HTTP method used for matching. Overrides the
method specified on binding.
"""
try:
self.match(path_info, method)
except RequestRedirect:
pass
except HTTPException:
return False
return True
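    # Illustrative usage sketch (not from the upstream source; assumes the map
    # built in the `match` docstring above):
    #
    #   >>> urls.test("/")
    #   True
    #   >>> urls.test("/missing")
    #   False
    #
    # Note that a path that merely triggers a RequestRedirect still counts as
    # a match here, since only other HTTPExceptions return False.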
def allowed_methods(self, path_info=None):
"""Returns the valid methods that match for a given path.
.. versionadded:: 0.7
"""
try:
self.match(path_info, method='--')
except MethodNotAllowed as e:
return e.valid_methods
except HTTPException as e:
pass
return []
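    # Note (not from the upstream source): the probe above uses '--', a
    # deliberately impossible method name, so a MethodNotAllowed reveals which
    # methods the matching rules do accept, while any other HTTPException
    # means no rule matched at all and an empty list is returned.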
def get_host(self, domain_part):
"""Figures out the full host name for the given domain part. The
        domain part is a subdomain if host matching is disabled, or a full
        host name otherwise.
"""
if self.map.host_matching:
if domain_part is None:
return self.server_name
return to_unicode(domain_part, 'ascii')
subdomain = domain_part
if subdomain is None:
subdomain = self.subdomain
else:
subdomain = to_unicode(subdomain, 'ascii')
return (subdomain and subdomain + u'.' or u'') + self.server_name
def get_default_redirect(self, rule, method, values, query_args):
"""A helper that returns the URL to redirect to if it finds one.
This is used for default redirecting only.
:internal:
"""
assert self.map.redirect_defaults
for r in self.map._rules_by_endpoint[rule.endpoint]:
# every rule that comes after this one, including ourself
# has a lower priority for the defaults. We order the ones
# with the highest priority up for building.
if r is rule:
break
if r.provides_defaults_for(rule) and \
r.suitable_for(values, method):
values.update(r.defaults)
domain_part, path = r.build(values)
return self.make_redirect_url(
path, query_args, domain_part=domain_part)
def encode_query_args(self, query_args):
if not isinstance(query_args, string_types):
query_args = url_encode(query_args, self.map.charset)
return query_args
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
"""Creates a redirect URL.
:internal:
"""
suffix = ''
if query_args:
suffix = '?' + self.encode_query_args(query_args)
return str('%s://%s/%s%s' % (
self.url_scheme,
self.get_host(domain_part),
posixpath.join(self.script_name[:-1].lstrip('/'),
url_quote(path_info.lstrip('/'), self.map.charset,
safe='/:|+')),
suffix
))
def make_alias_redirect_url(self, path, endpoint, values, method, query_args):
"""Internally called to make an alias redirect URL."""
url = self.build(endpoint, values, method, append_unknown=False,
force_external=True)
if query_args:
url += '?' + self.encode_query_args(query_args)
assert url != path, 'detected invalid alias setting. No canonical ' \
'URL found'
return url
def _partial_build(self, endpoint, values, method, append_unknown):
"""Helper for :meth:`build`. Returns subdomain and path for the
rule that accepts this endpoint, values and method.
:internal:
"""
# in case the method is none, try with the default method first
if method is None:
rv = self._partial_build(endpoint, values, self.default_method,
append_unknown)
if rv is not None:
return rv
# default method did not match or a specific method is passed,
# check all and go with first result.
for rule in self.map._rules_by_endpoint.get(endpoint, ()):
if rule.suitable_for(values, method):
rv = rule.build(values, append_unknown)
if rv is not None:
return rv
def build(self, endpoint, values=None, method=None, force_external=False,
append_unknown=True):
"""Building URLs works pretty much the other way round. Instead of
`match` you call `build` and pass it the endpoint and a dict of
arguments for the placeholders.
The `build` function also accepts an argument called `force_external`
        which, if set to `True`, will force external URLs. By default
        external URLs (including the server name) will only be used if the
target URL is on a different subdomain.
>>> m = Map([
... Rule('/', endpoint='index'),
... Rule('/downloads/', endpoint='downloads/index'),
... Rule('/downloads/<int:id>', endpoint='downloads/show')
... ])
>>> urls = m.bind("example.com", "/")
>>> urls.build("index", {})
'/'
>>> urls.build("downloads/show", {'id': 42})
'/downloads/42'
>>> urls.build("downloads/show", {'id': 42}, force_external=True)
'http://example.com/downloads/42'
        Because URLs cannot contain non-ASCII data you will always get
        bytestrings back. Non-ASCII characters are urlencoded with the
charset defined on the map instance.
Additional values are converted to unicode and appended to the URL as
URL querystring parameters:
>>> urls.build("index", {'q': 'My Searchstring'})
'/?q=My+Searchstring'
If a rule does not exist when building a `BuildError` exception is
raised.
The build method accepts an argument called `method` which allows you
        to specify the method you want to have a URL built for if you have
different methods for the same endpoint specified.
.. versionadded:: 0.6
the `append_unknown` parameter was added.
:param endpoint: the endpoint of the URL to build.
:param values: the values for the URL to build. Unhandled values are
appended to the URL as query parameters.
:param method: the HTTP method for the rule if there are different
URLs for different methods on the same endpoint.
:param force_external: enforce full canonical external URLs.
:param append_unknown: unknown parameters are appended to the generated
URL as query string argument. Disable this
if you want the builder to ignore those.
"""
self.map.update()
if values:
if isinstance(values, MultiDict):
valueiter = values.iteritems(multi=True)
else:
valueiter = iteritems(values)
values = dict((k, v) for k, v in valueiter if v is not None)
else:
values = {}
rv = self._partial_build(endpoint, values, method, append_unknown)
if rv is None:
raise BuildError(endpoint, values, method)
domain_part, path = rv
host = self.get_host(domain_part)
# shortcut this.
if not force_external and (
(self.map.host_matching and host == self.server_name) or
(not self.map.host_matching and domain_part == self.subdomain)):
return str(urljoin(self.script_name, './' + path.lstrip('/')))
return str('%s://%s%s/%s' % (
self.url_scheme,
host,
self.script_name[:-1],
path.lstrip('/')
))
| apache-2.0 |
robskillington/google-diff-match-patch | python2/diff_match_patch.py | 337 | 67934 | #!/usr/bin/python2.4
from __future__ import division
"""Diff Match and Patch
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Functions for diff, match and patch.
Computes the difference between two texts to create a patch.
Applies the patch onto another text, allowing for errors.
"""
__author__ = '[email protected] (Neil Fraser)'
import math
import re
import sys
import time
import urllib
class diff_match_patch:
"""Class containing the diff, match and patch methods.
Also contains the behaviour settings.
"""
def __init__(self):
"""Inits a diff_match_patch object with default settings.
Redefine these in your program to override the defaults.
"""
# Number of seconds to map a diff before giving up (0 for infinity).
self.Diff_Timeout = 1.0
# Cost of an empty edit operation in terms of edit characters.
self.Diff_EditCost = 4
# At what point is no match declared (0.0 = perfection, 1.0 = very loose).
self.Match_Threshold = 0.5
# How far to search for a match (0 = exact location, 1000+ = broad match).
# A match this many characters away from the expected location will add
# 1.0 to the score (0.0 is a perfect match).
self.Match_Distance = 1000
# When deleting a large block of text (over ~64 characters), how close do
# the contents have to be to match the expected contents. (0.0 = perfection,
# 1.0 = very loose). Note that Match_Threshold controls how closely the
# end points of a delete need to match.
self.Patch_DeleteThreshold = 0.5
# Chunk size for context length.
self.Patch_Margin = 4
# The number of bits in an int.
# Python has no maximum, thus to disable patch splitting set to 0.
# However to avoid long patches in certain pathological cases, use 32.
# Multiple short patches (using native ints) are much faster than long ones.
self.Match_MaxBits = 32
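  # Illustrative note (not from the upstream source): with the defaults above,
  # match_main scores a candidate location roughly as
  #   errors / len(pattern) + abs(loc - candidate) / Match_Distance
  # so an error-free match found 500 characters from the expected location
  # scores 0.5 and is only just accepted by Match_Threshold = 0.5.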
# DIFF FUNCTIONS
# The data structure representing a diff is an array of tuples:
# [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
# which means: delete "Hello", add "Goodbye" and keep " world."
DIFF_DELETE = -1
DIFF_INSERT = 1
DIFF_EQUAL = 0
def diff_main(self, text1, text2, checklines=True, deadline=None):
"""Find the differences between two texts. Simplifies the problem by
stripping any common prefix or suffix off the texts before diffing.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Optional speedup flag. If present and false, then don't run
a line-level diff first to identify the changed areas.
Defaults to true, which does a faster, slightly less optimal diff.
deadline: Optional time when the diff should be complete by. Used
internally for recursive calls. Users should set DiffTimeout instead.
Returns:
Array of changes.
"""
# Set a deadline by which time the diff must be complete.
if deadline == None:
# Unlike in most languages, Python counts time in seconds.
if self.Diff_Timeout <= 0:
deadline = sys.maxint
else:
deadline = time.time() + self.Diff_Timeout
# Check for null inputs.
if text1 == None or text2 == None:
raise ValueError("Null inputs. (diff_main)")
# Check for equality (speedup).
if text1 == text2:
if text1:
return [(self.DIFF_EQUAL, text1)]
return []
# Trim off common prefix (speedup).
commonlength = self.diff_commonPrefix(text1, text2)
commonprefix = text1[:commonlength]
text1 = text1[commonlength:]
text2 = text2[commonlength:]
# Trim off common suffix (speedup).
commonlength = self.diff_commonSuffix(text1, text2)
if commonlength == 0:
commonsuffix = ''
else:
commonsuffix = text1[-commonlength:]
text1 = text1[:-commonlength]
text2 = text2[:-commonlength]
# Compute the diff on the middle block.
diffs = self.diff_compute(text1, text2, checklines, deadline)
# Restore the prefix and suffix.
if commonprefix:
diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
if commonsuffix:
diffs.append((self.DIFF_EQUAL, commonsuffix))
self.diff_cleanupMerge(diffs)
return diffs
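  # Example sketch (not from the upstream source; the exact tuples can vary
  # with the cleanup passes, but simple cases are deterministic):
  #
  #   >>> dmp = diff_match_patch()
  #   >>> dmp.diff_main("Hello", "Hullo")
  #   [(0, 'H'), (-1, 'e'), (1, 'u'), (0, 'llo')]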
def diff_compute(self, text1, text2, checklines, deadline):
"""Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
if not text1:
# Just add some text (speedup).
return [(self.DIFF_INSERT, text2)]
if not text2:
# Just delete some text (speedup).
return [(self.DIFF_DELETE, text1)]
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
i = longtext.find(shorttext)
if i != -1:
# Shorter text is inside the longer text (speedup).
diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
(self.DIFF_INSERT, longtext[i + len(shorttext):])]
# Swap insertions for deletions if diff is reversed.
if len(text1) > len(text2):
diffs[0] = (self.DIFF_DELETE, diffs[0][1])
diffs[2] = (self.DIFF_DELETE, diffs[2][1])
return diffs
if len(shorttext) == 1:
# Single character string.
# After the previous speedup, the character can't be an equality.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
# Check to see if the problem can be split in two.
hm = self.diff_halfMatch(text1, text2)
if hm:
# A half-match was found, sort out the return data.
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
# Send both pairs off for separate processing.
diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
# Merge the results.
return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
if checklines and len(text1) > 100 and len(text2) > 100:
return self.diff_lineMode(text1, text2, deadline)
return self.diff_bisect(text1, text2, deadline)
def diff_lineMode(self, text1, text2, deadline):
"""Do a quick line-level diff on both strings, then rediff the parts for
greater accuracy.
This speedup can produce non-minimal diffs.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
# Scan the text on a line-by-line basis first.
(text1, text2, linearray) = self.diff_linesToChars(text1, text2)
diffs = self.diff_main(text1, text2, False, deadline)
# Convert the diff back to original text.
self.diff_charsToLines(diffs, linearray)
# Eliminate freak matches (e.g. blank lines)
self.diff_cleanupSemantic(diffs)
# Rediff any replacement blocks, this time character-by-character.
# Add a dummy entry at the end.
diffs.append((self.DIFF_EQUAL, ''))
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
elif diffs[pointer][0] == self.DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
elif diffs[pointer][0] == self.DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete >= 1 and count_insert >= 1:
# Delete the offending records and add the merged ones.
a = self.diff_main(text_delete, text_insert, False, deadline)
diffs[pointer - count_delete - count_insert : pointer] = a
pointer = pointer - count_delete - count_insert + len(a)
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
pointer += 1
diffs.pop() # Remove the dummy entry at the end.
return diffs
def diff_bisect(self, text1, text2, deadline):
"""Find the 'middle snake' of a diff, split the problem in two
and return the recursively constructed diff.
See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
# Cache the text lengths to prevent multiple calls.
text1_length = len(text1)
text2_length = len(text2)
max_d = (text1_length + text2_length + 1) // 2
v_offset = max_d
v_length = 2 * max_d
v1 = [-1] * v_length
v1[v_offset + 1] = 0
v2 = v1[:]
delta = text1_length - text2_length
# If the total number of characters is odd, then the front path will
# collide with the reverse path.
front = (delta % 2 != 0)
# Offsets for start and end of k loop.
# Prevents mapping of space beyond the grid.
k1start = 0
k1end = 0
k2start = 0
k2end = 0
for d in xrange(max_d):
# Bail out if deadline is reached.
if time.time() > deadline:
break
# Walk the front path one step.
for k1 in xrange(-d + k1start, d + 1 - k1end, 2):
k1_offset = v_offset + k1
if k1 == -d or (k1 != d and
v1[k1_offset - 1] < v1[k1_offset + 1]):
x1 = v1[k1_offset + 1]
else:
x1 = v1[k1_offset - 1] + 1
y1 = x1 - k1
while (x1 < text1_length and y1 < text2_length and
text1[x1] == text2[y1]):
x1 += 1
y1 += 1
v1[k1_offset] = x1
if x1 > text1_length:
# Ran off the right of the graph.
k1end += 2
elif y1 > text2_length:
# Ran off the bottom of the graph.
k1start += 2
elif front:
k2_offset = v_offset + delta - k1
if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
# Mirror x2 onto top-left coordinate system.
x2 = text1_length - v2[k2_offset]
if x1 >= x2:
# Overlap detected.
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
# Walk the reverse path one step.
for k2 in xrange(-d + k2start, d + 1 - k2end, 2):
k2_offset = v_offset + k2
if k2 == -d or (k2 != d and
v2[k2_offset - 1] < v2[k2_offset + 1]):
x2 = v2[k2_offset + 1]
else:
x2 = v2[k2_offset - 1] + 1
y2 = x2 - k2
while (x2 < text1_length and y2 < text2_length and
text1[-x2 - 1] == text2[-y2 - 1]):
x2 += 1
y2 += 1
v2[k2_offset] = x2
if x2 > text1_length:
# Ran off the left of the graph.
k2end += 2
elif y2 > text2_length:
# Ran off the top of the graph.
k2start += 2
elif not front:
k1_offset = v_offset + delta - k2
if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
x1 = v1[k1_offset]
y1 = v_offset + x1 - k1_offset
# Mirror x2 onto top-left coordinate system.
x2 = text1_length - x2
if x1 >= x2:
# Overlap detected.
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
# Diff took too long and hit the deadline or
# number of diffs equals number of characters, no commonality at all.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
def diff_bisectSplit(self, text1, text2, x, y, deadline):
"""Given the location of the 'middle snake', split the diff in two parts
and recurse.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
x: Index of split point in text1.
y: Index of split point in text2.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]
# Compute both diffs serially.
diffs = self.diff_main(text1a, text2a, False, deadline)
diffsb = self.diff_main(text1b, text2b, False, deadline)
return diffs + diffsb
def diff_linesToChars(self, text1, text2):
"""Split two texts into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Args:
text1: First string.
text2: Second string.
Returns:
Three element tuple, containing the encoded text1, the encoded text2 and
the array of unique strings. The zeroth element of the array of unique
strings is intentionally blank.
"""
lineArray = [] # e.g. lineArray[4] == "Hello\n"
lineHash = {} # e.g. lineHash["Hello\n"] == 4
# "\x00" is a valid character, but various debuggers don't like it.
# So we'll insert a junk entry to avoid generating a null character.
lineArray.append('')
def diff_linesToCharsMunge(text):
"""Split a text into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Modifies linearray and linehash through being a closure.
Args:
text: String to encode.
Returns:
Encoded string.
"""
chars = []
# Walk the text, pulling out a substring for each line.
      # text.split('\n') would temporarily double our memory footprint.
# Modifying text would create many large strings to garbage collect.
lineStart = 0
lineEnd = -1
while lineEnd < len(text) - 1:
lineEnd = text.find('\n', lineStart)
if lineEnd == -1:
lineEnd = len(text) - 1
line = text[lineStart:lineEnd + 1]
lineStart = lineEnd + 1
if line in lineHash:
chars.append(unichr(lineHash[line]))
else:
lineArray.append(line)
lineHash[line] = len(lineArray) - 1
chars.append(unichr(len(lineArray) - 1))
return "".join(chars)
chars1 = diff_linesToCharsMunge(text1)
chars2 = diff_linesToCharsMunge(text2)
return (chars1, chars2, lineArray)
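  # Illustrative example (not from the upstream source): each unique line is
  # mapped to a one-character hash, e.g. with dmp = diff_match_patch():
  #
  #   >>> dmp.diff_linesToChars("alpha\nbeta\n", "beta\nalpha\n")
  #   ('\x01\x02', '\x02\x01', ['', 'alpha\n', 'beta\n'])
  #
  # diff_charsToLines below reverses the mapping on the resulting diff.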
def diff_charsToLines(self, diffs, lineArray):
"""Rehydrate the text in a diff from a string of line hashes to real lines
of text.
Args:
diffs: Array of diff tuples.
lineArray: Array of unique strings.
"""
for x in xrange(len(diffs)):
text = []
for char in diffs[x][1]:
text.append(lineArray[ord(char)])
diffs[x] = (diffs[x][0], "".join(text))
def diff_commonPrefix(self, text1, text2):
"""Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
return 0
# Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def diff_commonSuffix(self, text1, text2):
"""Determine the common suffix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the end of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[-1] != text2[-1]:
return 0
# Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
if (text1[-pointermid:len(text1) - pointerend] ==
text2[-pointermid:len(text2) - pointerend]):
pointermin = pointermid
pointerend = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def diff_commonOverlap(self, text1, text2):
"""Determine if the suffix of one string is the prefix of another.
Args:
text1 First string.
text2 Second string.
Returns:
The number of characters common to the end of the first
string and the start of the second string.
"""
# Cache the text lengths to prevent multiple calls.
text1_length = len(text1)
text2_length = len(text2)
# Eliminate the null case.
if text1_length == 0 or text2_length == 0:
return 0
# Truncate the longer string.
if text1_length > text2_length:
text1 = text1[-text2_length:]
elif text1_length < text2_length:
text2 = text2[:text1_length]
text_length = min(text1_length, text2_length)
# Quick check for the worst case.
if text1 == text2:
return text_length
# Start by looking for a single character match
# and increase length until no match is found.
# Performance analysis: http://neil.fraser.name/news/2010/11/04/
best = 0
length = 1
while True:
pattern = text1[-length:]
found = text2.find(pattern)
if found == -1:
return best
length += found
if found == 0 or text1[-length:] == text2[:length]:
best = length
length += 1
def diff_halfMatch(self, text1, text2):
"""Do the two texts share a substring which is at least half the length of
the longer text?
This speedup can produce non-minimal diffs.
Args:
text1: First string.
text2: Second string.
Returns:
Five element Array, containing the prefix of text1, the suffix of text1,
the prefix of text2, the suffix of text2 and the common middle. Or None
if there was no match.
"""
if self.Diff_Timeout <= 0:
# Don't risk returning a non-optimal diff if we have unlimited time.
return None
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
return None # Pointless.
def diff_halfMatchI(longtext, shorttext, i):
"""Does a substring of shorttext exist within longtext such that the
substring is at least half the length of longtext?
Closure, but does not reference any external variables.
Args:
longtext: Longer string.
shorttext: Shorter string.
i: Start index of quarter length substring within longtext.
Returns:
Five element Array, containing the prefix of longtext, the suffix of
longtext, the prefix of shorttext, the suffix of shorttext and the
common middle. Or None if there was no match.
"""
seed = longtext[i:i + len(longtext) // 4]
best_common = ''
j = shorttext.find(seed)
while j != -1:
prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
if len(best_common) < suffixLength + prefixLength:
best_common = (shorttext[j - suffixLength:j] +
shorttext[j:j + prefixLength])
best_longtext_a = longtext[:i - suffixLength]
best_longtext_b = longtext[i + prefixLength:]
best_shorttext_a = shorttext[:j - suffixLength]
best_shorttext_b = shorttext[j + prefixLength:]
j = shorttext.find(seed, j + 1)
if len(best_common) * 2 >= len(longtext):
return (best_longtext_a, best_longtext_b,
best_shorttext_a, best_shorttext_b, best_common)
else:
return None
# First check if the second quarter is the seed for a half-match.
hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
# Check again based on the third quarter.
hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
if not hm1 and not hm2:
return None
elif not hm2:
hm = hm1
elif not hm1:
hm = hm2
else:
# Both matched. Select the longest.
if len(hm1[4]) > len(hm2[4]):
hm = hm1
else:
hm = hm2
# A half-match was found, sort out the return data.
if len(text1) > len(text2):
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
else:
(text2_a, text2_b, text1_a, text1_b, mid_common) = hm
return (text1_a, text1_b, text2_a, text2_b, mid_common)
def diff_cleanupSemantic(self, diffs):
"""Reduce the number of edits by eliminating semantically trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
# Number of chars that changed prior to the equality.
length_insertions1, length_deletions1 = 0, 0
# Number of chars that changed after the equality.
length_insertions2, length_deletions2 = 0, 0
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
equalities.append(pointer)
length_insertions1, length_insertions2 = length_insertions2, 0
length_deletions1, length_deletions2 = length_deletions2, 0
lastequality = diffs[pointer][1]
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_INSERT:
length_insertions2 += len(diffs[pointer][1])
else:
length_deletions2 += len(diffs[pointer][1])
# Eliminate an equality that is smaller or equal to the edits on both
# sides of it.
if (lastequality and (len(lastequality) <=
max(length_insertions1, length_deletions1)) and
(len(lastequality) <= max(length_insertions2, length_deletions2))):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
# Throw away the equality we just deleted.
equalities.pop()
# Throw away the previous equality (it needs to be reevaluated).
if len(equalities):
equalities.pop()
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
# Reset the counters.
length_insertions1, length_deletions1 = 0, 0
length_insertions2, length_deletions2 = 0, 0
lastequality = None
changes = True
pointer += 1
# Normalize the diff.
if changes:
self.diff_cleanupMerge(diffs)
self.diff_cleanupSemanticLossless(diffs)
# Find any overlaps between deletions and insertions.
# e.g: <del>abcxxx</del><ins>xxxdef</ins>
# -> <del>abc</del>xxx<ins>def</ins>
# e.g: <del>xxxabc</del><ins>defxxx</ins>
# -> <ins>def</ins>xxx<del>abc</del>
# Only extract an overlap if it is as big as the edit ahead or behind it.
pointer = 1
while pointer < len(diffs):
if (diffs[pointer - 1][0] == self.DIFF_DELETE and
diffs[pointer][0] == self.DIFF_INSERT):
deletion = diffs[pointer - 1][1]
insertion = diffs[pointer][1]
overlap_length1 = self.diff_commonOverlap(deletion, insertion)
overlap_length2 = self.diff_commonOverlap(insertion, deletion)
if overlap_length1 >= overlap_length2:
if (overlap_length1 >= len(deletion) / 2.0 or
overlap_length1 >= len(insertion) / 2.0):
# Overlap found. Insert an equality and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL,
insertion[:overlap_length1]))
diffs[pointer - 1] = (self.DIFF_DELETE,
deletion[:len(deletion) - overlap_length1])
diffs[pointer + 1] = (self.DIFF_INSERT,
insertion[overlap_length1:])
pointer += 1
else:
if (overlap_length2 >= len(deletion) / 2.0 or
overlap_length2 >= len(insertion) / 2.0):
# Reverse overlap found.
# Insert an equality and swap and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))
diffs[pointer - 1] = (self.DIFF_INSERT,
insertion[:len(insertion) - overlap_length2])
diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])
pointer += 1
pointer += 1
pointer += 1
def diff_cleanupSemanticLossless(self, diffs):
"""Look for single edits surrounded on both sides by equalities
which can be shifted sideways to align the edit to a word boundary.
e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
Args:
diffs: Array of diff tuples.
"""
def diff_cleanupSemanticScore(one, two):
"""Given two strings, compute a score representing whether the
internal boundary falls on logical boundaries.
Scores range from 6 (best) to 0 (worst).
Closure, but does not reference any external variables.
Args:
one: First string.
two: Second string.
Returns:
The score.
"""
if not one or not two:
# Edges are the best.
return 6
# Each port of this function behaves slightly differently due to
# subtle differences in each language's definition of things like
# 'whitespace'. Since this function's purpose is largely cosmetic,
# the choice has been made to use each language's native features
# rather than force total conformity.
char1 = one[-1]
char2 = two[0]
nonAlphaNumeric1 = not char1.isalnum()
nonAlphaNumeric2 = not char2.isalnum()
whitespace1 = nonAlphaNumeric1 and char1.isspace()
whitespace2 = nonAlphaNumeric2 and char2.isspace()
lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)
if blankLine1 or blankLine2:
# Five points for blank lines.
return 5
elif lineBreak1 or lineBreak2:
# Four points for line breaks.
return 4
elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
# Three points for end of sentences.
return 3
elif whitespace1 or whitespace2:
# Two points for whitespace.
return 2
elif nonAlphaNumeric1 or nonAlphaNumeric2:
# One point for non-alphanumeric.
return 1
return 0
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
equality1 = diffs[pointer - 1][1]
edit = diffs[pointer][1]
equality2 = diffs[pointer + 1][1]
# First, shift the edit as far left as possible.
commonOffset = self.diff_commonSuffix(equality1, edit)
if commonOffset:
commonString = edit[-commonOffset:]
equality1 = equality1[:-commonOffset]
edit = commonString + edit[:-commonOffset]
equality2 = commonString + equality2
# Second, step character by character right, looking for the best fit.
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
bestScore = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
while edit and equality2 and edit[0] == equality2[0]:
equality1 += edit[0]
edit = edit[1:] + equality2[0]
equality2 = equality2[1:]
score = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
# The >= encourages trailing rather than leading whitespace on edits.
if score >= bestScore:
bestScore = score
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
if diffs[pointer - 1][1] != bestEquality1:
# We have an improvement, save it back to the diff.
if bestEquality1:
diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
else:
del diffs[pointer - 1]
pointer -= 1
diffs[pointer] = (diffs[pointer][0], bestEdit)
if bestEquality2:
diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
else:
del diffs[pointer + 1]
pointer -= 1
pointer += 1
# Define some regex patterns for matching boundaries.
  BLANKLINEEND = re.compile(r"\n\r?\n$")
  BLANKLINESTART = re.compile(r"^\r?\n\r?\n")
def diff_cleanupEfficiency(self, diffs):
"""Reduce the number of edits by eliminating operationally trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
pre_ins = False # Is there an insertion operation before the last equality.
pre_del = False # Is there a deletion operation before the last equality.
post_ins = False # Is there an insertion operation after the last equality.
post_del = False # Is there a deletion operation after the last equality.
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
if (len(diffs[pointer][1]) < self.Diff_EditCost and
(post_ins or post_del)):
# Candidate found.
equalities.append(pointer)
pre_ins = post_ins
pre_del = post_del
lastequality = diffs[pointer][1]
else:
# Not a candidate, and can never become one.
equalities = []
lastequality = None
post_ins = post_del = False
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_DELETE:
post_del = True
else:
post_ins = True
# Five types to be split:
# <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
# <ins>A</ins>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<ins>C</ins>
        # <del>A</del>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<del>C</del>
if lastequality and ((pre_ins and pre_del and post_ins and post_del) or
((len(lastequality) < self.Diff_EditCost / 2) and
(pre_ins + pre_del + post_ins + post_del) == 3)):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
equalities.pop() # Throw away the equality we just deleted.
lastequality = None
if pre_ins and pre_del:
# No changes made which could affect previous entry, keep going.
post_ins = post_del = True
equalities = []
else:
if len(equalities):
equalities.pop() # Throw away the previous equality.
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
post_ins = post_del = False
changes = True
pointer += 1
if changes:
self.diff_cleanupMerge(diffs)
def diff_cleanupMerge(self, diffs):
"""Reorder and merge like edit sections. Merge equalities.
Any edit section can move as long as it doesn't cross an equality.
Args:
diffs: Array of diff tuples.
"""
diffs.append((self.DIFF_EQUAL, '')) # Add a dummy entry at the end.
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == self.DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == self.DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete + count_insert > 1:
if count_delete != 0 and count_insert != 0:
            # Factor out any common prefixes.
commonlength = self.diff_commonPrefix(text_insert, text_delete)
if commonlength != 0:
x = pointer - count_delete - count_insert - 1
if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
diffs[x] = (diffs[x][0], diffs[x][1] +
text_insert[:commonlength])
else:
diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
pointer += 1
text_insert = text_insert[commonlength:]
text_delete = text_delete[commonlength:]
            # Factor out any common suffixes.
commonlength = self.diff_commonSuffix(text_insert, text_delete)
if commonlength != 0:
diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] +
diffs[pointer][1])
text_insert = text_insert[:-commonlength]
text_delete = text_delete[:-commonlength]
# Delete the offending records and add the merged ones.
if count_delete == 0:
diffs[pointer - count_insert : pointer] = [
(self.DIFF_INSERT, text_insert)]
elif count_insert == 0:
diffs[pointer - count_delete : pointer] = [
(self.DIFF_DELETE, text_delete)]
else:
diffs[pointer - count_delete - count_insert : pointer] = [
(self.DIFF_DELETE, text_delete),
(self.DIFF_INSERT, text_insert)]
pointer = pointer - count_delete - count_insert + 1
if count_delete != 0:
pointer += 1
if count_insert != 0:
pointer += 1
elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
# Merge this equality with the previous one.
diffs[pointer - 1] = (diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer][1])
del diffs[pointer]
else:
pointer += 1
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
if diffs[-1][1] == '':
diffs.pop() # Remove the dummy entry at the end.
# Second pass: look for single edits surrounded on both sides by equalities
# which can be shifted sideways to eliminate an equality.
# e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
changes = False
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
# Shift the edit over the previous equality.
diffs[pointer] = (diffs[pointer][0],
diffs[pointer - 1][1] +
diffs[pointer][1][:-len(diffs[pointer - 1][1])])
diffs[pointer + 1] = (diffs[pointer + 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
del diffs[pointer - 1]
changes = True
elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
# Shift the edit over the next equality.
diffs[pointer - 1] = (diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
diffs[pointer] = (diffs[pointer][0],
diffs[pointer][1][len(diffs[pointer + 1][1]):] +
diffs[pointer + 1][1])
del diffs[pointer + 1]
changes = True
pointer += 1
# If shifts were made, the diff needs reordering and another shift sweep.
if changes:
self.diff_cleanupMerge(diffs)
def diff_xIndex(self, diffs, loc):
"""loc is a location in text1, compute and return the equivalent location
in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
Args:
diffs: Array of diff tuples.
loc: Location within text1.
Returns:
Location within text2.
"""
chars1 = 0
chars2 = 0
last_chars1 = 0
last_chars2 = 0
for x in xrange(len(diffs)):
(op, text) = diffs[x]
if op != self.DIFF_INSERT: # Equality or deletion.
chars1 += len(text)
if op != self.DIFF_DELETE: # Equality or insertion.
chars2 += len(text)
if chars1 > loc: # Overshot the location.
break
last_chars1 = chars1
last_chars2 = chars2
if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
# The location was deleted.
return last_chars2
# Add the remaining len(character).
return last_chars2 + (loc - last_chars1)
def diff_prettyHtml(self, diffs):
"""Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
"""
html = []
for (op, data) in diffs:
text = (data.replace("&", "&").replace("<", "<")
.replace(">", ">").replace("\n", "¶<br>"))
if op == self.DIFF_INSERT:
html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
elif op == self.DIFF_DELETE:
html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
elif op == self.DIFF_EQUAL:
html.append("<span>%s</span>" % text)
return "".join(html)
def diff_text1(self, diffs):
"""Compute and return the source text (all equalities and deletions).
Args:
diffs: Array of diff tuples.
Returns:
Source text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_INSERT:
text.append(data)
return "".join(text)
def diff_text2(self, diffs):
"""Compute and return the destination text (all equalities and insertions).
Args:
diffs: Array of diff tuples.
Returns:
Destination text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_DELETE:
text.append(data)
return "".join(text)
def diff_levenshtein(self, diffs):
"""Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes.
"""
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == self.DIFF_INSERT:
insertions += len(data)
elif op == self.DIFF_DELETE:
deletions += len(data)
elif op == self.DIFF_EQUAL:
# A deletion and an insertion is one substitution.
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
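  # Worked example (not from the upstream source; assumes the edits have been
  # grouped as shown): for diffs equivalent to
  #   [(-1, 'k'), (1, 's'), (0, 'itt'), (-1, 'e'), (1, 'i'), (0, 'n'), (1, 'g')]
  # the result is max(1, 1) + max(1, 1) + max(1, 0) = 3, matching the classic
  # "kitten" -> "sitting" Levenshtein distance.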
def diff_toDelta(self, diffs):
"""Crush the diff into an encoded string which describes the operations
required to transform text1 into text2.
E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
Operations are tab-separated. Inserted text is escaped using %xx notation.
Args:
diffs: Array of diff tuples.
Returns:
Delta text.
"""
text = []
for (op, data) in diffs:
if op == self.DIFF_INSERT:
# High ascii will raise UnicodeDecodeError. Use Unicode instead.
data = data.encode("utf-8")
text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,# "))
elif op == self.DIFF_DELETE:
text.append("-%d" % len(data))
elif op == self.DIFF_EQUAL:
text.append("=%d" % len(data))
return "\t".join(text)
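  # Round-trip sketch (not from the upstream source): the delta is compact
  # enough to transmit text2 as "text1 plus delta", e.g. something like
  #
  #   >>> delta = dmp.diff_toDelta(dmp.diff_main("Hello", "Hullo"))
  #   >>> delta
  #   '=1\t-1\t+u\t=3'
  #   >>> dmp.diff_fromDelta("Hello", delta)
  #   [(0, 'H'), (-1, 'e'), (1, 'u'), (0, 'llo')]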
def diff_fromDelta(self, text1, delta):
"""Given the original text1, and an encoded string which describes the
operations required to transform text1 into text2, compute the full diff.
Args:
text1: Source string for the diff.
delta: Delta text.
Returns:
Array of diff tuples.
Raises:
ValueError: If invalid input.
"""
if type(delta) == unicode:
# Deltas should be composed of a subset of ascii chars, Unicode not
# required. If this encode raises UnicodeEncodeError, delta is invalid.
delta = delta.encode("ascii")
diffs = []
pointer = 0 # Cursor in text1
tokens = delta.split("\t")
for token in tokens:
if token == "":
# Blank tokens are ok (from a trailing \t).
continue
# Each token begins with a one character parameter which specifies the
# operation of this token (delete, insert, equality).
param = token[1:]
if token[0] == "+":
param = urllib.unquote(param).decode("utf-8")
diffs.append((self.DIFF_INSERT, param))
elif token[0] == "-" or token[0] == "=":
try:
n = int(param)
except ValueError:
raise ValueError("Invalid number in diff_fromDelta: " + param)
if n < 0:
raise ValueError("Negative number in diff_fromDelta: " + param)
text = text1[pointer : pointer + n]
pointer += n
if token[0] == "=":
diffs.append((self.DIFF_EQUAL, text))
else:
diffs.append((self.DIFF_DELETE, text))
else:
# Anything else is an error.
raise ValueError("Invalid diff operation in diff_fromDelta: " +
token[0])
if pointer != len(text1):
raise ValueError(
"Delta length (%d) does not equal source text length (%d)." %
(pointer, len(text1)))
return diffs
# MATCH FUNCTIONS
def match_main(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc'.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Check for null inputs.
if text == None or pattern == None:
raise ValueError("Null inputs. (match_main)")
loc = max(0, min(loc, len(text)))
if text == pattern:
# Shortcut (potentially not guaranteed by the algorithm)
return 0
elif not text:
# Nothing to match.
return -1
elif text[loc:loc + len(pattern)] == pattern:
# Perfect match at the perfect spot! (Includes case of null pattern)
return loc
else:
# Do a fuzzy compare.
match = self.match_bitap(text, pattern, loc)
return match
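  # Usage sketch (not from the upstream source): an exact occurrence at `loc`
  # is returned directly, otherwise match_bitap performs the fuzzy search,
  # e.g.
  #
  #   >>> dmp.match_main("abcdef", "bcd", 1)
  #   1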
def match_bitap(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc' using the
Bitap algorithm.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Python doesn't have a maxint limit, so ignore this check.
#if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
# raise ValueError("Pattern too long for this application.")
# Initialise the alphabet.
s = self.match_alphabet(pattern)
def match_bitapScore(e, x):
"""Compute and return the score for a match with e errors and x location.
Accesses loc and pattern through being a closure.
Args:
e: Number of errors in match.
x: Location of match.
Returns:
Overall score for match (0.0 = good, 1.0 = bad).
"""
accuracy = float(e) / len(pattern)
proximity = abs(loc - x)
if not self.Match_Distance:
# Dodge divide by zero error.
return proximity and 1.0 or accuracy
return accuracy + (proximity / float(self.Match_Distance))
# Highest score beyond which we give up.
score_threshold = self.Match_Threshold
# Is there a nearby exact match? (speedup)
best_loc = text.find(pattern, loc)
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# What about in the other direction? (speedup)
best_loc = text.rfind(pattern, loc + len(pattern))
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# Initialise the bit arrays.
matchmask = 1 << (len(pattern) - 1)
best_loc = -1
bin_max = len(pattern) + len(text)
# Empty initialization added to appease pychecker.
last_rd = None
for d in xrange(len(pattern)):
# Scan for the best match each iteration allows for one more error.
# Run a binary search to determine how far from 'loc' we can stray at
# this error level.
bin_min = 0
bin_mid = bin_max
while bin_min < bin_mid:
if match_bitapScore(d, loc + bin_mid) <= score_threshold:
bin_min = bin_mid
else:
bin_max = bin_mid
bin_mid = (bin_max - bin_min) // 2 + bin_min
# Use the result from this iteration as the maximum for the next.
bin_max = bin_mid
start = max(1, loc - bin_mid + 1)
finish = min(loc + bin_mid, len(text)) + len(pattern)
rd = [0] * (finish + 2)
rd[finish + 1] = (1 << d) - 1
for j in xrange(finish, start - 1, -1):
if len(text) <= j - 1:
# Out of range.
charMatch = 0
else:
charMatch = s.get(text[j - 1], 0)
if d == 0: # First pass: exact match.
rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
else: # Subsequent passes: fuzzy match.
rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | (
((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
if rd[j] & matchmask:
score = match_bitapScore(d, j - 1)
# This match will almost certainly be better than any existing match.
# But check anyway.
if score <= score_threshold:
# Told you so.
score_threshold = score
best_loc = j - 1
if best_loc > loc:
# When passing loc, don't exceed our current distance from loc.
start = max(1, 2 * loc - best_loc)
else:
# Already passed loc, downhill from here on in.
break
# No hope for a (better) match at greater error levels.
if match_bitapScore(d + 1, loc) > score_threshold:
break
last_rd = rd
return best_loc
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in xrange(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
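  # Illustrative example (not from the upstream source): each character maps
  # to a bitmask of its positions in the pattern (most significant bit is the
  # first position), so match_alphabet("abc") yields
  #   {'a': 0b100, 'b': 0b010, 'c': 0b001}  i.e. a -> 4, b -> 2, c -> 1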
# PATCH FUNCTIONS
def patch_addContext(self, patch, text):
"""Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text.
"""
if len(text) == 0:
return
pattern = text[patch.start2 : patch.start2 + patch.length1]
padding = 0
# Look for the first and last matches of pattern in text. If two different
# matches are found, increase the pattern length.
while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==
0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -
self.Patch_Margin)):
padding += self.Patch_Margin
pattern = text[max(0, patch.start2 - padding) :
patch.start2 + patch.length1 + padding]
# Add one chunk for good luck.
padding += self.Patch_Margin
# Add the prefix.
prefix = text[max(0, patch.start2 - padding) : patch.start2]
if prefix:
patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
# Add the suffix.
suffix = text[patch.start2 + patch.length1 :
patch.start2 + patch.length1 + padding]
if suffix:
patch.diffs.append((self.DIFF_EQUAL, suffix))
# Roll back the start points.
patch.start1 -= len(prefix)
patch.start2 -= len(prefix)
# Extend lengths.
patch.length1 += len(prefix) + len(suffix)
patch.length2 += len(prefix) + len(suffix)
def patch_make(self, a, b=None, c=None):
"""Compute a list of patches to turn text1 into text2.
Use diffs if provided, otherwise compute it ourselves.
There are four ways to call this function, depending on what data is
available to the caller:
Method 1:
a = text1, b = text2
Method 2:
a = diffs
Method 3 (optimal):
a = text1, b = diffs
Method 4 (deprecated, use method 3):
a = text1, b = text2, c = diffs
Args:
a: text1 (methods 1,3,4) or Array of diff tuples for text1 to
text2 (method 2).
b: text2 (methods 1,4) or Array of diff tuples for text1 to
text2 (method 3) or undefined (method 2).
c: Array of diff tuples for text1 to text2 (method 4) or
undefined (methods 1,2,3).
Returns:
Array of Patch objects.
"""
text1 = None
diffs = None
# Note that texts may arrive as 'str' or 'unicode'.
if isinstance(a, basestring) and isinstance(b, basestring) and c is None:
# Method 1: text1, text2
# Compute diffs from text1 and text2.
text1 = a
diffs = self.diff_main(text1, b, True)
if len(diffs) > 2:
self.diff_cleanupSemantic(diffs)
self.diff_cleanupEfficiency(diffs)
elif isinstance(a, list) and b is None and c is None:
# Method 2: diffs
# Compute text1 from diffs.
diffs = a
text1 = self.diff_text1(diffs)
elif isinstance(a, basestring) and isinstance(b, list) and c is None:
# Method 3: text1, diffs
text1 = a
diffs = b
elif (isinstance(a, basestring) and isinstance(b, basestring) and
isinstance(c, list)):
# Method 4: text1, text2, diffs
# text2 is not used.
text1 = a
diffs = c
else:
raise ValueError("Unknown call format to patch_make.")
if not diffs:
return [] # Get rid of the None case.
patches = []
patch = patch_obj()
char_count1 = 0 # Number of characters into the text1 string.
char_count2 = 0 # Number of characters into the text2 string.
prepatch_text = text1 # Recreate the patches to determine context info.
postpatch_text = text1
for x in xrange(len(diffs)):
(diff_type, diff_text) = diffs[x]
if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
# A new patch starts here.
patch.start1 = char_count1
patch.start2 = char_count2
if diff_type == self.DIFF_INSERT:
# Insertion
patch.diffs.append(diffs[x])
patch.length2 += len(diff_text)
postpatch_text = (postpatch_text[:char_count2] + diff_text +
postpatch_text[char_count2:])
elif diff_type == self.DIFF_DELETE:
# Deletion.
patch.length1 += len(diff_text)
patch.diffs.append(diffs[x])
postpatch_text = (postpatch_text[:char_count2] +
postpatch_text[char_count2 + len(diff_text):])
elif (diff_type == self.DIFF_EQUAL and
len(diff_text) <= 2 * self.Patch_Margin and
len(patch.diffs) != 0 and len(diffs) != x + 1):
# Small equality inside a patch.
patch.diffs.append(diffs[x])
patch.length1 += len(diff_text)
patch.length2 += len(diff_text)
if (diff_type == self.DIFF_EQUAL and
len(diff_text) >= 2 * self.Patch_Margin):
# Time for a new patch.
if len(patch.diffs) != 0:
self.patch_addContext(patch, prepatch_text)
patches.append(patch)
patch = patch_obj()
# Unlike Unidiff, our patch lists have a rolling context.
# http://code.google.com/p/google-diff-match-patch/wiki/Unidiff
# Update prepatch text & pos to reflect the application of the
# just completed patch.
prepatch_text = postpatch_text
char_count1 = char_count2
# Update the current character count.
if diff_type != self.DIFF_INSERT:
char_count1 += len(diff_text)
if diff_type != self.DIFF_DELETE:
char_count2 += len(diff_text)
# Pick up the leftover patch if not empty.
if len(patch.diffs) != 0:
self.patch_addContext(patch, prepatch_text)
patches.append(patch)
return patches
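  # Typical usage sketch (not from the upstream source; "method 1" above,
  # with hypothetical old_text/new_text strings):
  #
  #   >>> patches = dmp.patch_make(old_text, new_text)
  #   >>> patched_text, applied = dmp.patch_apply(patches, old_text)
  #
  # where `applied` is a list of booleans, one per patch, as described in
  # patch_apply below.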
def patch_deepCopy(self, patches):
"""Given an array of patches, return another array that is identical.
Args:
patches: Array of Patch objects.
Returns:
Array of Patch objects.
"""
patchesCopy = []
for patch in patches:
patchCopy = patch_obj()
# No need to deep copy the tuples since they are immutable.
patchCopy.diffs = patch.diffs[:]
patchCopy.start1 = patch.start1
patchCopy.start2 = patch.start2
patchCopy.length1 = patch.length1
patchCopy.length2 = patch.length2
patchesCopy.append(patchCopy)
return patchesCopy
def patch_apply(self, patches, text):
"""Merge a set of patches onto the text. Return a patched text, as well
as a list of true/false values indicating which patches were applied.
Args:
patches: Array of Patch objects.
text: Old text.
Returns:
Two element Array, containing the new text and an array of boolean values.
"""
if not patches:
return (text, [])
# Deep copy the patches so that no changes are made to originals.
patches = self.patch_deepCopy(patches)
nullPadding = self.patch_addPadding(patches)
text = nullPadding + text + nullPadding
self.patch_splitMax(patches)
# delta keeps track of the offset between the expected and actual location
# of the previous patch. If there are patches expected at positions 10 and
# 20, but the first patch was found at 12, delta is 2 and the second patch
# has an effective expected position of 22.
delta = 0
results = []
for patch in patches:
expected_loc = patch.start2 + delta
text1 = self.diff_text1(patch.diffs)
end_loc = -1
if len(text1) > self.Match_MaxBits:
# patch_splitMax will only provide an oversized pattern in the case of
# a monster delete.
start_loc = self.match_main(text, text1[:self.Match_MaxBits],
expected_loc)
if start_loc != -1:
end_loc = self.match_main(text, text1[-self.Match_MaxBits:],
expected_loc + len(text1) - self.Match_MaxBits)
if end_loc == -1 or start_loc >= end_loc:
# Can't find valid trailing context. Drop this patch.
start_loc = -1
else:
start_loc = self.match_main(text, text1, expected_loc)
if start_loc == -1:
# No match found. :(
results.append(False)
# Subtract the delta for this failed patch from subsequent patches.
delta -= patch.length2 - patch.length1
else:
# Found a match. :)
results.append(True)
delta = start_loc - expected_loc
if end_loc == -1:
text2 = text[start_loc : start_loc + len(text1)]
else:
text2 = text[start_loc : end_loc + self.Match_MaxBits]
if text1 == text2:
# Perfect match, just shove the replacement text in.
text = (text[:start_loc] + self.diff_text2(patch.diffs) +
text[start_loc + len(text1):])
else:
# Imperfect match.
# Run a diff to get a framework of equivalent indices.
diffs = self.diff_main(text1, text2, False)
if (len(text1) > self.Match_MaxBits and
self.diff_levenshtein(diffs) / float(len(text1)) >
self.Patch_DeleteThreshold):
# The end points match, but the content is unacceptably bad.
results[-1] = False
else:
self.diff_cleanupSemanticLossless(diffs)
index1 = 0
for (op, data) in patch.diffs:
if op != self.DIFF_EQUAL:
index2 = self.diff_xIndex(diffs, index1)
if op == self.DIFF_INSERT: # Insertion
text = text[:start_loc + index2] + data + text[start_loc +
index2:]
elif op == self.DIFF_DELETE: # Deletion
text = text[:start_loc + index2] + text[start_loc +
self.diff_xIndex(diffs, index1 + len(data)):]
if op != self.DIFF_DELETE:
index1 += len(data)
# Strip the padding off.
text = text[len(nullPadding):-len(nullPadding)]
return (text, results)
def patch_addPadding(self, patches):
"""Add some padding on text start and end so that edges can match
something. Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
Returns:
The padding string added to each side.
"""
paddingLength = self.Patch_Margin
nullPadding = ""
for x in xrange(1, paddingLength + 1):
nullPadding += chr(x)
# Bump all the patches forward.
for patch in patches:
patch.start1 += paddingLength
patch.start2 += paddingLength
# Add some padding on start of first diff.
patch = patches[0]
diffs = patch.diffs
if not diffs or diffs[0][0] != self.DIFF_EQUAL:
# Add nullPadding equality.
diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
patch.start1 -= paddingLength # Should be 0.
patch.start2 -= paddingLength # Should be 0.
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[0][1]):
# Grow first equality.
extraLength = paddingLength - len(diffs[0][1])
newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]
diffs[0] = (diffs[0][0], newText)
patch.start1 -= extraLength
patch.start2 -= extraLength
patch.length1 += extraLength
patch.length2 += extraLength
# Add some padding on end of last diff.
patch = patches[-1]
diffs = patch.diffs
if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
# Add nullPadding equality.
diffs.append((self.DIFF_EQUAL, nullPadding))
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[-1][1]):
# Grow last equality.
extraLength = paddingLength - len(diffs[-1][1])
newText = diffs[-1][1] + nullPadding[:extraLength]
diffs[-1] = (diffs[-1][0], newText)
patch.length1 += extraLength
patch.length2 += extraLength
return nullPadding
def patch_splitMax(self, patches):
"""Look through the patches and break up any which are longer than the
maximum limit of the match algorithm.
Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
"""
patch_size = self.Match_MaxBits
if patch_size == 0:
# Python has the option of not splitting strings due to its ability
# to handle integers of arbitrary precision.
return
for x in xrange(len(patches)):
if patches[x].length1 <= patch_size:
continue
bigpatch = patches[x]
# Remove the big old patch.
del patches[x]
x -= 1
start1 = bigpatch.start1
start2 = bigpatch.start2
precontext = ''
while len(bigpatch.diffs) != 0:
# Create one of several smaller patches.
patch = patch_obj()
empty = True
patch.start1 = start1 - len(precontext)
patch.start2 = start2 - len(precontext)
if precontext:
patch.length1 = patch.length2 = len(precontext)
patch.diffs.append((self.DIFF_EQUAL, precontext))
while (len(bigpatch.diffs) != 0 and
patch.length1 < patch_size - self.Patch_Margin):
(diff_type, diff_text) = bigpatch.diffs[0]
if diff_type == self.DIFF_INSERT:
# Insertions are harmless.
patch.length2 += len(diff_text)
start2 += len(diff_text)
patch.diffs.append(bigpatch.diffs.pop(0))
empty = False
elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and
patch.diffs[0][0] == self.DIFF_EQUAL and
len(diff_text) > 2 * patch_size):
# This is a large deletion. Let it pass in one chunk.
patch.length1 += len(diff_text)
start1 += len(diff_text)
empty = False
patch.diffs.append((diff_type, diff_text))
del bigpatch.diffs[0]
else:
# Deletion or equality. Only take as much as we can stomach.
diff_text = diff_text[:patch_size - patch.length1 -
self.Patch_Margin]
patch.length1 += len(diff_text)
start1 += len(diff_text)
if diff_type == self.DIFF_EQUAL:
patch.length2 += len(diff_text)
start2 += len(diff_text)
else:
empty = False
patch.diffs.append((diff_type, diff_text))
if diff_text == bigpatch.diffs[0][1]:
del bigpatch.diffs[0]
else:
bigpatch.diffs[0] = (bigpatch.diffs[0][0],
bigpatch.diffs[0][1][len(diff_text):])
# Compute the head context for the next patch.
precontext = self.diff_text2(patch.diffs)
precontext = precontext[-self.Patch_Margin:]
# Append the end context for this patch.
postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
if postcontext:
patch.length1 += len(postcontext)
patch.length2 += len(postcontext)
if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +
postcontext)
else:
patch.diffs.append((self.DIFF_EQUAL, postcontext))
if not empty:
x += 1
patches.insert(x, patch)
def patch_toText(self, patches):
"""Take a list of patches and return a textual representation.
Args:
patches: Array of Patch objects.
Returns:
Text representation of patches.
"""
text = []
for patch in patches:
text.append(str(patch))
return "".join(text)
def patch_fromText(self, textline):
"""Parse a textual representation of patches and return a list of patch
objects.
Args:
textline: Text representation of patches.
Returns:
Array of Patch objects.
Raises:
ValueError: If invalid input.
"""
if type(textline) == unicode:
# Patches should be composed of a subset of ascii chars, Unicode not
# required. If this encode raises UnicodeEncodeError, patch is invalid.
textline = textline.encode("ascii")
patches = []
if not textline:
return patches
text = textline.split('\n')
while len(text) != 0:
m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
if not m:
raise ValueError("Invalid patch string: " + text[0])
patch = patch_obj()
patches.append(patch)
patch.start1 = int(m.group(1))
if m.group(2) == '':
patch.start1 -= 1
patch.length1 = 1
elif m.group(2) == '0':
patch.length1 = 0
else:
patch.start1 -= 1
patch.length1 = int(m.group(2))
patch.start2 = int(m.group(3))
if m.group(4) == '':
patch.start2 -= 1
patch.length2 = 1
elif m.group(4) == '0':
patch.length2 = 0
else:
patch.start2 -= 1
patch.length2 = int(m.group(4))
del text[0]
while len(text) != 0:
if text[0]:
sign = text[0][0]
else:
sign = ''
line = urllib.unquote(text[0][1:])
line = line.decode("utf-8")
if sign == '+':
# Insertion.
patch.diffs.append((self.DIFF_INSERT, line))
elif sign == '-':
# Deletion.
patch.diffs.append((self.DIFF_DELETE, line))
elif sign == ' ':
# Minor equality.
patch.diffs.append((self.DIFF_EQUAL, line))
elif sign == '@':
# Start of next patch.
break
elif sign == '':
# Blank line? Whatever.
pass
else:
# WTF?
raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
del text[0]
return patches
class patch_obj:
"""Class representing one patch operation.
"""
def __init__(self):
"""Initializes with an empty list of diffs.
"""
self.diffs = []
self.start1 = None
self.start2 = None
self.length1 = 0
self.length2 = 0
def __str__(self):
"""Emmulate GNU diff's format.
Header: @@ -382,8 +481,9 @@
Indicies are printed as 1-based, not 0-based.
Returns:
The GNU diff string.
"""
if self.length1 == 0:
coords1 = str(self.start1) + ",0"
elif self.length1 == 1:
coords1 = str(self.start1 + 1)
else:
coords1 = str(self.start1 + 1) + "," + str(self.length1)
if self.length2 == 0:
coords2 = str(self.start2) + ",0"
elif self.length2 == 1:
coords2 = str(self.start2 + 1)
else:
coords2 = str(self.start2 + 1) + "," + str(self.length2)
text = ["@@ -", coords1, " +", coords2, " @@\n"]
# Escape the body of the patch with %xx notation.
for (op, data) in self.diffs:
if op == diff_match_patch.DIFF_INSERT:
text.append("+")
elif op == diff_match_patch.DIFF_DELETE:
text.append("-")
elif op == diff_match_patch.DIFF_EQUAL:
text.append(" ")
# High ascii will raise UnicodeDecodeError. Use Unicode instead.
data = data.encode("utf-8")
text.append(urllib.quote(data, "!~*'();/?:@&=+$,# ") + "\n")
return "".join(text)
| apache-2.0 |
tobiasgehring/qudi | hardware/motor/motor_stage_pi.py | 1 | 28192 | # -*- coding: utf-8 -*-
"""
This file contains the hardware control of the motorized stage for PI.
Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.
Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""
import visa
import time
from collections import OrderedDict
from core.base import Base
from interface.motor_interface import MotorInterface
class MotorStagePI(Base, MotorInterface):
"""unstable: Christoph Müller, Simon Schmitt
    This is the hardware class to define the controls for the PI
    motorized stage.
"""
_modclass = 'MotorStagePI'
_modtype = 'hardware'
def __init__(self, **kwargs):
super().__init__(**kwargs)
def on_activate(self):
""" Initialisation performed during activation of the module.
@return: error code
"""
# Read configs from config-file
config = self.getConfiguration()
# get the right com-ports from config
if 'com_port_pi_xyz' in config.keys():
self._com_port_pi_xyz = config['com_port_pi_xyz']
else:
self.log.error('No parameter "com_port_pi_xyz" found in config.\n'
'Cannot connect to motorized stage!')
# get the the right baud rates from config
if 'pi_xyz_baud_rate' in config.keys():
self._pi_xyz_baud_rate = config['pi_xyz_baud_rate']
else:
self._pi_xyz_baud_rate = 9600
self.log.warning('No parameter "pi_xyz_baud_rate" found in '
                             'config!\nTaking the baud rate {0} '
                             'instead.'.format(self._pi_xyz_baud_rate))
# get the the right timeouts from config
if 'pi_xyz_timeout' in config.keys():
self._pi_xyz_timeout = config['pi_xyz_timeout']
else:
self._pi_xyz_timeout = 1000 # timeouts are given in millisecond in new pyvisa version
self.log.warning('No parameter "pi_xyz_timeout" found in '
'config!\n'
'Setting the timeout to {0} '
'instead.'.format(self._pi_xyz_timeout))
# get the the right term_chars from config
if 'pi_xyz_term_char' in config.keys():
self._pi_xyz_term_char = config['pi_xyz_term_char']
else:
self._pi_xyz_term_char = '\n'
self.log.warning('No parameter "pi_xyz_term_char" found in '
'config!\nTaking the term_char {0} '
'instead.'.format(self._pi_xyz_term_char))
#axis definition:
if 'pi_first_axis_label' in config.keys():
self._first_axis_label = config['pi_first_axis_label']
else:
self._first_axis_label = 'x'
self.log.warning('No parameter "pi_first_axis_label" found in '
'config!\nTaking the term_char {0} '
'instead.'.format(self._first_axis_label))
if 'pi_second_axis_label' in config.keys():
self._second_axis_label = config['pi_second_axis_label']
else:
self._second_axis_label = 'y'
self.log.warning('No parameter "pi_second_axis_label" found in '
'config!\nTaking the term_char {0} '
'instead.'.format(self._second_axis_label))
if 'pi_third_axis_label' in config.keys():
self._third_axis_label = config['pi_third_axis_label']
else:
self._third_axis_label = 'z'
self.log.warning('No parameter "pi_third_axis_label" found in '
'config!\nTaking the term_char {0} '
'instead.'.format(self._third_axis_label))
if 'pi_first_axis_ID' in config.keys():
self._first_axis_ID = config['pi_first_axis_ID']
else:
self._first_axis_ID = '1'
self.log.warning('No parameter "pi_first_axis_ID" found in '
'config!\nTaking the term_char {0} '
'instead.'.format(self._first_axis_ID))
if 'pi_second_axis_ID' in config.keys():
self._second_axis_ID = config['pi_second_axis_ID']
else:
self._second_axis_ID = '2'
self.log.warning('No parameter "pi_second_axis_ID" found in '
'config!\nTaking the term_char {0} '
'instead.'.format(self._second_axis_ID))
if 'pi_third_axis_ID' in config.keys():
self._third_axis_ID = config['pi_third_axis_ID']
else:
self._third_axis_ID = '3'
self.log.warning('No parameter "pi_third_axis_ID" found in '
'config!\nTaking the term_char {0} '
'instead.'.format(self._third_axis_ID))
self.rm = visa.ResourceManager()
self._serial_connection_xyz = self.rm.open_resource(resource_name=self._com_port_pi_xyz,
baud_rate=self._pi_xyz_baud_rate,
timeout=self._pi_xyz_timeout)
# Should be in config I guess
# setting the ranges of the axes - PI uses units of 10nm. Thus in order to convert to meters
# a multiplication with 1e7 is necessary
if 'pi_first_min' in config.keys():
self._min_first = config['pi_first_min']
else:
self._min_first = -0.1
self.log.warning('No parameter "pi_first_min" found in config!\n'
'Taking -0.1m instead.')
if 'pi_first_max' in config.keys():
self._max_first = config['pi_first_max']
else:
self._max_first = 0.1
self.log.warning('No parameter "pi_first_max" found in config!\n'
'Taking 0.1m instead.')
if 'pi_second_min' in config.keys():
self._min_second = config['pi_second_min']
else:
self._min_second = -0.1
self.log.warning('No parameter "pi_second_min" found in config!\n'
'Taking -0.1m instead.')
if 'pi_second_max' in config.keys():
self._max_second = config['pi_second_max']
else:
self._max_second = 0.1
self.log.warning('No parameter "pi_second_max" found in config!\n'
'Taking 0.1m instead.')
if 'pi_third_min' in config.keys():
self._min_third = config['pi_third_min']
else:
            self._min_third = -0.1
self.log.warning('No parameter "pi_third_min" found in config!\n'
'Taking -0.1m instead.')
if 'pi_third_max' in config.keys():
self._max_third = config['pi_third_max']
else:
self._max_third = 0.1
self.log.warning('No parameter "pi_third_max" found in config!\n'
'Taking 0.1m instead.')
if 'pi_first_axis_step' in config.keys():
self.step_first_axis = config['pi_first_axis_step']
else:
self.step_first_axis = 1e-7
self.log.warning('No parameter "pi_first_axis_step" found in config!\n'
'Taking 10nm instead.')
if 'pi_second_axis_step' in config.keys():
self.step_second_axis = config['pi_second_axis_step']
else:
self.step_second_axis = 1e-7
self.log.warning('No parameter "pi_second_axis_step" found in config!\n'
'Taking 10nm instead.')
if 'pi_third_axis_step' in config.keys():
self.step_third_axis = config['pi_third_axis_step']
else:
self.step_third_axis = 1e-7
self.log.warning('No parameter "pi_third_axis_step" found in config!\n'
'Taking 10nm instead.')
if 'vel_first_min' in config.keys():
self._vel_min_first = config['vel_first_min']
else:
self._vel_min_first = 1e-5
self.log.warning('No parameter "vel_first_min" found in config!\n'
'Taking 1e-5m/s instead.')
if 'vel_first_max' in config.keys():
self._vel_max_first = config['vel_first_max']
else:
self._vel_max_first = 5e-2
self.log.warning('No parameter "vel_first_max" found in config!\n'
'Taking 5e-2m/s instead.')
if 'vel_second_min' in config.keys():
self._vel_min_second = config['vel_second_min']
else:
self._vel_min_second = 1e-5
self.log.warning('No parameter "vel_second_min" found in config!\n'
'Taking 1e-5m/s instead.')
if 'vel_second_max' in config.keys():
self._vel_max_second = config['vel_second_max']
else:
self._vel_max_second = 5e-2
self.log.warning('No parameter "vel_second_max" found in config!\n'
'Taking 5e-2m/s instead.')
if 'vel_third_min' in config.keys():
self._vel_min_third = config['vel_third_min']
else:
            self._vel_min_third = 1e-5
            self.log.warning('No parameter "vel_third_min" found in config!\n'
                             'Taking 1e-5m/s instead.')
if 'vel_third_max' in config.keys():
self._vel_max_third = config['vel_third_max']
else:
self._vel_max_third = 5e-2
self.log.warning('No parameter "vel_third_max" found in config!\n'
'Taking 5e-2m/s instead.')
if 'vel_first_axis_step' in config.keys():
self._vel_step_first = config['vel_first_axis_step']
else:
self._vel_step_first = 1e-5
self.log.warning('No parameter "vel_first_axis_step" found in config!\n'
'Taking 1e-5m/s instead.')
if 'vel_second_axis_step' in config.keys():
self._vel_step_second = config['vel_second_axis_step']
else:
self._vel_step_second = 1e-5
self.log.warning('No parameter "vel_second_axis_step" found in config!\n'
'Taking 1e-5m/s instead.')
if 'vel_third_axis_step' in config.keys():
self._vel_step_third = config['vel_third_axis_step']
else:
self._vel_step_third = 1e-5
self.log.warning('No parameter "vel_third_axis_step" found in config!\n'
'Taking 1e-5m/s instead.')
return 0
def on_deactivate(self):
""" Deinitialisation performed during deactivation of the module.
@return: error code
"""
self._serial_connection_xyz.close()
self.rm.close()
return 0
def get_constraints(self):
""" Retrieve the hardware constrains from the motor device.
@return dict: dict with constraints for the sequence generation and GUI
Provides all the constraints for the xyz stage and rot stage (like total
movement, velocity, ...)
Each constraint is a tuple of the form
(min_value, max_value, stepsize)
"""
constraints = OrderedDict()
axis0 = {}
axis0['label'] = self._first_axis_label
axis0['ID'] = self._first_axis_ID
axis0['unit'] = 'm' # the SI units
axis0['ramp'] = None # a possible list of ramps
axis0['pos_min'] = self._min_first
axis0['pos_max'] = self._max_first
axis0['pos_step'] = self.step_first_axis
axis0['vel_min'] = self._vel_min_first
axis0['vel_max'] = self._vel_max_first
axis0['vel_step'] = self._vel_step_first
axis0['acc_min'] = None
axis0['acc_max'] = None
axis0['acc_step'] = None
axis1 = {}
axis1['label'] = self._second_axis_label
axis1['ID'] = self._second_axis_ID
axis1['unit'] = 'm' # the SI units
axis1['ramp'] = None # a possible list of ramps
axis1['pos_min'] = self._min_second
axis1['pos_max'] = self._max_second
axis1['pos_step'] = self.step_second_axis
axis1['vel_min'] = self._vel_min_second
axis1['vel_max'] = self._vel_max_second
axis1['vel_step'] = self._vel_step_second
axis1['acc_min'] = None
axis1['acc_max'] = None
axis1['acc_step'] = None
axis2 = {}
axis2['label'] = self._third_axis_label
axis2['ID'] = self._third_axis_ID
axis2['unit'] = 'm' # the SI units
axis2['ramp'] = None # a possible list of ramps
axis2['pos_min'] = self._min_third
axis2['pos_max'] = self._max_third
axis2['pos_step'] = self.step_third_axis
axis2['vel_min'] = self._vel_min_third
axis2['vel_max'] = self._vel_max_third
axis2['vel_step'] = self._vel_step_third
axis2['acc_min'] = None
axis2['acc_max'] = None
axis2['acc_step'] = None
# assign the parameter container for x to a name which will identify it
constraints[axis0['label']] = axis0
constraints[axis1['label']] = axis1
constraints[axis2['label']] = axis2
return constraints
def move_rel(self, param_dict):
"""Moves stage in given direction (relative movement)
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <the-abs-pos-value>}.
'axis_label' must correspond to a label given
to one of the axis.
        @return dict pos: dictionary with the current stage position
"""
# There are sometimes connections problems therefore up to 3 attempts are started
for attempt in range(3):
try:
for axis_label in param_dict:
step = param_dict[axis_label]
self._do_move_rel(axis_label, step)
except:
self.log.warning('Motor connection problem! Try again...')
else:
break
else:
self.log.error('Motor cannot move!')
#The following two lines have been commented out to speed up
#pos = self.get_pos()
#return pos
return param_dict
def move_abs(self, param_dict):
"""Moves stage to absolute position
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <the-abs-pos-value>}.
'axis_label' must correspond to a label given
to one of the axis.
                                The values for the axes are given in meters.
@return dict pos: dictionary with the current axis position
"""
# There are sometimes connections problems therefore up to 3 attempts are started
        for attempt in range(3):
try:
for axis_label in param_dict:
move = param_dict[axis_label]
self._do_move_abs(axis_label, move)
while not self._motor_stopped():
time.sleep(0.02)
except:
self.log.warning('Motor connection problem! Try again...')
else:
break
else:
self.log.error('Motor cannot move!')
#The following two lines have been commented out to speed up
#pos = self.get_pos()
#return pos
return param_dict
def abort(self):
"""Stops movement of the stage
@return int: error code (0:OK, -1:error)
"""
constraints = self.get_constraints()
try:
for axis_label in constraints:
self._write_xyz(axis_label,'AB')
while not self._motor_stopped():
time.sleep(0.2)
return 0
except:
            self.log.error('MOTOR MOVEMENT NOT STOPPED!!!')
return -1
def get_pos(self, param_list=None):
""" Gets current position of the stage arms
@param list param_list: optional, if a specific position of an axis
is desired, then the labels of the needed
axis should be passed in the param_list.
If nothing is passed, then from each axis the
position is asked.
@return dict: with keys being the axis labels and item the current
position. """
constraints = self.get_constraints()
param_dict = {}
# unfortunately, probably due to connection problems this specific command sometimes failing
# although it should run.... therefore some retries are added
try:
if param_list is not None:
for axis_label in param_list:
for attempt in range(25):
# self.log.debug(attempt)
try:
pos = int(self._ask_xyz(axis_label,'TT')[8:])
param_dict[axis_label] = pos * 1e-7
except:
continue
else:
break
else:
for axis_label in constraints:
for attempt in range(25):
#self.log.debug(attempt)
try:
pos = int(self._ask_xyz(axis_label,'TT')[8:])
param_dict[axis_label] = pos * 1e-7
except:
continue
else:
break
return param_dict
except:
self.log.error('Could not find current xyz motor position')
return -1
def get_status(self, param_list=None):
""" Get the status of the position
@param list param_list: optional, if a specific status of an axis
is desired, then the labels of the needed
axis should be passed in the param_list.
If nothing is passed, then from each axis the
status is asked.
@return dict: with the axis label as key and the status number as item.
The meaning of the return value is:
Bit 0: Ready Bit 1: On target Bit 2: Reference drive active Bit 3: Joystick ON
Bit 4: Macro running Bit 5: Motor OFF Bit 6: Brake ON Bit 7: Drive current active
"""
constraints = self.get_constraints()
param_dict = {}
try:
if param_list is not None:
for axis_label in param_list:
status = self._ask_xyz(axis_label,'TS')[8:]
param_dict[axis_label] = status
else:
for axis_label in constraints:
status = self._ask_xyz(axis_label, 'TS')[8:]
param_dict[axis_label] = status
return param_dict
except:
self.log.error('Status request unsuccessful')
return -1
def calibrate(self, param_list=None):
""" Calibrates the stage.
@param dict param_list: param_list: optional, if a specific calibration
of an axis is desired, then the labels of the
needed axis should be passed in the param_list.
If nothing is passed, then all connected axis
will be calibrated.
After calibration the stage moves to home position which will be the
zero point for the passed axis.
        @return dict pos: dictionary with the current position of the axis
"""
#constraints = self.get_constraints()
param_dict = {}
try:
for axis_label in param_list:
self._write_xyz(axis_label,'FE2')
while not self._motor_stopped():
time.sleep(0.2)
for axis_label in param_list:
self._write_xyz(axis_label,'DH')
except:
self.log.error('Calibration did not work')
for axis_label in param_list:
param_dict[axis_label] = 0.0
self.move_abs(param_dict)
pos = self.get_pos()
return pos
def get_velocity(self, param_list=None):
""" Gets the current velocity for all connected axes in m/s.
@param list param_list: optional, if a specific velocity of an axis
is desired, then the labels of the needed
axis should be passed as the param_list.
If nothing is passed, then from each axis the
velocity is asked.
@return dict : with the axis label as key and the velocity as item.
"""
constraints = self.get_constraints()
param_dict = {}
try:
if param_list is not None:
for axis_label in param_list:
vel = int(self._ask_xyz(axis_label, 'TY')[8:])
param_dict[axis_label] = vel * 1e-7
else:
for axis_label in constraints:
vel = int(self._ask_xyz(axis_label, 'TY')[8:])
param_dict[axis_label] = vel * 1e-7
return param_dict
except:
self.log.error('Could not find current axis velocity')
return -1
def set_velocity(self, param_dict):
""" Write new value for velocity in m/s.
@param dict param_dict: dictionary, which passes all the relevant
parameters, which should be changed. Usage:
{'axis_label': <the-velocity-value>}.
'axis_label' must correspond to a label given
to one of the axis.
@return dict param_dict2: dictionary with the updated axis velocity
"""
#constraints = self.get_constraints()
try:
for axis_label in param_dict:
vel = int(param_dict[axis_label] * 1.0e7)
self._write_xyz(axis_label, 'SV{0:d}'.format((vel)))
#The following two lines have been commented out to speed up
#param_dict2 = self.get_velocity()
            #return param_dict2
return param_dict
except:
self.log.error('Could not set axis velocity')
return -1
########################## internal methods ##################################
def _write_xyz(self,axis,command):
'''this method just sends a command to the motor! DOES NOT RETURN AN ANSWER!
@param axis string: name of the axis that should be asked
@param command string: command
@return error code (0:OK, -1:error)
'''
constraints = self.get_constraints()
try:
#self.log.info(constraints[axis]['ID'] + command + '\n')
self._serial_connection_xyz.write(constraints[axis]['ID'] + command + '\n')
trash=self._read_answer_xyz() # deletes possible answers
return 0
except:
            self.log.error('Command was not accepted')
return -1
def _read_answer_xyz(self):
'''this method reads the answer from the motor!
@return answer string: answer of motor
'''
still_reading = True
answer=''
while still_reading == True:
try:
answer = answer + self._serial_connection_xyz.read()[:-1]
except:
still_reading = False
#self.log.info(answer)
return answer
def _ask_xyz(self,axis,question):
'''this method combines writing a command and reading the answer
@param axis string: name of the axis that should be asked
@param command string: command
@return answer string: answer of motor
'''
constraints = self.get_constraints()
self._serial_connection_xyz.write(constraints[axis]['ID']+question+'\n')
answer=self._read_answer_xyz()
return answer
def _do_move_rel(self, axis, step):
"""internal method for the relative move
@param axis string: name of the axis that should be moved
        @param float step: step in meters
@return str axis: axis which is moved
move float: absolute position to move to
"""
constraints = self.get_constraints()
if not(abs(constraints[axis]['pos_step']) < abs(step)):
self.log.warning('Cannot make the movement of the axis "{0}"'
'since the step is too small! Ignore command!')
else:
current_pos = self.get_pos(axis)[axis]
move = current_pos + step
self._do_move_abs(axis, move)
return axis,move
def _do_move_abs(self, axis, move):
"""internal method for the absolute move in meter
@param axis string: name of the axis that should be moved
        @param float move: desired position in meters
@return str axis: axis which is moved
move float: absolute position to move to
"""
constraints = self.get_constraints()
#self.log.info(axis + 'MA{0}'.format(int(move*1e8)))
if not(constraints[axis]['pos_min'] <= move <= constraints[axis]['pos_max']):
self.log.warning('Cannot make the movement of the axis "{0}"'
'since the border [{1},{2}] would be crossed! Ignore command!'
''.format(axis, constraints[axis]['pos_min'], constraints[axis]['pos_max']))
else:
            self._write_xyz(axis,'MA{0}'.format(int(move*1e7))) # 1e7 converts meters to the controller's internal units
#self._write_xyz(axis, 'MP')
return axis, move
def _in_movement_xyz(self):
        '''this method checks if the stage is still moving and returns
        a dictionary indicating which of the axes are moving.
        @return: dict param_dict: Dictionary displaying if the axes are moving:
0 for immobile and 1 for moving
'''
constraints=self.get_constraints()
param_dict = {}
for axis_label in constraints:
tmp0 = int(self._ask_xyz(constraints[axis_label]['label'],'TS')[8:])
param_dict[axis_label] = tmp0%2
return param_dict
def _motor_stopped(self):
        '''this method checks if the stage is still moving and returns
        False if it is moving and True if it is immobile
        @return: bool stopped: True if all axes are immobile, False if any axis is moving
'''
param_dict=self._in_movement_xyz()
stopped=True
for axis_label in param_dict:
if param_dict[axis_label] != 0:
self.log.info(axis_label + ' is moving')
stopped=False
return stopped
#########################################################################################
#########################################################################################
#########################################################################################
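# --- Usage sketch (added for illustration; not part of the original module) ---
# In a running qudi session this module is instantiated by the manager from the
# config (com_port_pi_xyz, baud rate, axis labels, ...); the calls below only
# sketch the MotorInterface methods implemented above, and the axis labels
# 'x', 'y', 'z' are the defaults assumed when the config does not override them.
#
#   stage = <MotorStagePI instance provided by the qudi manager>
#   constraints = stage.get_constraints()   # per-axis position/velocity limits
#   stage.move_abs({'x': 1e-3, 'y': 0.0})   # absolute move, positions in meters
#   stage.move_rel({'z': 1e-6})             # relative step of 1 um
#   print(stage.get_pos(['x', 'y', 'z']))   # current positions in meters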
| gpl-3.0 |
hilarry/mvpn | mvpn/forms.py | 1 | 1090 | #!-*- coding:utf-8 -*-
from django import forms
from django.contrib.auth.models import User
from django.forms import ModelForm
from models import Openuser,Log
class LoginForm(forms.Form):
username = forms.CharField(
required=True,
label=u"用户名",
error_messages={'required': '请输入用户名'},
widget=forms.TextInput(
attrs={
'placeholder':u"用户名",
}
),
)
password = forms.CharField(
required=True,
label=u"密码",
error_messages={'required': u'请输入密码'},
widget=forms.PasswordInput(
attrs={
'placeholder':u"密码",
}
),
)
def clean(self):
if not self.is_valid():
raise forms.ValidationError(u"用户名和密码为必填项")
else:
            cleaned_data = super(LoginForm, self).clean()
            return cleaned_data
class AdduserForm(ModelForm):
class Meta:
model = Openuser
fields = ['username', 'password', 'name'] | apache-2.0 |
KyletheFox/4397_Demo_App | node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 1812 | 9537 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
# A dictionary mapping supported target types to extensions.
TARGET_TYPE_EXT = {
'executable': 'exe',
'loadable_module': 'dll',
'shared_library': 'dll',
'static_library': 'lib',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
for deptype in ('dependencies', 'dependencies_original'):
dependencies = copy.copy(new_target_dicts[t].get(deptype, []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t][deptype] = new_dependencies
return (new_target_list, new_target_dicts)
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s.%s.pdb' % (pdb_base, TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
# file, polluting GYPs tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts)
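# --- Usage sketch (added for illustration; not part of the original file) ---
# ShardTargets splits any target whose dict sets 'msvs_shard' into that many
# shards, distributing the sources round-robin. The target names and sources
# below are made up purely to show the resulting naming scheme.
if __name__ == '__main__':
  demo_list = ['base/base.gyp:base#target']
  demo_dicts = {
      'base/base.gyp:base#target': {
          'target_name': 'base',
          'msvs_shard': 2,
          'sources': ['a.cc', 'b.cc', 'c.cc', 'd.cc'],
      },
  }
  sharded_list, sharded_dicts = ShardTargets(demo_list, demo_dicts)
  print(sharded_list)
  # -> ['base/base.gyp:base_0#target', 'base/base.gyp:base_1#target']
  print(sharded_dicts['base/base.gyp:base_0#target']['sources'])
  # -> ['a.cc', 'c.cc']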
| mit |
phanikiran2/Extending-RED-qdisc-in-ns3-to-support--NLRED | wutils.py | 64 | 8869 | import os
import os.path
import re
import sys
import subprocess
import shlex
# WAF modules
from waflib import Options, Utils, Logs, TaskGen, Build, Context
from waflib.Errors import WafError
# these are set from the main wscript file
APPNAME=None
VERSION=None
bld=None
def get_command_template(env, arguments=()):
cmd = Options.options.command_template or '%s'
for arg in arguments:
cmd = cmd + " " + arg
return cmd
if hasattr(os.path, "relpath"):
relpath = os.path.relpath # since Python 2.6
else:
def relpath(path, start=os.path.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = os.path.abspath(start).split(os.path.sep)
path_list = os.path.abspath(path).split(os.path.sep)
# Work out how much of the filepath is shared by start and path.
i = len(os.path.commonprefix([start_list, path_list]))
rel_list = [os.path.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return os.path.curdir
return os.path.join(*rel_list)
def find_program(program_name, env):
launch_dir = os.path.abspath(Context.launch_dir)
#top_dir = os.path.abspath(Options.cwd_launch)
found_programs = []
for obj in bld.all_task_gen:
if not getattr(obj, 'is_ns3_program', False):
continue
## filter out programs not in the subtree starting at the launch dir
if not (obj.path.abspath().startswith(launch_dir)
or obj.path.get_bld().abspath().startswith(launch_dir)):
continue
name1 = obj.name
name2 = os.path.join(relpath(obj.path.abspath(), launch_dir), obj.name)
names = [name1, name2]
found_programs.extend(names)
if program_name in names:
return obj
raise ValueError("program '%s' not found; available programs are: %r"
% (program_name, found_programs))
def get_proc_env(os_env=None):
env = bld.env
if sys.platform == 'linux2' or sys.platform == 'linux':
pathvar = 'LD_LIBRARY_PATH'
elif sys.platform == 'darwin':
pathvar = 'DYLD_LIBRARY_PATH'
elif sys.platform == 'win32':
pathvar = 'PATH'
elif sys.platform == 'cygwin':
pathvar = 'PATH'
elif sys.platform.startswith('freebsd'):
pathvar = 'LD_LIBRARY_PATH'
else:
Logs.warn(("Don't know how to configure "
"dynamic library path for the platform %r;"
" assuming it's LD_LIBRARY_PATH.") % (sys.platform,))
pathvar = 'LD_LIBRARY_PATH'
proc_env = dict(os.environ)
if os_env is not None:
proc_env.update(os_env)
if pathvar is not None:
if pathvar in proc_env:
proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']) + [proc_env[pathvar]])
else:
proc_env[pathvar] = os.pathsep.join(list(env['NS3_MODULE_PATH']))
pymoddir = bld.path.find_dir('bindings/python').get_bld().abspath()
pyvizdir = bld.path.find_dir('src/visualizer').abspath()
if 'PYTHONPATH' in proc_env:
proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir] + [proc_env['PYTHONPATH']])
else:
proc_env['PYTHONPATH'] = os.pathsep.join([pymoddir, pyvizdir])
if 'PATH' in proc_env:
proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']) + [proc_env['PATH']])
else:
proc_env['PATH'] = os.pathsep.join(list(env['NS3_EXECUTABLE_PATH']))
return proc_env
def run_argv(argv, env, os_env=None, cwd=None, force_no_valgrind=False):
proc_env = get_proc_env(os_env)
if Options.options.valgrind and not force_no_valgrind:
if Options.options.command_template:
raise WafError("Options --command-template and --valgrind are conflicting")
if not env['VALGRIND']:
raise WafError("valgrind is not installed")
# Use the first program found in the env['VALGRIND'] list
argv = [env['VALGRIND'][0], "--leak-check=full", "--show-reachable=yes", "--error-exitcode=1"] + argv
proc = subprocess.Popen(argv, env=proc_env, cwd=cwd, stderr=subprocess.PIPE)
stderrdata = proc.communicate()[1]
stderrdata = stderrdata.decode('utf-8')
error = False
        for line in stderrdata.splitlines(True):
sys.stderr.write(line)
if "== LEAK SUMMARY" in line:
error = True
retval = proc.wait()
if retval == 0 and error:
retval = 1
else:
try:
WindowsError
except NameError:
retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
else:
try:
retval = subprocess.Popen(argv, env=proc_env, cwd=cwd).wait()
except WindowsError as ex:
raise WafError("Command %s raised exception %s" % (argv, ex))
if retval:
signame = None
if retval < 0: # signal?
import signal
for name, val in vars(signal).items():
if len(name) > 3 and name[:3] == 'SIG' and name[3] != '_':
if val == -retval:
signame = name
break
if signame:
raise WafError("Command %s terminated with signal %s."
" Run it under a debugger to get more information "
"(./waf --run <program> --command-template=\"gdb --args %%s <args>\")." % (argv, signame))
else:
raise WafError("Command %s exited with code %i" % (argv, retval))
return retval
def get_run_program(program_string, command_template=None):
"""
Return the program name and argv of the process that would be executed by
run_program(program_string, command_template).
"""
#print "get_run_program_argv(program_string=%r, command_template=%r)" % (program_string, command_template)
env = bld.env
if command_template in (None, '%s'):
argv = shlex.split(program_string)
#print "%r ==shlex.split==> %r" % (program_string, argv)
program_name = argv[0]
try:
program_obj = find_program(program_name, env)
except ValueError as ex:
raise WafError(str(ex))
program_node = program_obj.path.find_or_declare(program_obj.target)
#try:
# program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
#except AttributeError:
# raise Utils.WafError("%s does not appear to be a program" % (program_name,))
execvec = [program_node.abspath()] + argv[1:]
else:
program_name = program_string
try:
program_obj = find_program(program_name, env)
except ValueError as ex:
raise WafError(str(ex))
program_node = program_obj.path.find_or_declare(program_obj.target)
#try:
# program_node = program_obj.path.find_build(ccroot.get_target_name(program_obj))
#except AttributeError:
# raise Utils.WafError("%s does not appear to be a program" % (program_name,))
tmpl = command_template % (program_node.abspath(),)
execvec = shlex.split(tmpl.replace('\\', '\\\\'))
#print "%r ==shlex.split==> %r" % (command_template % (program_node.abspath(env),), execvec)
return program_name, execvec
def run_program(program_string, env, command_template=None, cwd=None, visualize=False):
"""
if command_template is not None, then program_string == program
name and argv is given by command_template with %s replaced by the
full path to the program. Else, program_string is interpreted as
a shell command with first name being the program name.
"""
dummy_program_name, execvec = get_run_program(program_string, command_template)
if cwd is None:
if (Options.options.cwd_launch):
cwd = Options.options.cwd_launch
else:
cwd = Options.cwd_launch
if visualize:
execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
return run_argv(execvec, env, cwd=cwd)
def run_python_program(program_string, env, visualize=False):
env = bld.env
execvec = shlex.split(program_string)
if (Options.options.cwd_launch):
cwd = Options.options.cwd_launch
else:
cwd = Options.cwd_launch
if visualize:
execvec.append("--SimulatorImplementationType=ns3::VisualSimulatorImpl")
return run_argv([env['PYTHON'][0]] + execvec, env, cwd=cwd)
def uniquify_list(seq):
"""Remove duplicates while preserving order
From Dave Kirby http://www.peterbe.com/plog/uniqifiers-benchmark
"""
seen = set()
return [ x for x in seq if x not in seen and not seen.add(x)]
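# --- Usage note (added for illustration; not part of the original file) ---
# uniquify_list() keeps only the first occurrence of each element while
# preserving order, e.g.
#   uniquify_list(['b', 'a', 'b', 'c', 'a']) -> ['b', 'a', 'c']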
| gpl-2.0 |
Archives/easy-mangos | dep/ACE_wrappers/bin/svn_props.py | 95 | 1292 | #!/usr/bin/python
import sys
import re
import os
import string
print """WARNING: this script is dumb. I mean, really, really dumb. Every file is treated
as a text file, so if you are checking in any binary files, YOU MUST set a non-text
MIME type by hand, otherwise it WILL be corrupted by the checkout process.
A better approach will be to add the unmatched files to the config file in
ACE/docs/svn/config (and update yours!) so others won't have to put up with them
in the future.
To use this program, copy and paste the output from the svn command into standard
input.
"""
foo = raw_input("That being said, if you want to continue, press enter")
sin, sout = os.popen2 ("svn info")
sin.close ()
os.wait ()
url = ""
root = ""
path = ""
for line in sout.readlines ():
if line.startswith ("URL: "):
url = line.replace ("URL: ", "")[:-1]
if line.startswith ("Repository Root: "):
root = line.replace ("Repository Root: ", "")[:-1]
path = url.replace (root, "")[1:] + '/'
files = ""
eol_style = " svn ps svn:eol-style native "
keywords = " svn ps svn:keywords 'Author Date Id Revision' "
for line in sys.stdin.readlines ():
ln = line[0:line.find (':')] + ' '
ln = ln.replace (path,"")
os.system (eol_style + ln)
os.system (keywords + ln)
| gpl-2.0 |
stvstnfrd/edx-platform | openedx/core/djangoapps/credentials/tests/test_utils.py | 1 | 3242 | """Tests covering Credentials utilities."""
import uuid
import mock
from openedx.core.djangoapps.credentials.models import CredentialsApiConfig
from openedx.core.djangoapps.credentials.tests import factories
from openedx.core.djangoapps.credentials.tests.mixins import CredentialsApiConfigMixin
from openedx.core.djangoapps.credentials.utils import get_credentials
from openedx.core.djangoapps.oauth_dispatch.tests.factories import ApplicationFactory
from openedx.core.djangolib.testing.utils import CacheIsolationTestCase, skip_unless_lms
from common.djangoapps.student.tests.factories import UserFactory
UTILS_MODULE = 'openedx.core.djangoapps.credentials.utils'
@skip_unless_lms
@mock.patch(UTILS_MODULE + '.get_edx_api_data')
class TestGetCredentials(CredentialsApiConfigMixin, CacheIsolationTestCase):
""" Tests for credentials utility functions. """
ENABLED_CACHES = ['default']
def setUp(self):
super(TestGetCredentials, self).setUp() # lint-amnesty, pylint: disable=super-with-arguments
ApplicationFactory(name=CredentialsApiConfig.OAUTH2_CLIENT_NAME)
self.credentials_config = self.create_credentials_config(cache_ttl=1)
self.user = UserFactory()
def test_get_many(self, mock_get_edx_api_data):
expected = factories.UserCredential.create_batch(3)
mock_get_edx_api_data.return_value = expected
actual = get_credentials(self.user)
mock_get_edx_api_data.assert_called_once()
call = mock_get_edx_api_data.mock_calls[0]
__, __, kwargs = call
querystring = {
'username': self.user.username,
'status': 'awarded',
'only_visible': 'True',
}
cache_key = '{}.{}'.format(self.credentials_config.CACHE_KEY, self.user.username)
assert kwargs['querystring'] == querystring
assert kwargs['cache_key'] == cache_key
assert actual == expected
def test_get_one(self, mock_get_edx_api_data):
expected = factories.UserCredential()
mock_get_edx_api_data.return_value = expected
program_uuid = str(uuid.uuid4())
actual = get_credentials(self.user, program_uuid=program_uuid)
mock_get_edx_api_data.assert_called_once()
call = mock_get_edx_api_data.mock_calls[0]
__, __, kwargs = call
querystring = {
'username': self.user.username,
'status': 'awarded',
'only_visible': 'True',
'program_uuid': program_uuid,
}
cache_key = '{}.{}.{}'.format(self.credentials_config.CACHE_KEY, self.user.username, program_uuid)
assert kwargs['querystring'] == querystring
assert kwargs['cache_key'] == cache_key
assert actual == expected
def test_type_filter(self, mock_get_edx_api_data):
get_credentials(self.user, credential_type='program')
mock_get_edx_api_data.assert_called_once()
call = mock_get_edx_api_data.mock_calls[0]
__, __, kwargs = call
querystring = {
'username': self.user.username,
'status': 'awarded',
'only_visible': 'True',
'type': 'program',
}
assert kwargs['querystring'] == querystring
| agpl-3.0 |
leveille/blog.v1 | wurdig/controllers/error.py | 1 | 1807 | import cgi
from paste.urlparser import PkgResourcesParser
from pylons import request
from pylons import tmpl_context as c
from pylons.controllers.util import forward
from pylons.middleware import error_document_template
from pylons.i18n.translation import _
from webhelpers.html.builder import literal
from wurdig.lib.base import BaseController, render
class ErrorController(BaseController):
"""Generates error documents as and when they are required.
The ErrorDocuments middleware forwards to ErrorController when error
related status codes are returned from the application.
This behaviour can be altered by changing the parameters to the
ErrorDocuments middleware in your config/middleware.py file.
"""
def document(self):
"""Render the error document"""
resp = request.environ.get('pylons.original_response')
code = cgi.escape(request.GET.get('code', ''))
content = cgi.escape(request.GET.get('message', ''))
if resp:
content = literal(resp.status)
code = code or cgi.escape(str(resp.status_int))
if not code:
raise Exception(_('No status code was found'))
c.code = code
c.message = content
return render('/derived/error/document.html')
def img(self, id):
"""Serve Pylons' stock images"""
return self._serve_file('/'.join(['media/img', id]))
def style(self, id):
"""Serve Pylons' stock stylesheets"""
return self._serve_file('/'.join(['media/style', id]))
def _serve_file(self, path):
"""Call Paste's FileApp (a WSGI application) to serve the file
at the specified path
"""
request.environ['PATH_INFO'] = '/%s' % path
return forward(PkgResourcesParser('pylons', 'pylons'))
| mit |
daltonmaag/robofab | Scripts/RoboFabIntro/intro_SimpleDrawing.py | 9 | 1202 | #FLM: RoboFab Intro, Simple Drawing
#
#
# demo of drawing with RoboFab
#
#
import robofab
from robofab.world import CurrentFont, CurrentGlyph
# (make sure you have a font opened in FontLab)
f = CurrentFont()
if f == None:
Message("You should open a font first, there's nothing to look at now!")
else:
newGlyph = f.newGlyph('demoDrawGlyph', clear=True)
newGlyph.width = 1000
# The drawing is done through a specialised pen object.
# There are pen objects for different purposes, this one
# will draw in a FontLab glyph. The point of this is that
# Robofab glyphs all respond to the standard set of
# pen methods, and it is a simple way to re-interpret the
# glyph data.
# Make a new pen with the new glyph we just made
pen = newGlyph.getPen()
# Tell the pen to draw things
pen.moveTo((100, 100))
pen.lineTo((800, 100))
pen.curveTo((1000, 300), (1000, 600), (800, 800))
pen.lineTo((100, 800))
pen.lineTo((100, 100))
# Done drawing: close the path
pen.closePath()
# Robofab objects still need to tell FontLab to update.
newGlyph.update()
f.update()
# go check the font, it should now contain a new glyph named
# "demoDrawGlyph" and it should look like a square.
| bsd-3-clause |
dzolnierz/percona-server | mysql-test/suite/tokudb/t/change_column_blob.py | 54 | 1409 | import sys
def main():
print "# this test is generated by change_blob.py"
print "# generate hot blob expansion test cases"
print "--disable_warnings"
print "DROP TABLE IF EXISTS t;"
print "--enable_warnings"
print "SET SESSION DEFAULT_STORAGE_ENGINE=\"TokuDB\";"
print "SET SESSION TOKUDB_DISABLE_SLOW_ALTER=1;"
gen_tests([ "TINY", "", "MEDIUM", "LONG" ], [ "NULL", "NOT NULL"])
return 0
def gen_tests(base_types, null_types):
for from_index in range(len(base_types)):
for to_index in range(len(base_types)):
for from_null in range(len(null_types)):
for to_null in range(len(null_types)):
print "CREATE TABLE t (a %sBLOB %s);" % (base_types[from_index], null_types[from_null])
print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
print "--error ER_UNSUPPORTED_EXTENSION"
print "ALTER TABLE t CHANGE COLUMN a a %sTEXT %s;" % (base_types[to_index], null_types[to_null]);
if from_null != to_null or from_index > to_index:
print "--replace_regex /MariaDB/XYZ/ /MySQL/XYZ/"
print "--error ER_UNSUPPORTED_EXTENSION"
print "ALTER TABLE t CHANGE COLUMN a a %sBLOB %s;" % (base_types[to_index], null_types[to_null]);
print "DROP TABLE t;"
sys.exit(main())
| gpl-2.0 |
Gamebasis/3DGamebasisServer | GameData/blender-2.71-windows64/2.71/python/lib/fnmatch.py | 894 | 3163 | """Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
"""
import os
import posixpath
import re
import functools
__all__ = ["filter", "fnmatch", "fnmatchcase", "translate"]
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any char not in seq
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normcase(name)
pat = os.path.normcase(pat)
return fnmatchcase(name, pat)
@functools.lru_cache(maxsize=256, typed=True)
def _compile_pattern(pat):
if isinstance(pat, bytes):
pat_str = str(pat, 'ISO-8859-1')
res_str = translate(pat_str)
res = bytes(res_str, 'ISO-8859-1')
else:
res = translate(pat)
return re.compile(res).match
def filter(names, pat):
"""Return the subset of the list NAMES that match PAT."""
result = []
pat = os.path.normcase(pat)
match = _compile_pattern(pat)
if os.path is posixpath:
# normcase on posix is NOP. Optimize it away from the loop.
for name in names:
if match(name):
result.append(name)
else:
for name in names:
if match(os.path.normcase(name)):
result.append(name)
return result
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
match = _compile_pattern(pat)
return match(name) is not None
def translate(pat):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat)
res = ''
while i < n:
c = pat[i]
i = i+1
if c == '*':
res = res + '.*'
elif c == '?':
res = res + '.'
elif c == '[':
j = i
if j < n and pat[j] == '!':
j = j+1
if j < n and pat[j] == ']':
j = j+1
while j < n and pat[j] != ']':
j = j+1
if j >= n:
res = res + '\\['
else:
stuff = pat[i:j].replace('\\','\\\\')
i = j+1
if stuff[0] == '!':
stuff = '^' + stuff[1:]
elif stuff[0] == '^':
stuff = '\\' + stuff
res = '%s[%s]' % (res, stuff)
else:
res = res + re.escape(c)
return res + '\Z(?ms)'
| gpl-3.0 |
tntnatbry/tensorflow | tensorflow/tensorboard/lib/python/json_util_test.py | 47 | 2271 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.platform import googletest
from tensorflow.tensorboard.lib.python import json_util
_INFINITY = float('inf')
class FloatWrapperTest(googletest.TestCase):
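  # json_util.Cleanse() recursively rewrites values the json module cannot
  # serialize (inf, -inf, nan, tuples, sets, byte strings) into JSON-friendly
  # equivalents; these tests pin down that mapping.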
def _assertWrapsAs(self, to_wrap, expected):
"""Asserts that |to_wrap| becomes |expected| when wrapped."""
actual = json_util.Cleanse(to_wrap)
for a, e in zip(actual, expected):
self.assertEqual(e, a)
def testWrapsPrimitives(self):
self._assertWrapsAs(_INFINITY, 'Infinity')
self._assertWrapsAs(-_INFINITY, '-Infinity')
self._assertWrapsAs(float('nan'), 'NaN')
def testWrapsObjectValues(self):
self._assertWrapsAs({'x': _INFINITY}, {'x': 'Infinity'})
def testWrapsObjectKeys(self):
self._assertWrapsAs({_INFINITY: 'foo'}, {'Infinity': 'foo'})
def testWrapsInListsAndTuples(self):
self._assertWrapsAs([_INFINITY], ['Infinity'])
# map() returns a list even if the argument is a tuple.
self._assertWrapsAs((_INFINITY,), ['Infinity',])
def testWrapsRecursively(self):
self._assertWrapsAs({'x': [_INFINITY]}, {'x': ['Infinity']})
def testTuple_turnsIntoList(self):
self.assertEqual(json_util.Cleanse(('a', 'b')), ['a', 'b'])
def testSet_turnsIntoSortedList(self):
self.assertEqual(json_util.Cleanse(set(['b', 'a'])), ['a', 'b'])
def testByteString_turnsIntoUnicodeString(self):
    self.assertEqual(json_util.Cleanse(b'\xc2\xa3'), u'\u00a3') # is £ sterling
if __name__ == '__main__':
googletest.main()
| apache-2.0 |
jeremiahyan/lammps | python/examples/mc.py | 15 | 2535 | #!/usr/bin/env python -i
# preceding line should have path for Python on your machine
# mc.py
# Purpose: mimic operation of example/MC/in.mc via Python
# Syntax: mc.py in.mc
# in.mc = LAMMPS input script
import sys,random,math
# set these parameters
# make sure neigh skin (in in.mc) > 2*deltamove
nloop = 3000
deltaperturb = 0.2
deltamove = 0.1
kT = 0.05
random.seed(27848)
# parse command line
argv = sys.argv
if len(argv) != 2:
print "Syntax: mc.py in.mc"
sys.exit()
infile = sys.argv[1]
from lammps import lammps
lmp = lammps()
# run infile one line at a time
# just sets up MC problem
lines = open(infile,'r').readlines()
for line in lines: lmp.command(line)
lmp.command("variable e equal pe")
# run 0 to get energy of perfect lattice
# emin = minimum energy
lmp.command("run 0")
natoms = lmp.extract_global("natoms",0)
emin = lmp.extract_compute("thermo_pe",0,0) / natoms
lmp.command("variable emin equal $e")
# disorder the system
# estart = initial energy
x = lmp.extract_atom("x",3)
for i in xrange(natoms):
x[i][0] += deltaperturb * (2*random.random()-1)
x[i][1] += deltaperturb * (2*random.random()-1)
lmp.command("variable elast equal $e")
lmp.command("thermo_style custom step v_emin v_elast pe")
lmp.command("run 0")
x = lmp.extract_atom("x",3)
lmp.command("variable elast equal $e")
estart = lmp.extract_compute("thermo_pe",0,0) / natoms
# loop over Monte Carlo moves
# extract x after every run, in case reneighboring changed ptr in LAMMPS
elast = estart
naccept = 0
for i in xrange(nloop):
iatom = random.randrange(0,natoms)
x0 = x[iatom][0]
y0 = x[iatom][1]
x[iatom][0] += deltamove * (2*random.random()-1)
x[iatom][1] += deltamove * (2*random.random()-1)
lmp.command("run 1 pre no post no")
x = lmp.extract_atom("x",3)
e = lmp.extract_compute("thermo_pe",0,0) / natoms
if e <= elast:
elast = e
lmp.command("variable elast equal $e")
naccept += 1
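  # Metropolis criterion: accept an energy-raising move with probability
  # exp(-natoms*(e - elast)/kT), otherwise restore the old coordinates.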
elif random.random() <= math.exp(natoms*(elast-e)/kT):
elast = e
lmp.command("variable elast equal $e")
naccept += 1
else:
x[iatom][0] = x0
x[iatom][1] = y0
# final energy and stats
lmp.command("variable nbuild equal nbuild")
nbuild = lmp.extract_variable("nbuild",None,0)
lmp.command("run 0")
estop = lmp.extract_compute("thermo_pe",0,0) / natoms
print "MC stats:"
print " starting energy =",estart
print " final energy =",estop
print " minimum energy of perfect lattice =",emin
print " accepted MC moves =",naccept
print " neighbor list rebuilds =",nbuild
| gpl-2.0 |
x75/smq | smq/plot.py | 1 | 14408 | import numpy as np
import matplotlib.pyplot as pl
import pandas as pd
import seaborn as sns
import smq.logging as log
# check pandas, seaborn
# FIXME: fix hardcoded tablenames
from smq.utils import set_attr_from_dict
def get_data_from_item_log(items):
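    # Look up the DataFrame logged under the first item's name and return the
    # table key, the DataFrame, its transposed values and its column names.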
tbl_key = items[0].name
# print "%s.run: tbl_key = %s" % (self.__class__.__name__, tbl_key)
print "plot.get_data_from_item_log: tbl_key = %s" % (tbl_key)
df = log.log_lognodes[tbl_key]
data = df.values.T
columns = df.columns
return tbl_key, df, data, columns
class Plot(object):
def __init__(self, conf):
self.conf = conf
set_attr_from_dict(self, conf)
def run(self, items):
self.make_plot(items)
def make_plot(self, items):
print "%s.make_plot: implement me" % (self.__class__.__name__)
class PlotTimeseries(Plot):
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
# how many axes / plotitems
# configure subplotgrid
tbl_key = items[0].name
# tbl_key = items[0].conf["name"]
print "tbl_key", tbl_key
df = log.log_lognodes[tbl_key]
# data = log.h5file.root.item_pm_data.read()
# data = log.log_lognodes["pm"].values.T
# columns = log.log_lognodes["pm"].columns
data = df.values.T
columns = df.columns
# print "data.shape", data.shape
pl.ioff()
# create figure
fig = pl.figure()
fig.suptitle("Experiment %s" % (log.h5file.title))
# fig.suptitle("Experiment %s" % (self.title))
for i in range(data.shape[0]): # loop over data items
ax1 = pl.subplot2grid((data.shape[0], 2), (i, 0))
ax1 = self.make_plot_timeseries(ax1, data[i], columns[i])
ax2 = pl.subplot2grid((data.shape[0], 2), (i, 1)) # second plotgrid column
ax2 = self.make_plot_histogram(ax2, data[i], columns[i])
# global for plot, use last axis
ax1.set_xlabel("t [steps]")
ax2.set_xlabel("counts")
# fig.show() # this doesn't work
pl.show()
def make_plot_timeseries(self, ax, data, columns):
ax.plot(data, "k-", alpha=0.5)
# print "columns[i]", type(columns[i])
ax.legend(["%s" % (columns)])
return ax
def make_plot_histogram(self, ax, data, columns):
ax.hist(data, bins=20, orientation="horizontal")
ax.legend(["%s" % (columns)])
# pl.hist(data.T, bins=20, orientation="horizontal")
return ax
# def make_plot(self, items):
# # print "log.h5file", log.h5file
# # print "dir(log.h5file)", dir(log.h5file)
# # print "blub", type(log.h5file.root.item_pm_data)
# # for item in log.h5file.root.item_pm_data:
# # print type(item)
# # print "log.h5file.root.item_pm_data", log.h5file.root.item_pm_data.read()
# # df = log.log_lognodes["pm"]
# # g = sns.FacetGrid(df, col=list(df.columns))
# # g.map(pl.plot, )
# # print "data.shape", data.shape
# for i,datum in enumerate(data):
# pl.subplot(data.shape[0], 2, (i*2)+1)
# # pl.title(columns[i])
# # sns.timeseries.tsplot(datum)
# pl.plot(datum, "k-", alpha=0.5)
# # print "columns[i]", type(columns[i])
# pl.legend(["%s" % (columns[i])])
# pl.xlabel("t [steps]")
# # pl.legend(["acc_p", "vel_e", "vel_", "pos_", "vel_goal", "dist_goal", "acc_pred", "m"])
# # pl.subplot(122)
# for i,datum in enumerate(data):
# pl.subplot(data.shape[0], 2, (i*2)+2)
# # print "dataum", datum
# pl.hist(datum, bins=20, orientation="horizontal")
# pl.legend(["%s" % (columns[i])])
# # pl.hist(data.T, bins=20, orientation="horizontal")
# pl.xlabel("counts")
# pl.show()
class PlotTimeseries2D(Plot):
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
# FIXME: assuming len(items) == 1, which might be appropriate depending on the experiment
if items[0].dim_s_motor > 2:
print "more than two dimensions in data, plot is going to be incomplete"
return
tbl_key = items[0].name
# tbl_key = items[0].conf["name"]
print "%s.run: tbl_key = %s" % (self.__class__.__name__, tbl_key)
df = log.log_lognodes[tbl_key]
data = df.values.T
columns = df.columns
# print "columns", columns
# transform df to new df
if hasattr(self, "cols"):
cols = self.cols
else:
cols = ["vel%d" % (i) for i in range(items[0].dim_s_motor)]
cols += ["acc_pred%d" % (i) for i in range(items[0].dim_s_motor)]
df2 = df[cols]
# print df
# goal columns
if not hasattr(self, "cols_goal_base"):
setattr(self, "cols_goal_base", "vel_goal")
print "PlotTimeseries2D", self.cols, self.cols_goal_base
pl.ioff() #
goal_col_1 = "%s%d" % (self.cols_goal_base, 0)
goal_col_2 = "%s%d" % (self.cols_goal_base, 1)
if self.type == "pyplot":
# pl.plot(df["vel0"], df["vel1"], "ko")
# print df["vel0"].values.dtype
pl.subplot(131)
pl.title("state distribution and goal")
# print df["vel_goal0"].values, df["vel_goal1"].values
# pl.hist2d(df["vel0"].values, df["vel1"].values, bins=20)
pl.plot(df["%s%d" % (self.cols_goal_base, 0)].values[0],
df["%s%d" % (self.cols_goal_base, 1)].values[0], "ro", markersize=16, alpha=0.5)
pl.hexbin(df[self.cols[0]].values, df[self.cols[1]].values, gridsize = 30, marginals=True)
pl.plot(df[self.cols[0]].values, df[self.cols[1]].values, "k-", alpha=0.25, linewidth=1)
# pl.xlim((-1.2, 1.2))
# pl.ylim((-1.2, 1.2))
pl.grid()
pl.colorbar()
pl.subplot(132)
pl.title("prediction distribution")
pl.hexbin(df["acc_pred0"].values, df["acc_pred1"].values, gridsize = 30, marginals=True)
pl.xlim((-1.2, 1.2))
pl.ylim((-1.2, 1.2))
pl.colorbar()
pl.subplot(133)
pl.title("goal distance distribution")
pl.hist(df["dist_goal0"].values)
pl.show()
elif self.type == "seaborn":
print "goal", df[goal_col_1][0], df[goal_col_2][0]
ax = sns.jointplot(x=self.cols[0], y=self.cols[1], data=df)
print "ax", dir(ax)
# plot goal
print "df[goal_col_1][0], df[goal_col_2][0]", self.cols_goal_base, goal_col_1, goal_col_2, df[goal_col_1][0], df[goal_col_2][0]
ax.ax_joint.plot(df[goal_col_1][0], df[goal_col_2][0], "ro", alpha=0.5)
# pl.plot(df["vel_goal0"], df["vel_goal1"], "ro")
pl.show()
class PlotTimeseriesND(Plot):
"""Plot a hexbin scattermatrix for N-dim data"""
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
pl.ioff()
tbl_key, df, data, columns = get_data_from_item_log(items)
# transform df to new df
if hasattr(self, "cols"):
cols = self.cols
else:
cols = ["vel%d" % (i) for i in range(items[0].dim_s_motor)]
cols += ["acc_pred%d" % (i) for i in range(items[0].dim_s_motor)]
df2 = df[cols]
print df2
# goal columns
if not hasattr(self, "cols_goal_base"):
setattr(self, "cols_goal_base", "vel_goal")
# pp = sns.pairplot(df2)
# for i in range(3):
# for j in range(3): # 1, 2; 0, 2; 0, 1
# if i == j:
# continue
# pp.axes[i,j].plot(df["vel_goal%d" % i][0], df["vel_goal%d" % j][0], "ro", alpha=0.5)
# # print pp.axes
# # for axset in pp.axes:
# # print "a", axset
# # for
# # print "dir(pp)", dir(pp)
# pl.show()
g = sns.PairGrid(df2)
g.map_diag(pl.hist)
g.map_offdiag(pl.hexbin, cmap="gray", gridsize=30, bins="log");
# print "dir(g)", dir(g)
# print g.diag_axes
# print g.axes
for i in range(items[0].dim_s_motor):
for j in range(items[0].dim_s_motor): # 1, 2; 0, 2; 0, 1
if i == j:
continue
# column gives x axis, row gives y axis, thus need to reverse the selection for plotting goal
g.axes[i,j].plot(df["%s%d" % (self.cols_goal_base, j)], df["%s%d" % (self.cols_goal_base, i)], "ro", alpha=0.5)
pl.show()
pl.hist(df["dist_goal0"].values, bins=20)
pl.show()
class PlotExplautoSimplearm(Plot):
def __init__(self, conf):
Plot.__init__(self, conf)
def make_plot(self, items):
print "items", items
pl.ioff()
tbl_key, df, data, columns = get_data_from_item_log(items)
motors = df[["j_ang%d" % i for i in range(items[0].dim_s_motor)]]
goals = df[["j_ang_goal%d" % i for i in range(items[0].dim_s_motor)]]
# print "df", motors, columns #, df
fig = pl.figure()
for i,item in enumerate(items):
# fig.suptitle("Experiment %s" % (log.h5file.title))
ax = fig.add_subplot(len(items), 1, i+1)
for m in motors.values:
# print "m", m
item.env.env.plot_arm(ax = ax, m = m)
print "plot goal", goals.values[0]
item.env.env.plot_arm(ax = ax, m = goals.values[0], c="r")
pl.show()
################################################################################
class PlotTimeseries2(Plot):
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
# how many axes / plotitems
# configure subplotgrid
tbl_key = items[0].name
# tbl_key = items[0].conf["name"]
print "tbl_key", tbl_key
df = log.log_lognodes[tbl_key]
# data = log.h5file.root.item_pm_data.read()
# data = log.log_lognodes["pm"].values.T
# columns = log.log_lognodes["pm"].columns
data = df.values.T
columns = df.columns
# print "data.shape", data.shape
pl.ioff()
# create figure
fig = pl.figure()
fig.suptitle("Experiment %s, module %s" % (self.title, tbl_key))
for i in range(data.shape[0]): # loop over data items
ax1 = pl.subplot2grid((data.shape[0], 2), (i, 0))
ax1 = self.make_plot_timeseries(ax1, data[i], columns[i])
ax2 = pl.subplot2grid((data.shape[0], 2), (i, 1)) # second plotgrid column
ax2 = self.make_plot_histogram(ax2, data[i], columns[i])
# global for plot, use last axis
ax1.set_xlabel("t [steps]")
ax2.set_xlabel("counts")
# fig.show() # this doesn't work
pl.show()
def make_plot_timeseries(self, ax, data, columns):
ax.plot(data, "k-", alpha=0.5)
# print "columns[i]", type(columns[i])
ax.legend(["%s" % (columns)])
return ax
def make_plot_histogram(self, ax, data, columns):
ax.hist(data, bins=20, orientation="horizontal")
ax.legend(["%s" % (columns)])
# pl.hist(data.T, bins=20, orientation="horizontal")
return ax
class PlotTimeseriesNDrealtimeseries(Plot):
"""Plot a hexbin scattermatrix for N-dim data"""
def __init__(self, conf):
Plot.__init__(self, conf)
def run(self, items):
pl.ioff()
tbl_key, df, data, columns = get_data_from_item_log(items)
# transform df to new df
if hasattr(self, "cols"):
cols = self.cols
else:
cols = ["vel%d" % (i) for i in range(items[0].dim_s_motor)]
cols += ["acc_pred%d" % (i) for i in range(items[0].dim_s_motor)]
# FIXME: make generic
numplots = 1
cols_ext = []
for i in range(items[0].dim_s_extero):
colname = "pos_goal%d" % i
if colname in columns:
cols_ext += [colname]
numplots = 2
colname = "ee_pos%d" % i
if colname in columns:
cols_ext += [colname]
cols_error_prop = []
colnames_error_prop = ["avgerror_prop", "davgerror_prop", "avgderror_prop"]
for ec in colnames_error_prop:
if ec in columns:
# print "lalala", err_colname
cols_error_prop.append(ec)
cols_error_ext = []
colnames_error_ext = ["avgerror_ext", "davgerror_ext", "avgderror_ext"]
for ec in colnames_error_ext:
if ec in columns:
# print "lalala", err_colname
cols_error_ext.append(ec)
df2 = df[cols]
print df2
# goal columns
if not hasattr(self, "cols_goal_base"):
setattr(self, "cols_goal_base", "vel_goal")
pl.ioff()
# create figure
fig = pl.figure()
fig.suptitle("Experiment %s, module %s" % (self.title, tbl_key))
if numplots == 1:
pl.subplot(211)
else:
pl.subplot(411)
pl.title("Proprioceptive space")
x1 = df[cols].values
x2 = df[self.cols_goals].values
# print "x1.shape", x1.shape
x1plot = x1 + np.arange(x1.shape[1])
x2plot = x2 + np.arange(x2.shape[1])
print "x1plot.shape", x1plot.shape
pl.plot(x1plot)
pl.plot(x2plot)
if numplots == 1:
pl.subplot(212)
else: # numplots == 2:
pl.subplot(412)
pl.plot(df[cols_error_prop])
if numplots == 2:
pl.subplot(413)
pl.title("Exteroceptive space")
pl.plot(df[cols_ext])
print "cols_error_ext", cols_error_ext
pl.subplot(414)
pl.plot(df[cols_error_ext])
pl.show()
| mit |
PSPDFKit-labs/libdispatch | thirdparty/click/examples/repo/repo.py | 44 | 4802 | import os
import sys
import posixpath
import click
class Repo(object):
def __init__(self, home):
self.home = home
self.config = {}
self.verbose = False
def set_config(self, key, value):
self.config[key] = value
if self.verbose:
click.echo(' config[%s] = %s' % (key, value), file=sys.stderr)
def __repr__(self):
return '<Repo %r>' % self.home
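# pass_repo injects the Repo instance stored on the click context (created in
# cli() below) into any command decorated with it.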
pass_repo = click.make_pass_decorator(Repo)
@click.group()
@click.option('--repo-home', envvar='REPO_HOME', default='.repo',
metavar='PATH', help='Changes the repository folder location.')
@click.option('--config', nargs=2, multiple=True,
metavar='KEY VALUE', help='Overrides a config key/value pair.')
@click.option('--verbose', '-v', is_flag=True,
help='Enables verbose mode.')
@click.version_option('1.0')
@click.pass_context
def cli(ctx, repo_home, config, verbose):
"""Repo is a command line tool that showcases how to build complex
command line interfaces with Click.
This tool is supposed to look like a distributed version control
system to show how something like this can be structured.
"""
    # Create a repo object and remember it as the context object. From
# this point onwards other commands can refer to it by using the
# @pass_repo decorator.
ctx.obj = Repo(os.path.abspath(repo_home))
ctx.obj.verbose = verbose
for key, value in config:
ctx.obj.set_config(key, value)
@cli.command()
@click.argument('src')
@click.argument('dest', required=False)
@click.option('--shallow/--deep', default=False,
help='Makes a checkout shallow or deep. Deep by default.')
@click.option('--rev', '-r', default='HEAD',
help='Clone a specific revision instead of HEAD.')
@pass_repo
def clone(repo, src, dest, shallow, rev):
"""Clones a repository.
This will clone the repository at SRC into the folder DEST. If DEST
is not provided this will automatically use the last path component
of SRC and create that folder.
"""
if dest is None:
dest = posixpath.split(src)[-1] or '.'
click.echo('Cloning repo %s to %s' % (src, os.path.abspath(dest)))
repo.home = dest
if shallow:
click.echo('Making shallow checkout')
click.echo('Checking out revision %s' % rev)
@cli.command()
@click.confirmation_option()
@pass_repo
def delete(repo):
"""Deletes a repository.
This will throw away the current repository.
"""
click.echo('Destroying repo %s' % repo.home)
click.echo('Deleted!')
@cli.command()
@click.option('--username', prompt=True,
help='The developer\'s shown username.')
@click.option('--email', prompt='E-Mail',
help='The developer\'s email address')
@click.password_option(help='The login password.')
@pass_repo
def setuser(repo, username, email, password):
"""Sets the user credentials.
This will override the current user config.
"""
repo.set_config('username', username)
repo.set_config('email', email)
repo.set_config('password', '*' * len(password))
click.echo('Changed credentials.')
@cli.command()
@click.option('--message', '-m', multiple=True,
help='The commit message. If provided multiple times each '
'argument gets converted into a new line.')
@click.argument('files', nargs=-1, type=click.Path())
@pass_repo
def commit(repo, files, message):
"""Commits outstanding changes.
Commit changes to the given files into the repository. You will need to
"repo push" to push up your changes to other repositories.
If a list of files is omitted, all changes reported by "repo status"
will be committed.
"""
if not message:
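        # No -m option given: open the user's editor pre-filled with a hint block
        # and keep only the text above the marker line as the commit message.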
marker = '# Files to be committed:'
hint = ['', '', marker, '#']
for file in files:
hint.append('# U %s' % file)
message = click.edit('\n'.join(hint))
if message is None:
click.echo('Aborted!')
return
msg = message.split(marker)[0].rstrip()
if not msg:
click.echo('Aborted! Empty commit message')
return
else:
msg = '\n'.join(message)
click.echo('Files to be committed: %s' % (files,))
click.echo('Commit message:\n' + msg)
@cli.command(short_help='Copies files.')
@click.option('--force', is_flag=True,
help='forcibly copy over an existing managed file')
@click.argument('src', nargs=-1, type=click.Path())
@click.argument('dst', type=click.Path())
@pass_repo
def copy(repo, src, dst, force):
"""Copies one or multiple files to a new location. This copies all
files from SRC to DST.
"""
for fn in src:
click.echo('Copy from %s -> %s' % (fn, dst))
| apache-2.0 |
sorenk/ansible | lib/ansible/modules/network/f5/bigip_pool_member.py | 20 | 18557 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# Copyright (c) 2013 Matt Hite <[email protected]>
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_pool_member
short_description: Manages F5 BIG-IP LTM pool members
description:
- Manages F5 BIG-IP LTM pool members via iControl SOAP API.
version_added: 1.4
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- Requires BIG-IP software version >= 11
- F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
- Best run as a local_action in your playbook
- Supersedes bigip_pool for managing pool members
requirements:
- bigsuds
options:
state:
description:
- Pool member state.
required: True
default: present
choices:
- present
- absent
session_state:
description:
- Set new session availability status for pool member.
version_added: 2.0
choices:
- enabled
- disabled
monitor_state:
description:
- Set monitor availability status for pool member.
version_added: 2.0
choices:
- enabled
- disabled
pool:
description:
- Pool name. This pool must exist.
required: True
partition:
description:
- Partition
default: Common
host:
description:
- Pool member IP.
required: True
aliases:
- address
- name
port:
description:
- Pool member port.
required: True
connection_limit:
description:
- Pool member connection limit. Setting this to 0 disables the limit.
description:
description:
- Pool member description.
rate_limit:
description:
- Pool member rate limit (connections-per-second). Setting this to 0
disables the limit.
ratio:
description:
- Pool member ratio weight. Valid values range from 1 through 100.
New pool members -- unless overridden with this value -- default
to 1.
preserve_node:
description:
- When state is absent and the pool member is no longer referenced
in other pools, the default behavior removes the unused node
        object. Setting this to 'yes' disables this behavior.
default: no
choices:
- yes
- no
version_added: 2.1
priority_group:
description:
- Specifies a number representing the priority group for the pool member.
- When adding a new member, the default is 0, meaning that the member has no priority.
- To specify a priority, you must activate priority group usage when you
create a new pool or when adding or removing pool members. When activated,
the system load balances traffic according to the priority group number
assigned to the pool member.
- The higher the number, the higher the priority, so a member with a priority
of 3 has higher priority than a member with a priority of 1.
version_added: 2.5
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Add pool member
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: present
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
description: web server
connection_limit: 100
rate_limit: 50
ratio: 2
delegate_to: localhost
- name: Modify pool member ratio and description
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: present
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
ratio: 1
description: nginx server
delegate_to: localhost
- name: Remove pool member from pool
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: absent
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
delegate_to: localhost
# The BIG-IP GUI doesn't map directly to the API calls for "Pool ->
# Members -> State". The following states map to API monitor
# and session states.
#
# Enabled (all traffic allowed):
# monitor_state=enabled, session_state=enabled
# Disabled (only persistent or active connections allowed):
# monitor_state=enabled, session_state=disabled
# Forced offline (only active connections allowed):
# monitor_state=disabled, session_state=disabled
#
# See https://devcentral.f5.com/questions/icontrol-equivalent-call-for-b-node-down
- name: Force pool member offline
bigip_pool_member:
server: lb.mydomain.com
user: admin
password: secret
state: present
session_state: disabled
monitor_state: disabled
pool: my-pool
partition: Common
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
delegate_to: localhost
'''
try:
import bigsuds
HAS_BIGSUDS = True
except ImportError:
pass # Handled by f5_utils.bigsuds_found
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.f5_utils import bigip_api, bigsuds_found
HAS_DEVEL_IMPORTS = False
try:
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import fqdn_name
HAS_DEVEL_IMPORTS = True
except ImportError:
from ansible.module_utils.network.f5.common import fqdn_name
from ansible.module_utils.network.f5.common import f5_argument_spec
def pool_exists(api, pool):
# hack to determine if pool exists
result = False
try:
api.LocalLB.Pool.get_object_status(pool_names=[pool])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def member_exists(api, pool, address, port):
# hack to determine if member exists
result = False
try:
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
members=[members])
result = True
except bigsuds.OperationFailed as e:
if "was not found" in str(e):
result = False
else:
# genuine exception
raise
return result
def delete_node_address(api, address):
result = False
try:
api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
result = True
except bigsuds.OperationFailed as e:
if "is referenced by a member of pool" in str(e):
result = False
else:
# genuine exception
raise
return result
def remove_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.remove_member_v2(
pool_names=[pool],
members=[members]
)
def add_pool_member(api, pool, address, port):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.add_member_v2(
pool_names=[pool],
members=[members]
)
def get_connection_limit(api, pool, address, port):
members = [{'address': address, 'port': port}]
result = api.LocalLB.Pool.get_member_connection_limit(
pool_names=[pool],
members=[members]
)[0][0]
return result
def set_connection_limit(api, pool, address, port, limit):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.set_member_connection_limit(
pool_names=[pool],
members=[members],
limits=[[limit]]
)
def get_description(api, pool, address, port):
members = [{'address': address, 'port': port}]
result = api.LocalLB.Pool.get_member_description(
pool_names=[pool],
members=[members]
)[0][0]
return result
def set_description(api, pool, address, port, description):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.set_member_description(
pool_names=[pool],
members=[members],
descriptions=[[description]]
)
def get_rate_limit(api, pool, address, port):
members = [{'address': address, 'port': port}]
result = api.LocalLB.Pool.get_member_rate_limit(
pool_names=[pool],
members=[members]
)[0][0]
return result
def set_rate_limit(api, pool, address, port, limit):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.set_member_rate_limit(
pool_names=[pool],
members=[members],
limits=[[limit]]
)
def get_ratio(api, pool, address, port):
members = [{'address': address, 'port': port}]
result = api.LocalLB.Pool.get_member_ratio(
pool_names=[pool],
members=[members]
)[0][0]
return result
def set_ratio(api, pool, address, port, ratio):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.set_member_ratio(
pool_names=[pool],
members=[members],
ratios=[[ratio]]
)
def get_priority_group(api, pool, address, port):
members = [{'address': address, 'port': port}]
result = api.LocalLB.Pool.get_member_priority(
pool_names=[pool],
members=[members]
)[0][0]
return result
def set_priority_group(api, pool, address, port, priority_group):
members = [{'address': address, 'port': port}]
api.LocalLB.Pool.set_member_priority(
pool_names=[pool],
members=[members],
priorities=[[priority_group]]
)
def set_member_session_enabled_state(api, pool, address, port, session_state):
members = [{'address': address, 'port': port}]
session_state = ["STATE_%s" % session_state.strip().upper()]
api.LocalLB.Pool.set_member_session_enabled_state(
pool_names=[pool],
members=[members],
session_states=[session_state]
)
def get_member_session_status(api, pool, address, port):
members = [{'address': address, 'port': port}]
result = api.LocalLB.Pool.get_member_session_status(
pool_names=[pool],
members=[members]
)[0][0]
result = result.split("SESSION_STATUS_")[-1].lower()
return result
def set_member_monitor_state(api, pool, address, port, monitor_state):
members = [{'address': address, 'port': port}]
monitor_state = ["STATE_%s" % monitor_state.strip().upper()]
api.LocalLB.Pool.set_member_monitor_state(
pool_names=[pool],
members=[members],
monitor_states=[monitor_state]
)
def get_member_monitor_status(api, pool, address, port):
members = [{'address': address, 'port': port}]
result = api.LocalLB.Pool.get_member_monitor_status(
pool_names=[pool],
members=[members]
)[0][0]
result = result.split("MONITOR_STATUS_")[-1].lower()
return result
def main():
result = {}
argument_spec = f5_argument_spec
meta_args = dict(
session_state=dict(type='str', choices=['enabled', 'disabled']),
monitor_state=dict(type='str', choices=['enabled', 'disabled']),
pool=dict(type='str', required=True),
host=dict(type='str', required=True, aliases=['address', 'name']),
port=dict(type='int', required=True),
connection_limit=dict(type='int'),
description=dict(type='str'),
rate_limit=dict(type='int'),
ratio=dict(type='int'),
preserve_node=dict(type='bool', default=False),
priority_group=dict(type='int'),
state=dict(default='present', choices=['absent', 'present']),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True
)
if not bigsuds_found:
module.fail_json(msg="the python bigsuds module is required")
if module.params['validate_certs']:
import ssl
if not hasattr(ssl, 'SSLContext'):
module.fail_json(
msg='bigsuds does not support verifying certificates with python < 2.7.9. '
'Either update python or set validate_certs=False on the task')
server = module.params['server']
server_port = module.params['server_port']
user = module.params['user']
password = module.params['password']
state = module.params['state']
partition = module.params['partition']
validate_certs = module.params['validate_certs']
session_state = module.params['session_state']
monitor_state = module.params['monitor_state']
pool = fqdn_name(partition, module.params['pool'])
connection_limit = module.params['connection_limit']
description = module.params['description']
rate_limit = module.params['rate_limit']
ratio = module.params['ratio']
priority_group = module.params['priority_group']
host = module.params['host']
address = fqdn_name(partition, host)
port = module.params['port']
preserve_node = module.params['preserve_node']
if (host and port is None) or (port is not None and not host):
module.fail_json(msg="both host and port must be supplied")
if 0 > port or port > 65535:
module.fail_json(msg="valid ports must be in range 0 - 65535")
try:
api = bigip_api(server, user, password, validate_certs, port=server_port)
if not pool_exists(api, pool):
module.fail_json(msg="pool %s does not exist" % pool)
result = {'changed': False} # default
if state == 'absent':
if member_exists(api, pool, address, port):
if not module.check_mode:
remove_pool_member(api, pool, address, port)
if preserve_node:
result = {'changed': True}
else:
deleted = delete_node_address(api, address)
result = {'changed': True, 'deleted': deleted}
else:
result = {'changed': True}
elif state == 'present':
if not member_exists(api, pool, address, port):
if not module.check_mode:
add_pool_member(api, pool, address, port)
if connection_limit is not None:
set_connection_limit(api, pool, address, port, connection_limit)
if description is not None:
set_description(api, pool, address, port, description)
if rate_limit is not None:
set_rate_limit(api, pool, address, port, rate_limit)
if ratio is not None:
set_ratio(api, pool, address, port, ratio)
if session_state is not None:
set_member_session_enabled_state(api, pool, address, port, session_state)
if monitor_state is not None:
set_member_monitor_state(api, pool, address, port, monitor_state)
if priority_group is not None:
set_priority_group(api, pool, address, port, priority_group)
result = {'changed': True}
else:
# pool member exists -- potentially modify attributes
if connection_limit is not None and connection_limit != get_connection_limit(api, pool, address, port):
if not module.check_mode:
set_connection_limit(api, pool, address, port, connection_limit)
result = {'changed': True}
if description is not None and description != get_description(api, pool, address, port):
if not module.check_mode:
set_description(api, pool, address, port, description)
result = {'changed': True}
if rate_limit is not None and rate_limit != get_rate_limit(api, pool, address, port):
if not module.check_mode:
set_rate_limit(api, pool, address, port, rate_limit)
result = {'changed': True}
if ratio is not None and ratio != get_ratio(api, pool, address, port):
if not module.check_mode:
set_ratio(api, pool, address, port, ratio)
result = {'changed': True}
if session_state is not None:
session_status = get_member_session_status(api, pool, address, port)
if session_state == 'enabled' and session_status == 'forced_disabled':
if not module.check_mode:
set_member_session_enabled_state(api, pool, address, port, session_state)
result = {'changed': True}
elif session_state == 'disabled' and session_status != 'forced_disabled':
if not module.check_mode:
set_member_session_enabled_state(api, pool, address, port, session_state)
result = {'changed': True}
if monitor_state is not None:
monitor_status = get_member_monitor_status(api, pool, address, port)
if monitor_state == 'enabled' and monitor_status == 'forced_down':
if not module.check_mode:
set_member_monitor_state(api, pool, address, port, monitor_state)
result = {'changed': True}
elif monitor_state == 'disabled' and monitor_status != 'forced_down':
if not module.check_mode:
set_member_monitor_state(api, pool, address, port, monitor_state)
result = {'changed': True}
if priority_group is not None and priority_group != get_priority_group(api, pool, address, port):
if not module.check_mode:
set_priority_group(api, pool, address, port, priority_group)
result = {'changed': True}
except Exception as e:
module.fail_json(msg="received exception: %s" % e)
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
fujunwei/chromium-crosswalk | tools/json_schema_compiler/idl_schema_test.py | 32 | 15807 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import idl_schema
import unittest
from json_parse import OrderedDict
def getFunction(schema, name):
for item in schema['functions']:
if item['name'] == name:
return item
raise KeyError('Missing function %s' % name)
def getParams(schema, name):
function = getFunction(schema, name)
return function['parameters']
def getReturns(schema, name):
function = getFunction(schema, name)
return function['returns']
def getType(schema, id):
for item in schema['types']:
if item['id'] == id:
return item
class IdlSchemaTest(unittest.TestCase):
def setUp(self):
loaded = idl_schema.Load('test/idl_basics.idl')
self.assertEquals(1, len(loaded))
self.assertEquals('idl_basics', loaded[0]['namespace'])
self.idl_basics = loaded[0]
self.maxDiff = None
def testSimpleCallbacks(self):
schema = self.idl_basics
expected = [{'type': 'function', 'name': 'cb', 'parameters':[]}]
self.assertEquals(expected, getParams(schema, 'function4'))
expected = [{'type': 'function', 'name': 'cb',
'parameters':[{'name': 'x', 'type': 'integer'}]}]
self.assertEquals(expected, getParams(schema, 'function5'))
expected = [{'type': 'function', 'name': 'cb',
'parameters':[{'name': 'arg', '$ref': 'MyType1'}]}]
self.assertEquals(expected, getParams(schema, 'function6'))
def testCallbackWithArrayArgument(self):
schema = self.idl_basics
expected = [{'type': 'function', 'name': 'cb',
'parameters':[{'name': 'arg', 'type': 'array',
'items':{'$ref': 'MyType2'}}]}]
self.assertEquals(expected, getParams(schema, 'function12'))
def testArrayOfCallbacks(self):
schema = idl_schema.Load('test/idl_function_types.idl')[0]
expected = [{'type': 'array', 'name': 'callbacks',
'items':{'type': 'function', 'name': 'MyCallback',
'parameters':[{'type': 'integer', 'name': 'x'}]}}]
self.assertEquals(expected, getParams(schema, 'whatever'))
def testLegalValues(self):
self.assertEquals({
'x': {'name': 'x', 'type': 'integer', 'enum': [1,2],
'description': 'This comment tests "double-quotes".'},
'y': {'name': 'y', 'type': 'string'},
'z': {'name': 'z', 'type': 'string'},
'a': {'name': 'a', 'type': 'string'},
'b': {'name': 'b', 'type': 'string'},
'c': {'name': 'c', 'type': 'string'}},
getType(self.idl_basics, 'MyType1')['properties'])
def testMemberOrdering(self):
self.assertEquals(
['x', 'y', 'z', 'a', 'b', 'c'],
getType(self.idl_basics, 'MyType1')['properties'].keys())
def testEnum(self):
schema = self.idl_basics
expected = {'enum': [{'name': 'name1', 'description': 'comment1'},
{'name': 'name2'}],
'description': 'Enum description',
'type': 'string', 'id': 'EnumType'}
self.assertEquals(expected, getType(schema, expected['id']))
expected = [{'name': 'type', '$ref': 'EnumType'},
{'type': 'function', 'name': 'cb',
'parameters':[{'name': 'type', '$ref': 'EnumType'}]}]
self.assertEquals(expected, getParams(schema, 'function13'))
expected = [{'items': {'$ref': 'EnumType'}, 'name': 'types',
'type': 'array'}]
self.assertEquals(expected, getParams(schema, 'function14'))
def testScopedArguments(self):
schema = self.idl_basics
expected = [{'name': 'value', '$ref': 'idl_other_namespace.SomeType'}]
self.assertEquals(expected, getParams(schema, 'function20'))
expected = [{'items': {'$ref': 'idl_other_namespace.SomeType'},
'name': 'values',
'type': 'array'}]
self.assertEquals(expected, getParams(schema, 'function21'))
expected = [{'name': 'value',
'$ref': 'idl_other_namespace.sub_namespace.AnotherType'}]
self.assertEquals(expected, getParams(schema, 'function22'))
expected = [{'items': {'$ref': 'idl_other_namespace.sub_namespace.'
'AnotherType'},
'name': 'values',
'type': 'array'}]
self.assertEquals(expected, getParams(schema, 'function23'))
def testNoCompile(self):
schema = self.idl_basics
func = getFunction(schema, 'function15')
self.assertTrue(func is not None)
self.assertTrue(func['nocompile'])
def testNoDocOnEnum(self):
schema = self.idl_basics
enum_with_nodoc = getType(schema, 'EnumTypeWithNoDoc')
self.assertTrue(enum_with_nodoc is not None)
self.assertTrue(enum_with_nodoc['nodoc'])
def testInternalNamespace(self):
idl_basics = self.idl_basics
self.assertEquals('idl_basics', idl_basics['namespace'])
self.assertTrue(idl_basics['internal'])
self.assertFalse(idl_basics['nodoc'])
def testReturnTypes(self):
schema = self.idl_basics
self.assertEquals({'name': 'function24', 'type': 'integer'},
getReturns(schema, 'function24'))
self.assertEquals({'name': 'function25', '$ref': 'MyType1',
'optional': True},
getReturns(schema, 'function25'))
self.assertEquals({'name': 'function26', 'type': 'array',
'items': {'$ref': 'MyType1'}},
getReturns(schema, 'function26'))
self.assertEquals({'name': 'function27', '$ref': 'EnumType',
'optional': True},
getReturns(schema, 'function27'))
self.assertEquals({'name': 'function28', 'type': 'array',
'items': {'$ref': 'EnumType'}},
getReturns(schema, 'function28'))
self.assertEquals({'name': 'function29', '$ref':
'idl_other_namespace.SomeType',
'optional': True},
getReturns(schema, 'function29'))
self.assertEquals({'name': 'function30', 'type': 'array',
'items': {'$ref': 'idl_other_namespace.SomeType'}},
getReturns(schema, 'function30'))
def testChromeOSPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_chromeos.idl')[0]
self.assertEquals('idl_namespace_chromeos', schema['namespace'])
expected = ['chromeos']
self.assertEquals(expected, schema['platforms'])
def testAllPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_all_platforms.idl')[0]
self.assertEquals('idl_namespace_all_platforms', schema['namespace'])
expected = ['chromeos', 'chromeos_touch', 'linux', 'mac', 'win']
self.assertEquals(expected, schema['platforms'])
def testNonSpecificPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_non_specific_platforms.idl')[0]
self.assertEquals('idl_namespace_non_specific_platforms',
schema['namespace'])
expected = None
self.assertEquals(expected, schema['platforms'])
def testSpecificImplementNamespace(self):
schema = idl_schema.Load('test/idl_namespace_specific_implement.idl')[0]
self.assertEquals('idl_namespace_specific_implement',
schema['namespace'])
expected = 'idl_namespace_specific_implement.idl'
self.assertEquals(expected, schema['compiler_options']['implemented_in'])
def testSpecificImplementOnChromeOSNamespace(self):
schema = idl_schema.Load(
'test/idl_namespace_specific_implement_chromeos.idl')[0]
self.assertEquals('idl_namespace_specific_implement_chromeos',
schema['namespace'])
expected_implemented_path = 'idl_namespace_specific_implement_chromeos.idl'
expected_platform = ['chromeos']
self.assertEquals(expected_implemented_path,
schema['compiler_options']['implemented_in'])
self.assertEquals(expected_platform, schema['platforms'])
def testCallbackComment(self):
schema = self.idl_basics
self.assertEquals('A comment on a callback.',
getParams(schema, 'function16')[0]['description'])
self.assertEquals(
'A parameter.',
getParams(schema, 'function16')[0]['parameters'][0]['description'])
self.assertEquals(
'Just a parameter comment, with no comment on the callback.',
getParams(schema, 'function17')[0]['parameters'][0]['description'])
self.assertEquals(
'Override callback comment.',
getParams(schema, 'function18')[0]['description'])
def testFunctionComment(self):
schema = self.idl_basics
func = getFunction(schema, 'function3')
self.assertEquals(('This comment should appear in the documentation, '
'despite occupying multiple lines.'),
func['description'])
self.assertEquals(
[{'description': ('So should this comment about the argument. '
'<em>HTML</em> is fine too.'),
'name': 'arg',
'$ref': 'MyType1'}],
func['parameters'])
func = getFunction(schema, 'function4')
self.assertEquals(
'<p>This tests if "double-quotes" are escaped correctly.</p>'
'<p>It also tests a comment with two newlines.</p>',
func['description'])
def testReservedWords(self):
schema = idl_schema.Load('test/idl_reserved_words.idl')[0]
foo_type = getType(schema, 'Foo')
self.assertEquals([{'name': 'float'}, {'name': 'DOMString'}],
foo_type['enum'])
enum_type = getType(schema, 'enum')
self.assertEquals([{'name': 'callback'}, {'name': 'namespace'}],
enum_type['enum'])
dictionary = getType(schema, 'dictionary')
self.assertEquals('integer', dictionary['properties']['long']['type'])
mytype = getType(schema, 'MyType')
self.assertEquals('string', mytype['properties']['interface']['type'])
params = getParams(schema, 'static')
self.assertEquals('Foo', params[0]['$ref'])
self.assertEquals('enum', params[1]['$ref'])
def testObjectTypes(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
foo_type = getType(schema, 'FooType')
self.assertEquals('object', foo_type['type'])
self.assertEquals('integer', foo_type['properties']['x']['type'])
self.assertEquals('object', foo_type['properties']['y']['type'])
self.assertEquals(
'any',
foo_type['properties']['y']['additionalProperties']['type'])
self.assertEquals('object', foo_type['properties']['z']['type'])
self.assertEquals(
'any',
foo_type['properties']['z']['additionalProperties']['type'])
self.assertEquals('Window', foo_type['properties']['z']['isInstanceOf'])
bar_type = getType(schema, 'BarType')
self.assertEquals('object', bar_type['type'])
self.assertEquals('any', bar_type['properties']['x']['type'])
def testObjectTypesInFunctions(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
params = getParams(schema, 'objectFunction1')
self.assertEquals('object', params[0]['type'])
self.assertEquals('any', params[0]['additionalProperties']['type'])
self.assertEquals('ImageData', params[0]['isInstanceOf'])
params = getParams(schema, 'objectFunction2')
self.assertEquals('any', params[0]['type'])
def testObjectTypesWithOptionalFields(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
baz_type = getType(schema, 'BazType')
self.assertEquals(True, baz_type['properties']['x']['optional'])
self.assertEquals('integer', baz_type['properties']['x']['type'])
self.assertEquals(True, baz_type['properties']['foo']['optional'])
self.assertEquals('FooType', baz_type['properties']['foo']['$ref'])
def testObjectTypesWithUnions(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
union_type = getType(schema, 'UnionType')
expected = {
'type': 'object',
'id': 'UnionType',
'properties': {
'x': {
'name': 'x',
'optional': True,
'choices': [
{'type': 'integer'},
{'$ref': 'FooType'},
]
},
'y': {
'name': 'y',
'choices': [
{'type': 'string'},
{'type': 'object',
'additionalProperties': {'type': 'any'}}
]
},
'z': {
'name': 'z',
'choices': [
{'type': 'object', 'isInstanceOf': 'ImageData',
'additionalProperties': {'type': 'any'}},
{'type': 'integer'}
]
}
},
}
self.assertEquals(expected, union_type)
def testUnionsWithModifiers(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
union_type = getType(schema, 'ModifiedUnionType')
expected = {
'type': 'object',
'id': 'ModifiedUnionType',
'properties': {
'x': {
'name': 'x',
'nodoc': True,
'choices': [
{'type': 'integer'},
{'type': 'string'}
]
}
}
}
self.assertEquals(expected, union_type)
def testUnionsWithFunctions(self):
schema = idl_schema.Load('test/idl_function_types.idl')[0]
union_params = getParams(schema, 'union_params')
expected = [{
'name': 'x',
'choices': [
{'type': 'integer'},
{'type': 'string'}
]
}]
self.assertEquals(expected, union_params)
def testUnionsWithCallbacks(self):
schema = idl_schema.Load('test/idl_function_types.idl')[0]
blah_params = getParams(schema, 'blah')
expected = [{
'type': 'function', 'name': 'callback', 'parameters': [{
'name': 'x',
'choices': [
{'type': 'integer'},
{'type': 'string'}
]}
]
}]
self.assertEquals(expected, blah_params)
badabish_params = getParams(schema, 'badabish')
expected = [{
'type': 'function', 'name': 'callback', 'parameters': [{
'name': 'x', 'optional': True, 'choices': [
{'type': 'integer'},
{'type': 'string'}
]
}]
}]
self.assertEquals(expected, badabish_params)
def testProperties(self):
schema = idl_schema.Load('test/idl_properties.idl')[0]
self.assertEquals(OrderedDict([
('first', OrderedDict([
('description', 'Integer property.'),
('type', 'integer'),
('value', 42),
])),
('second', OrderedDict([
('description', 'Double property.'),
('type', 'number'),
('value', 42.0),
])),
('third', OrderedDict([
('description', 'String property.'),
('type', 'string'),
('value', 'hello world'),
])),
('fourth', OrderedDict([
('description', 'Unvalued property.'),
('type', 'integer'),
])),
]), schema.get('properties'))
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
naousse/odoo | addons/mail/controllers/main.py | 383 | 1733 | import base64
import psycopg2
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import content_disposition
import mimetypes
class MailController(http.Controller):
_cp_path = '/mail'
@http.route('/mail/download_attachment', type='http', auth='user')
def download_attachment(self, model, id, method, attachment_id, **kw):
# FIXME use /web/binary/saveas directly
Model = request.registry.get(model)
res = getattr(Model, method)(request.cr, request.uid, int(id), int(attachment_id))
if res:
filecontent = base64.b64decode(res.get('base64'))
filename = res.get('filename')
content_type = mimetypes.guess_type(filename)
if filecontent and filename:
return request.make_response(
filecontent,
headers=[('Content-Type', content_type[0] or 'application/octet-stream'),
('Content-Disposition', content_disposition(filename))])
return request.not_found()
@http.route('/mail/receive', type='json', auth='none')
def receive(self, req):
""" End-point to receive mail from an external SMTP server. """
dbs = req.jsonrequest.get('databases')
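        # Each entry maps a database name to a base64-encoded raw email which is
        # handed to mail.thread.message_process() inside that database's registry.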
for db in dbs:
message = dbs[db].decode('base64')
try:
registry = openerp.registry(db)
with registry.cursor() as cr:
mail_thread = registry['mail.thread']
mail_thread.message_process(cr, SUPERUSER_ID, None, message)
except psycopg2.Error:
pass
return True
| agpl-3.0 |
sriki18/scipy | scipy/interpolate/ndgriddata.py | 39 | 7457 | """
Convenience interface to N-D interpolation
.. versionadded:: 0.9
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \
CloughTocher2DInterpolator, _ndim_coords_from_arrays
from scipy.spatial import cKDTree
__all__ = ['griddata', 'NearestNDInterpolator', 'LinearNDInterpolator',
'CloughTocher2DInterpolator']
#------------------------------------------------------------------------------
# Nearest-neighbour interpolation
#------------------------------------------------------------------------------
class NearestNDInterpolator(NDInterpolatorBase):
"""
NearestNDInterpolator(points, values)
Nearest-neighbour interpolation in N dimensions.
.. versionadded:: 0.9
Methods
-------
__call__
Parameters
----------
x : (Npoints, Ndims) ndarray of floats
Data point coordinates.
y : (Npoints,) ndarray of float or complex
Data values.
rescale : boolean, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
tree_options : dict, optional
Options passed to the underlying ``cKDTree``.
.. versionadded:: 0.17.0
Notes
-----
Uses ``scipy.spatial.cKDTree``
"""
def __init__(self, x, y, rescale=False, tree_options=None):
NDInterpolatorBase.__init__(self, x, y, rescale=rescale,
need_contiguous=False,
need_values=False)
if tree_options is None:
tree_options = dict()
self.tree = cKDTree(self.points, **tree_options)
self.values = y
def __call__(self, *args):
"""
Evaluate interpolator at given points.
Parameters
----------
xi : ndarray of float, shape (..., ndim)
Points where to interpolate data at.
"""
xi = _ndim_coords_from_arrays(args, ndim=self.points.shape[1])
xi = self._check_call_shape(xi)
xi = self._scale_x(xi)
dist, i = self.tree.query(xi)
return self.values[i]
#------------------------------------------------------------------------------
# Convenience interface function
#------------------------------------------------------------------------------
def griddata(points, values, xi, method='linear', fill_value=np.nan,
rescale=False):
"""
Interpolate unstructured D-dimensional data.
Parameters
----------
points : ndarray of floats, shape (n, D)
Data point coordinates. Can either be an array of
shape (n, D), or a tuple of `ndim` arrays.
values : ndarray of float or complex, shape (n,)
Data values.
xi : ndarray of float, shape (M, D)
Points at which to interpolate data.
method : {'linear', 'nearest', 'cubic'}, optional
Method of interpolation. One of
``nearest``
return the value at the data point closest to
the point of interpolation. See `NearestNDInterpolator` for
more details.
``linear``
tesselate the input point set to n-dimensional
simplices, and interpolate linearly on each simplex. See
`LinearNDInterpolator` for more details.
``cubic`` (1-D)
return the value determined from a cubic
spline.
``cubic`` (2-D)
return the value determined from a
piecewise cubic, continuously differentiable (C1), and
approximately curvature-minimizing polynomial surface. See
`CloughTocher2DInterpolator` for more details.
fill_value : float, optional
Value used to fill in for requested points outside of the
convex hull of the input points. If not provided, then the
default is ``nan``. This option has no effect for the
'nearest' method.
rescale : bool, optional
Rescale points to unit cube before performing interpolation.
This is useful if some of the input dimensions have
incommensurable units and differ by many orders of magnitude.
.. versionadded:: 0.14.0
Notes
-----
.. versionadded:: 0.9
Examples
--------
Suppose we want to interpolate the 2-D function
>>> def func(x, y):
... return x*(1-x)*np.cos(4*np.pi*x) * np.sin(4*np.pi*y**2)**2
on a grid in [0, 1]x[0, 1]
>>> grid_x, grid_y = np.mgrid[0:1:100j, 0:1:200j]
but we only know its values at 1000 data points:
>>> points = np.random.rand(1000, 2)
>>> values = func(points[:,0], points[:,1])
This can be done with `griddata` -- below we try out all of the
interpolation methods:
>>> from scipy.interpolate import griddata
>>> grid_z0 = griddata(points, values, (grid_x, grid_y), method='nearest')
>>> grid_z1 = griddata(points, values, (grid_x, grid_y), method='linear')
>>> grid_z2 = griddata(points, values, (grid_x, grid_y), method='cubic')
One can see that the exact result is reproduced by all of the
methods to some degree, but for this smooth function the piecewise
cubic interpolant gives the best results:
>>> import matplotlib.pyplot as plt
>>> plt.subplot(221)
>>> plt.imshow(func(grid_x, grid_y).T, extent=(0,1,0,1), origin='lower')
>>> plt.plot(points[:,0], points[:,1], 'k.', ms=1)
>>> plt.title('Original')
>>> plt.subplot(222)
>>> plt.imshow(grid_z0.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Nearest')
>>> plt.subplot(223)
>>> plt.imshow(grid_z1.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Linear')
>>> plt.subplot(224)
>>> plt.imshow(grid_z2.T, extent=(0,1,0,1), origin='lower')
>>> plt.title('Cubic')
>>> plt.gcf().set_size_inches(6, 6)
>>> plt.show()
"""
points = _ndim_coords_from_arrays(points)
if points.ndim < 2:
ndim = points.ndim
else:
ndim = points.shape[-1]
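    # 1-D data is special-cased below: the points are sorted and interpolation is
    # delegated to interp1d, which handles 'nearest', 'linear' and 'cubic' directly.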
if ndim == 1 and method in ('nearest', 'linear', 'cubic'):
from .interpolate import interp1d
points = points.ravel()
if isinstance(xi, tuple):
if len(xi) != 1:
raise ValueError("invalid number of dimensions in xi")
xi, = xi
# Sort points/values together, necessary as input for interp1d
idx = np.argsort(points)
points = points[idx]
values = values[idx]
if method == 'nearest':
fill_value = 'extrapolate'
ip = interp1d(points, values, kind=method, axis=0, bounds_error=False,
fill_value=fill_value)
return ip(xi)
elif method == 'nearest':
ip = NearestNDInterpolator(points, values, rescale=rescale)
return ip(xi)
elif method == 'linear':
ip = LinearNDInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
elif method == 'cubic' and ndim == 2:
ip = CloughTocher2DInterpolator(points, values, fill_value=fill_value,
rescale=rescale)
return ip(xi)
else:
raise ValueError("Unknown interpolation method %r for "
"%d dimensional data" % (method, ndim))
| bsd-3-clause |
troygrosfield/Django-facebook | docs/docs_env/Lib/encodings/cp1251.py | 593 | 13617 | """ Python Character Mapping Codec cp1251 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1251.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1251',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\u0402' # 0x80 -> CYRILLIC CAPITAL LETTER DJE
u'\u0403' # 0x81 -> CYRILLIC CAPITAL LETTER GJE
u'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
u'\u0453' # 0x83 -> CYRILLIC SMALL LETTER GJE
u'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
u'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
u'\u2020' # 0x86 -> DAGGER
u'\u2021' # 0x87 -> DOUBLE DAGGER
u'\u20ac' # 0x88 -> EURO SIGN
u'\u2030' # 0x89 -> PER MILLE SIGN
u'\u0409' # 0x8A -> CYRILLIC CAPITAL LETTER LJE
u'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u040a' # 0x8C -> CYRILLIC CAPITAL LETTER NJE
u'\u040c' # 0x8D -> CYRILLIC CAPITAL LETTER KJE
u'\u040b' # 0x8E -> CYRILLIC CAPITAL LETTER TSHE
u'\u040f' # 0x8F -> CYRILLIC CAPITAL LETTER DZHE
u'\u0452' # 0x90 -> CYRILLIC SMALL LETTER DJE
u'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
u'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
u'\u2022' # 0x95 -> BULLET
u'\u2013' # 0x96 -> EN DASH
u'\u2014' # 0x97 -> EM DASH
u'\ufffe' # 0x98 -> UNDEFINED
u'\u2122' # 0x99 -> TRADE MARK SIGN
u'\u0459' # 0x9A -> CYRILLIC SMALL LETTER LJE
u'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u045a' # 0x9C -> CYRILLIC SMALL LETTER NJE
u'\u045c' # 0x9D -> CYRILLIC SMALL LETTER KJE
u'\u045b' # 0x9E -> CYRILLIC SMALL LETTER TSHE
u'\u045f' # 0x9F -> CYRILLIC SMALL LETTER DZHE
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u040e' # 0xA1 -> CYRILLIC CAPITAL LETTER SHORT U
u'\u045e' # 0xA2 -> CYRILLIC SMALL LETTER SHORT U
u'\u0408' # 0xA3 -> CYRILLIC CAPITAL LETTER JE
u'\xa4' # 0xA4 -> CURRENCY SIGN
u'\u0490' # 0xA5 -> CYRILLIC CAPITAL LETTER GHE WITH UPTURN
u'\xa6' # 0xA6 -> BROKEN BAR
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u0401' # 0xA8 -> CYRILLIC CAPITAL LETTER IO
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u0404' # 0xAA -> CYRILLIC CAPITAL LETTER UKRAINIAN IE
u'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xac' # 0xAC -> NOT SIGN
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u0407' # 0xAF -> CYRILLIC CAPITAL LETTER YI
u'\xb0' # 0xB0 -> DEGREE SIGN
u'\xb1' # 0xB1 -> PLUS-MINUS SIGN
u'\u0406' # 0xB2 -> CYRILLIC CAPITAL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0456' # 0xB3 -> CYRILLIC SMALL LETTER BYELORUSSIAN-UKRAINIAN I
u'\u0491' # 0xB4 -> CYRILLIC SMALL LETTER GHE WITH UPTURN
u'\xb5' # 0xB5 -> MICRO SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xb7' # 0xB7 -> MIDDLE DOT
u'\u0451' # 0xB8 -> CYRILLIC SMALL LETTER IO
u'\u2116' # 0xB9 -> NUMERO SIGN
u'\u0454' # 0xBA -> CYRILLIC SMALL LETTER UKRAINIAN IE
u'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u0458' # 0xBC -> CYRILLIC SMALL LETTER JE
u'\u0405' # 0xBD -> CYRILLIC CAPITAL LETTER DZE
u'\u0455' # 0xBE -> CYRILLIC SMALL LETTER DZE
u'\u0457' # 0xBF -> CYRILLIC SMALL LETTER YI
u'\u0410' # 0xC0 -> CYRILLIC CAPITAL LETTER A
u'\u0411' # 0xC1 -> CYRILLIC CAPITAL LETTER BE
u'\u0412' # 0xC2 -> CYRILLIC CAPITAL LETTER VE
u'\u0413' # 0xC3 -> CYRILLIC CAPITAL LETTER GHE
u'\u0414' # 0xC4 -> CYRILLIC CAPITAL LETTER DE
u'\u0415' # 0xC5 -> CYRILLIC CAPITAL LETTER IE
u'\u0416' # 0xC6 -> CYRILLIC CAPITAL LETTER ZHE
u'\u0417' # 0xC7 -> CYRILLIC CAPITAL LETTER ZE
u'\u0418' # 0xC8 -> CYRILLIC CAPITAL LETTER I
u'\u0419' # 0xC9 -> CYRILLIC CAPITAL LETTER SHORT I
u'\u041a' # 0xCA -> CYRILLIC CAPITAL LETTER KA
u'\u041b' # 0xCB -> CYRILLIC CAPITAL LETTER EL
u'\u041c' # 0xCC -> CYRILLIC CAPITAL LETTER EM
u'\u041d' # 0xCD -> CYRILLIC CAPITAL LETTER EN
u'\u041e' # 0xCE -> CYRILLIC CAPITAL LETTER O
u'\u041f' # 0xCF -> CYRILLIC CAPITAL LETTER PE
u'\u0420' # 0xD0 -> CYRILLIC CAPITAL LETTER ER
u'\u0421' # 0xD1 -> CYRILLIC CAPITAL LETTER ES
u'\u0422' # 0xD2 -> CYRILLIC CAPITAL LETTER TE
u'\u0423' # 0xD3 -> CYRILLIC CAPITAL LETTER U
u'\u0424' # 0xD4 -> CYRILLIC CAPITAL LETTER EF
u'\u0425' # 0xD5 -> CYRILLIC CAPITAL LETTER HA
u'\u0426' # 0xD6 -> CYRILLIC CAPITAL LETTER TSE
u'\u0427' # 0xD7 -> CYRILLIC CAPITAL LETTER CHE
u'\u0428' # 0xD8 -> CYRILLIC CAPITAL LETTER SHA
u'\u0429' # 0xD9 -> CYRILLIC CAPITAL LETTER SHCHA
u'\u042a' # 0xDA -> CYRILLIC CAPITAL LETTER HARD SIGN
u'\u042b' # 0xDB -> CYRILLIC CAPITAL LETTER YERU
u'\u042c' # 0xDC -> CYRILLIC CAPITAL LETTER SOFT SIGN
u'\u042d' # 0xDD -> CYRILLIC CAPITAL LETTER E
u'\u042e' # 0xDE -> CYRILLIC CAPITAL LETTER YU
u'\u042f' # 0xDF -> CYRILLIC CAPITAL LETTER YA
u'\u0430' # 0xE0 -> CYRILLIC SMALL LETTER A
u'\u0431' # 0xE1 -> CYRILLIC SMALL LETTER BE
u'\u0432' # 0xE2 -> CYRILLIC SMALL LETTER VE
u'\u0433' # 0xE3 -> CYRILLIC SMALL LETTER GHE
u'\u0434' # 0xE4 -> CYRILLIC SMALL LETTER DE
u'\u0435' # 0xE5 -> CYRILLIC SMALL LETTER IE
u'\u0436' # 0xE6 -> CYRILLIC SMALL LETTER ZHE
u'\u0437' # 0xE7 -> CYRILLIC SMALL LETTER ZE
u'\u0438' # 0xE8 -> CYRILLIC SMALL LETTER I
u'\u0439' # 0xE9 -> CYRILLIC SMALL LETTER SHORT I
u'\u043a' # 0xEA -> CYRILLIC SMALL LETTER KA
u'\u043b' # 0xEB -> CYRILLIC SMALL LETTER EL
u'\u043c' # 0xEC -> CYRILLIC SMALL LETTER EM
u'\u043d' # 0xED -> CYRILLIC SMALL LETTER EN
u'\u043e' # 0xEE -> CYRILLIC SMALL LETTER O
u'\u043f' # 0xEF -> CYRILLIC SMALL LETTER PE
u'\u0440' # 0xF0 -> CYRILLIC SMALL LETTER ER
u'\u0441' # 0xF1 -> CYRILLIC SMALL LETTER ES
u'\u0442' # 0xF2 -> CYRILLIC SMALL LETTER TE
u'\u0443' # 0xF3 -> CYRILLIC SMALL LETTER U
u'\u0444' # 0xF4 -> CYRILLIC SMALL LETTER EF
u'\u0445' # 0xF5 -> CYRILLIC SMALL LETTER HA
u'\u0446' # 0xF6 -> CYRILLIC SMALL LETTER TSE
u'\u0447' # 0xF7 -> CYRILLIC SMALL LETTER CHE
u'\u0448' # 0xF8 -> CYRILLIC SMALL LETTER SHA
u'\u0449' # 0xF9 -> CYRILLIC SMALL LETTER SHCHA
u'\u044a' # 0xFA -> CYRILLIC SMALL LETTER HARD SIGN
u'\u044b' # 0xFB -> CYRILLIC SMALL LETTER YERU
u'\u044c' # 0xFC -> CYRILLIC SMALL LETTER SOFT SIGN
u'\u044d' # 0xFD -> CYRILLIC SMALL LETTER E
u'\u044e' # 0xFE -> CYRILLIC SMALL LETTER YU
u'\u044f' # 0xFF -> CYRILLIC SMALL LETTER YA
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| bsd-3-clause |
jonashagstedt/tornado | tornado/test/websocket_test.py | 2 | 14565 | from __future__ import absolute_import, division, print_function, with_statement
import traceback
from tornado.concurrent import Future
from tornado import gen
from tornado.httpclient import HTTPError, HTTPRequest
from tornado.log import gen_log, app_log
from tornado.testing import AsyncHTTPTestCase, gen_test, bind_unused_port, ExpectLog
from tornado.test.util import unittest
from tornado.web import Application, RequestHandler
from tornado.util import u
try:
import tornado.websocket # noqa
from tornado.util import _websocket_mask_python
except ImportError:
# The unittest module presents misleading errors on ImportError
# (it acts as if websocket_test could not be found, hiding the underlying
# error). If we get an ImportError here (which could happen due to
# TORNADO_EXTENSION=1), print some extra information before failing.
traceback.print_exc()
raise
from tornado.websocket import WebSocketHandler, websocket_connect, WebSocketError
try:
from tornado import speedups
except ImportError:
speedups = None
class TestWebSocketHandler(WebSocketHandler):
"""Base class for testing handlers that exposes the on_close event.
This allows for deterministic cleanup of the associated socket.
"""
def initialize(self, close_future, compression_options=None):
self.close_future = close_future
self.compression_options = compression_options
def get_compression_options(self):
return self.compression_options
def on_close(self):
self.close_future.set_result((self.close_code, self.close_reason))
class EchoHandler(TestWebSocketHandler):
def on_message(self, message):
self.write_message(message, isinstance(message, bytes))
class ErrorInOnMessageHandler(TestWebSocketHandler):
def on_message(self, message):
1 / 0
class HeaderHandler(TestWebSocketHandler):
def open(self):
try:
# In a websocket context, many RequestHandler methods
# raise RuntimeErrors.
self.set_status(503)
raise Exception("did not get expected exception")
except RuntimeError:
pass
self.write_message(self.request.headers.get('X-Test', ''))
class NonWebSocketHandler(RequestHandler):
def get(self):
self.write('ok')
class CloseReasonHandler(TestWebSocketHandler):
def open(self):
self.on_close_called = False
self.close(1001, "goodbye")
class AsyncPrepareHandler(TestWebSocketHandler):
@gen.coroutine
def prepare(self):
yield gen.moment
def on_message(self, message):
self.write_message(message)
class WebSocketBaseTestCase(AsyncHTTPTestCase):
@gen.coroutine
def ws_connect(self, path, compression_options=None):
ws = yield websocket_connect(
'ws://127.0.0.1:%d%s' % (self.get_http_port(), path),
compression_options=compression_options)
raise gen.Return(ws)
@gen.coroutine
def close(self, ws):
"""Close a websocket connection and wait for the server side.
If we don't wait here, there are sometimes leak warnings in the
tests.
"""
ws.close()
yield self.close_future
class WebSocketTest(WebSocketBaseTestCase):
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(close_future=self.close_future)),
('/non_ws', NonWebSocketHandler),
('/header', HeaderHandler, dict(close_future=self.close_future)),
('/close_reason', CloseReasonHandler,
dict(close_future=self.close_future)),
('/error_in_on_message', ErrorInOnMessageHandler,
dict(close_future=self.close_future)),
('/async_prepare', AsyncPrepareHandler,
dict(close_future=self.close_future)),
])
def test_http_request(self):
# WS server, HTTP client.
response = self.fetch('/echo')
self.assertEqual(response.code, 400)
@gen_test
def test_websocket_gen(self):
ws = yield self.ws_connect('/echo')
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
def test_websocket_callbacks(self):
websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port(),
io_loop=self.io_loop, callback=self.stop)
ws = self.wait().result()
ws.write_message('hello')
ws.read_message(self.stop)
response = self.wait().result()
self.assertEqual(response, 'hello')
self.close_future.add_done_callback(lambda f: self.stop())
ws.close()
self.wait()
@gen_test
def test_binary_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(b'hello \xe9', binary=True)
response = yield ws.read_message()
self.assertEqual(response, b'hello \xe9')
yield self.close(ws)
@gen_test
def test_unicode_message(self):
ws = yield self.ws_connect('/echo')
ws.write_message(u('hello \u00e9'))
response = yield ws.read_message()
self.assertEqual(response, u('hello \u00e9'))
yield self.close(ws)
@gen_test
def test_error_in_on_message(self):
ws = yield self.ws_connect('/error_in_on_message')
ws.write_message('hello')
with ExpectLog(app_log, "Uncaught exception"):
response = yield ws.read_message()
self.assertIs(response, None)
yield self.close(ws)
@gen_test
def test_websocket_http_fail(self):
with self.assertRaises(HTTPError) as cm:
yield self.ws_connect('/notfound')
self.assertEqual(cm.exception.code, 404)
@gen_test
def test_websocket_http_success(self):
with self.assertRaises(WebSocketError):
yield self.ws_connect('/non_ws')
@gen_test
def test_websocket_network_fail(self):
sock, port = bind_unused_port()
sock.close()
with self.assertRaises(IOError):
with ExpectLog(gen_log, ".*"):
yield websocket_connect(
'ws://127.0.0.1:%d/' % port,
io_loop=self.io_loop,
connect_timeout=3600)
@gen_test
def test_websocket_close_buffered_data(self):
ws = yield websocket_connect(
'ws://127.0.0.1:%d/echo' % self.get_http_port())
ws.write_message('hello')
ws.write_message('world')
# Close the underlying stream.
ws.stream.close()
yield self.close_future
@gen_test
def test_websocket_headers(self):
# Ensure that arbitrary headers can be passed through websocket_connect.
ws = yield websocket_connect(
HTTPRequest('ws://127.0.0.1:%d/header' % self.get_http_port(),
headers={'X-Test': 'hello'}))
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_server_close_reason(self):
ws = yield self.ws_connect('/close_reason')
msg = yield ws.read_message()
# A message of None means the other side closed the connection.
self.assertIs(msg, None)
self.assertEqual(ws.close_code, 1001)
self.assertEqual(ws.close_reason, "goodbye")
# The on_close callback is called no matter which side closed.
yield self.close_future
@gen_test
def test_client_close_reason(self):
ws = yield self.ws_connect('/echo')
ws.close(1001, 'goodbye')
code, reason = yield self.close_future
self.assertEqual(code, 1001)
self.assertEqual(reason, 'goodbye')
@gen_test
def test_async_prepare(self):
# Previously, an async prepare method triggered a bug that would
# result in a timeout on test shutdown (and a memory leak).
ws = yield self.ws_connect('/async_prepare')
ws.write_message('hello')
res = yield ws.read_message()
self.assertEqual(res, 'hello')
@gen_test
def test_check_origin_valid_no_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_valid_with_path(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': 'http://127.0.0.1:%d/something' % port}
ws = yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
ws.write_message('hello')
response = yield ws.read_message()
self.assertEqual(response, 'hello')
yield self.close(ws)
@gen_test
def test_check_origin_invalid_partial_url(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
headers = {'Origin': '127.0.0.1:%d' % port}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid(self):
port = self.get_http_port()
url = 'ws://127.0.0.1:%d/echo' % port
# Host is 127.0.0.1, which should not be accessible from some other
# domain
headers = {'Origin': 'http://somewhereelse.com'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
@gen_test
def test_check_origin_invalid_subdomains(self):
port = self.get_http_port()
url = 'ws://localhost:%d/echo' % port
# Subdomains should be disallowed by default. If we could pass a
# resolver to websocket_connect we could test sibling domains as well.
headers = {'Origin': 'http://subtenant.localhost'}
with self.assertRaises(HTTPError) as cm:
yield websocket_connect(HTTPRequest(url, headers=headers),
io_loop=self.io_loop)
self.assertEqual(cm.exception.code, 403)
class CompressionTestMixin(object):
MESSAGE = 'Hello world. Testing 123 123'
def get_app(self):
self.close_future = Future()
return Application([
('/echo', EchoHandler, dict(
close_future=self.close_future,
compression_options=self.get_server_compression_options())),
])
def get_server_compression_options(self):
return None
def get_client_compression_options(self):
return None
@gen_test
def test_message_sizes(self):
ws = yield self.ws_connect(
'/echo',
compression_options=self.get_client_compression_options())
# Send the same message three times so we can measure the
# effect of the context_takeover options.
for i in range(3):
ws.write_message(self.MESSAGE)
response = yield ws.read_message()
self.assertEqual(response, self.MESSAGE)
self.assertEqual(ws.protocol._message_bytes_out, len(self.MESSAGE) * 3)
self.assertEqual(ws.protocol._message_bytes_in, len(self.MESSAGE) * 3)
self.verify_wire_bytes(ws.protocol._wire_bytes_in,
ws.protocol._wire_bytes_out)
yield self.close(ws)
class UncompressedTestMixin(CompressionTestMixin):
"""Specialization of CompressionTestMixin when we expect no compression."""
def verify_wire_bytes(self, bytes_in, bytes_out):
# Bytes out includes the 4-byte mask key per message.
self.assertEqual(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertEqual(bytes_in, 3 * (len(self.MESSAGE) + 2))
class NoCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
pass
# If only one side tries to compress, the extension is not negotiated.
class ServerOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
class ClientOnlyCompressionTest(UncompressedTestMixin, WebSocketBaseTestCase):
def get_client_compression_options(self):
return {}
class DefaultCompressionTest(CompressionTestMixin, WebSocketBaseTestCase):
def get_server_compression_options(self):
return {}
def get_client_compression_options(self):
return {}
def verify_wire_bytes(self, bytes_in, bytes_out):
self.assertLess(bytes_out, 3 * (len(self.MESSAGE) + 6))
self.assertLess(bytes_in, 3 * (len(self.MESSAGE) + 2))
# Bytes out includes the 4 bytes mask key per message.
self.assertEqual(bytes_out, bytes_in + 12)
class MaskFunctionMixin(object):
# Subclasses should define self.mask(mask, data)
def test_mask(self):
self.assertEqual(self.mask(b'abcd', b''), b'')
self.assertEqual(self.mask(b'abcd', b'b'), b'\x03')
self.assertEqual(self.mask(b'abcd', b'54321'), b'TVPVP')
self.assertEqual(self.mask(b'ZXCV', b'98765432'), b'c`t`olpd')
# Include test cases with \x00 bytes (to ensure that the C
# extension isn't depending on null-terminated strings) and
# bytes with the high bit set (to smoke out signedness issues).
self.assertEqual(self.mask(b'\x00\x01\x02\x03',
b'\xff\xfb\xfd\xfc\xfe\xfa'),
b'\xff\xfa\xff\xff\xfe\xfb')
self.assertEqual(self.mask(b'\xff\xfb\xfd\xfc',
b'\x00\x01\x02\x03\x04\x05'),
b'\xff\xfa\xff\xff\xfb\xfe')
class PythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return _websocket_mask_python(mask, data)
@unittest.skipIf(speedups is None, "tornado.speedups module not present")
class CythonMaskFunctionTest(MaskFunctionMixin, unittest.TestCase):
def mask(self, mask, data):
return speedups.websocket_mask(mask, data)
| apache-2.0 |
aeonick/skyin | blog/mylib.py | 1 | 8661 | # -*- coding: utf-8 -*-
from blogDB import get_db
class blogInfo:
def __init__(self):
blogdb = get_db()
cur = blogdb.cursor()
cur.execute('SELECT id,content from info order by id')
temp = cur.fetchall()
self.title=temp[4][1]
self.subtitle=temp[3][1]
self.password=temp[2][1]
self.sidebar=temp[1][1]
self.tags=temp[0][1]
self.cate=dict(temp[6:])
def verify(self,password=''):
import hashlib
m = hashlib.md5()
m.update(password)
m.update(m.hexdigest()+'1396')
if m.hexdigest()==self.password:
return True
else:
return False
def config(self,title='',subtitle='',sidebar='',tags=''):
blogdb = get_db()
cur = blogdb.cursor()
if title:
cur.execute('UPDATE info SET content = %s where id = -1',(title,))
if subtitle:
cur.execute('UPDATE info SET content = %s where id = -2',(subtitle,))
if sidebar:
cur.execute('UPDATE info SET content = %s where id = -4',(sidebar,))
if tags:
cur.execute('UPDATE info SET content = %s where id = -5',(tags,))
blogdb.commit()
def setPwd(self,old,new):
import hashlib
m = hashlib.md5()
m.update(old)
m.update(m.hexdigest()+'1396')
if m.hexdigest()==self.password:
m = hashlib.md5()
m.update(new)
m.update(m.hexdigest()+'1396')
blogdb = get_db()
cur = blogdb.cursor()
cur.execute('UPDATE info SET content = %s where id = -3',(m.hexdigest(),))
blogdb.commit()
return 'Success'
else:
return "Couldn't match"
def setCate(self,oldId,newId,content):
blogdb = get_db()
cur = blogdb.cursor()
try:
if newId<1:
cur.execute('delete from info where id = %s',(oldId,))
cur.execute('UPDATE blog SET file=0 where file = %s',(oldId,))
blogdb.commit()
if oldId==0:
cur.execute('insert into info (id,content) values (%s, %s)', (newId,content))
else:
cur.execute('UPDATE info SET id=%s,content=%s where id = %s',(newId,content,oldId))
cur.execute('UPDATE blog SET file=%s where file = %s',(newId,oldId))
blogdb.commit()
return 'Success'
except:
return 'Database Error'
class Article:
def __init__(self,id=0):
self.id = id
def getIt(self):
blogdb = get_db()
cur = blogdb.cursor()
cur.execute('SELECT title, date, content, tag, abstract, file,img from blog where id = %s',(self.id,))
arti = cur.fetchall()[0]
self.title = arti[0]
self.date = arti[1]
if hasattr(self.date,'strftime'):
self.date = self.date.strftime("%Y-%m-%d %H:%M:%S")
self.content = arti[2]
self.tag = arti[3] or ''
self.abstract = arti[4]
self.file=arti[5]
self.img=arti[6] or ''
def edit(self, title, tag, img, file, content):
abstract = abstr(content)
tags = (tag or '').replace(',',',')
blogdb = get_db()
cur = blogdb.cursor()
if self.id:
cur.execute('UPDATE blog SET title = %s ,content = %s,abstract = %s,tag = %s ,file = %s ,img=%s WHERE ID = %s;', (title, content,abstract,tags,file,img, self.id))
else:
cur.execute('insert into blog (title,tag,file,abstract,content,img) values (%s, %s, %s, %s, %s, %s)', (title,tags,file,abstract,content,img))
cur.execute('select id from blog order by id desc limit 1')
blog = cur.fetchall()
self.id = blog[0][0]
blogdb.commit()
cur.execute('delete from tag where blog = %s',(self.id,))
tags = tags.split(',')
for tag in tags:
cur.execute('insert into tag (tag, blog) values (%s, %s)', (tag, self.id))
blogdb.commit()
def delIt(self):
blogdb = get_db()
cur = blogdb.cursor()
cur.execute('DELETE FROM blog WHERE id = %s ',(self.id,))
cur.execute('DELETE FROM tag WHERE blog = %s ',(self.id,))
cur.execute('DELETE FROM comm WHERE blog = %s ',(self.id,))
blogdb.commit()
def hideIt(self):
blogdb = get_db()
cur = blogdb.cursor()
cur.execute('update blog set file = 0 WHERE id = %s ',(self.id,))
cur.execute('DELETE FROM tag WHERE blog = %s ',(self.id,))
blogdb.commit()
class Comment:
def __init__(self, id=0):
self.id = id
def getIt(self):
blogdb = get_db()
cur = blogdb.cursor()
cur.execute(' SELECT content, date, author, id, reply FROM comm WHERE blog = %s ORDER BY id DESC',(self.id,))
temp = cur.fetchall()
def preRep(c):
c=list(c)+['']
if c[4]:
c[5]='1'
else:
c[4]=c[3]
return c
temp = map(preRep,list(temp))
def coSort(x,y):
if x[4]<y[4]:
return 1
else:
return -1
temp.sort(coSort)
self.cl = temp
def getNew(self):
blogdb = get_db()
cur = blogdb.cursor()
cur.execute(' SELECT content, date, author, id, blog FROM comm ORDER BY id DESC LIMIT 8 ')
temp = cur.fetchall()
self.cl = temp
return self.cl
def insert(self, content, author, reply):
author = author or u'访客'
reply = reply or None
blogdb = get_db()
cur = blogdb.cursor()
cur.execute('insert into comm (content, author, blog, reply) values (%s, %s, %s, %s)', (content, author, self.id, reply))
blogdb.commit()
def delIt(self,cid):
blogdb = get_db()
cur = blogdb.cursor()
cur.execute('DELETE FROM comm WHERE id = %s ',(cid,))
blogdb.commit()
class ArtiList:
def __init__(self,method = '',key = '',page = 1):
self.method = method
self.key = key
self.offset = ( page - 1 ) * 8
self.page = page
def getRe(self):
results = []
for arti in self.al:
temp = Article(arti)
temp.getIt()
results.append(temp)
self.results = results
def getPagn(self):
blogdb = get_db()
cur = blogdb.cursor()
if self.method == 'file':
cur.execute('SELECT count(*) FROM blog WHERE file = %s;',(self.key,))
elif self.method == 'tag':
cur.execute('SELECT count(*) FROM tag WHERE tag = %s;',(self.key,))
else:
cur.execute('SELECT count(*) FROM blog where file>0;')
pMax = cur.fetchall()
pMax = (int(pMax[0][0])+7)/8
if self.page<=pMax and self.page>0:
pagn = [[x,'',x] for x in range(1,pMax+1)]
pagn[self.page-1][1]='active'
if self.page==1:
before=[]
else:
before=[['','prev',self.page-1],]
if self.page==pMax:
after=[]
else:
after=[['','next',self.page+1],]
self.pagn = before+pagn+after
else:
self.pagn = []
return self.pagn
def getAl(self):
blogdb = get_db()
cur = blogdb.cursor()
if self.method == 'file':
cur.execute(' SELECT id FROM blog WHERE file = %s ORDER BY id DESC LIMIT 8 OFFSET %s',(self.key,self.offset,))
elif self.method == 'tag':
cur.execute('select blog from tag where tag = %s LIMIT 8 OFFSET %s',(self.key,self.offset,))
else:
cur.execute(' SELECT id FROM blog where file>0 ORDER BY id DESC LIMIT 8 OFFSET %s',(self.offset,))
al = cur.fetchall()
al = map(lambda x: int(x[0]),al)
al.sort(reverse = True)
self.al = al
def abstr(text):
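    # Editor's note: build a short plain-text abstract from (possibly HTML)
    # post content -- block-level closing tags and <br> become newlines, the
    # remaining tag characters are dropped by fn() below, blank lines are
    # collapsed, and the result is truncated to ~120 characters plus '...'.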
text = text[:1200]
text = text.replace(u' ',u' ')
text = text.replace(u'</p',u'\n<')
text = text.replace(u'</b',u'\n<')
text = text.replace(u'</h',u'\n<')
text = text.replace(u'<br>',u'\n')
def fn(x, y):
if x[-1] == "<" and y != ">":
return x
else:
return x+y
if text:
text = reduce(fn,text)
text = text.replace(u'<>',u'')
text = text.replace(u'\n\n\n',u'\n')
text = text.replace(u'\n\n',u'\n')
text = text[:120]
while text[0]==u'\n':
text=text[1:]
while text[-1]==u'\n':
text=text[:-1]
text=text+'...'
return text | apache-2.0 |
jpablobr/emacs.d | vendor/misc/emacs-skype/build/Skype4Py/Skype4Py/sms.py | 21 | 7879 | """Short messaging system.
"""
__docformat__ = 'restructuredtext en'
from utils import *
class SmsMessage(Cached):
"""Represents an SMS message.
"""
_ValidateHandle = int
def __repr__(self):
return Cached.__repr__(self, 'Id')
def _Alter(self, AlterName, Args=None):
return self._Owner._Alter('SMS', self.Id, AlterName, Args)
def _Init(self):
self._MakeOwner()
def _Property(self, PropName, Set=None, Cache=True):
return self._Owner._Property('SMS', self.Id, PropName, Set, Cache)
def Delete(self):
"""Deletes this SMS message.
"""
self._Owner._DoCommand('DELETE SMS %s' % self.Id)
def MarkAsSeen(self):
"""Marks this SMS message as seen.
"""
self._Owner._DoCommand('SET SMS %s SEEN' % self.Id)
def Send(self):
"""Sends this SMS message.
"""
self._Alter('SEND')
def _GetBody(self):
return self._Property('BODY')
def _SetBody(self, Value):
self._Property('BODY', Value)
Body = property(_GetBody, _SetBody,
doc="""Text of this SMS message.
:type: unicode
""")
def _GetChunks(self):
return SmsChunkCollection(self, xrange(int(chop(self._Property('CHUNKING', Cache=False))[0])))
Chunks = property(_GetChunks,
doc="""Chunks of this SMS message. More than one if this is a multi-part message.
:type: `SmsChunkCollection`
""")
def _GetDatetime(self):
from datetime import datetime
return datetime.fromtimestamp(self.Timestamp)
Datetime = property(_GetDatetime,
doc="""Timestamp of this SMS message as datetime object.
:type: datetime.datetime
""")
def _GetFailureReason(self):
return str(self._Property('FAILUREREASON'))
FailureReason = property(_GetFailureReason,
doc="""Reason an SMS message failed. Read this if `Status` == `enums.smsMessageStatusFailed`.
:type: `enums`.smsFailureReason*
""")
def _GetId(self):
return self._Handle
Id = property(_GetId,
doc="""Unique SMS message Id.
:type: int
""")
def _GetIsFailedUnseen(self):
return (self._Property('IS_FAILED_UNSEEN') == 'TRUE')
IsFailedUnseen = property(_GetIsFailedUnseen,
doc="""Tells if a failed SMS message was unseen.
:type: bool
""")
def _GetPrice(self):
return int(self._Property('PRICE'))
Price = property(_GetPrice,
doc="""SMS price. Expressed using `PricePrecision`. For a value expressed using `PriceCurrency`, use `PriceValue`.
:type: int
:see: `PriceCurrency`, `PricePrecision`, `PriceToText`, `PriceValue`
""")
def _GetPriceCurrency(self):
return self._Property('PRICE_CURRENCY')
PriceCurrency = property(_GetPriceCurrency,
doc="""SMS price currency.
:type: unicode
:see: `Price`, `PricePrecision`, `PriceToText`, `PriceValue`
""")
def _GetPricePrecision(self):
return int(self._Property('PRICE_PRECISION'))
PricePrecision = property(_GetPricePrecision,
doc="""SMS price precision.
:type: int
:see: `Price`, `PriceCurrency`, `PriceToText`, `PriceValue`
""")
def _GetPriceToText(self):
return (u'%s %.3f' % (self.PriceCurrency, self.PriceValue)).strip()
PriceToText = property(_GetPriceToText,
doc="""SMS price as properly formatted text with currency.
:type: unicode
:see: `Price`, `PriceCurrency`, `PricePrecision`, `PriceValue`
""")
def _GetPriceValue(self):
if self.Price < 0:
return 0.0
return float(self.Price) / (10 ** self.PricePrecision)
PriceValue = property(_GetPriceValue,
doc="""SMS price. Expressed in `PriceCurrency`.
:type: float
:see: `Price`, `PriceCurrency`, `PricePrecision`, `PriceToText`
""")
def _GetReplyToNumber(self):
return str(self._Property('REPLY_TO_NUMBER'))
def _SetReplyToNumber(self, Value):
self._Property('REPLY_TO_NUMBER', Value)
ReplyToNumber = property(_GetReplyToNumber, _SetReplyToNumber,
doc="""Reply-to number for this SMS message.
:type: str
""")
def _SetSeen(self, Value):
from warnings import warn
warn('SmsMessage.Seen = x: Use SmsMessage.MarkAsSeen() instead.', DeprecationWarning, stacklevel=2)
if Value:
self.MarkAsSeen()
else:
raise SkypeError(0, 'Seen can only be set to True')
Seen = property(fset=_SetSeen,
doc="""Set the read status of the SMS message. Accepts only True value.
:type: bool
:deprecated: Extremely unpythonic, use `MarkAsSeen` instead.
""")
def _GetStatus(self):
return str(self._Property('STATUS'))
Status = property(_GetStatus,
doc="""SMS message status.
:type: `enums`.smsMessageStatus*
""")
def _GetTargetNumbers(self):
return tuple(split(self._Property('TARGET_NUMBERS'), ', '))
def _SetTargetNumbers(self, Value):
self._Property('TARGET_NUMBERS', ', '.join(Value))
TargetNumbers = property(_GetTargetNumbers, _SetTargetNumbers,
doc="""Target phone numbers.
:type: tuple of str
""")
def _GetTargets(self):
return SmsTargetCollection(self, split(self._Property('TARGET_NUMBERS'), ', '))
Targets = property(_GetTargets,
doc="""Target objects.
:type: `SmsTargetCollection`
""")
def _GetTimestamp(self):
return float(self._Property('TIMESTAMP'))
Timestamp = property(_GetTimestamp,
doc="""Timestamp of this SMS message.
:type: float
:see: `Datetime`
""")
def _GetType(self):
return str(self._Property('TYPE'))
Type = property(_GetType,
doc="""SMS message type
:type: `enums`.smsMessageType*
""")
class SmsMessageCollection(CachedCollection):
_CachedType = SmsMessage
class SmsChunk(Cached):
"""Represents a single chunk of a multi-part SMS message.
"""
_ValidateHandle = int
def __repr__(self):
return Cached.__repr__(self, 'Id', 'Message')
def _GetCharactersLeft(self):
count, left = map(int, chop(self.Message._Property('CHUNKING', Cache=False)))
if self.Id == count - 1:
return left
return 0
CharactersLeft = property(_GetCharactersLeft,
doc="""CharactersLeft.
:type: int
""")
def _GetId(self):
return self._Handle
Id = property(_GetId,
doc="""SMS chunk Id.
:type: int
""")
def _GetMessage(self):
return self._Owner
Message = property(_GetMessage,
doc="""SMS message associated with this chunk.
:type: `SmsMessage`
""")
def _GetText(self):
return self.Message._Property('CHUNK %s' % self.Id)
Text = property(_GetText,
doc="""Text (body) of this SMS chunk.
:type: unicode
""")
class SmsChunkCollection(CachedCollection):
_CachedType = SmsChunk
class SmsTarget(Cached):
"""Represents a single target of a multi-target SMS message.
"""
_ValidateHandle = str
def __repr__(self):
return Cached.__repr__(self, 'Number', 'Message')
def _GetMessage(self):
return self._Owner
Message = property(_GetMessage,
doc="""An SMS message object this target refers to.
:type: `SmsMessage`
""")
def _GetNumber(self):
return self._Handle
Number = property(_GetNumber,
doc="""Target phone number.
:type: str
""")
def _GetStatus(self):
for t in split(self.Message._Property('TARGET_STATUSES'), ', '):
number, status = t.split('=')
if number == self.Number:
return str(status)
Status = property(_GetStatus,
doc="""Status of this target.
:type: `enums`.smsTargetStatus*
""")
class SmsTargetCollection(CachedCollection):
_CachedType = SmsTarget
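# Editor's usage sketch (hedged; not part of Skype4Py itself): SmsMessage
# objects are normally obtained from the Skype client object rather than
# constructed directly.  Given such an object `sms`, the properties defined
# above could be used roughly like this (the phone number is illustrative):
#
#     sms.Body = u'Hello from Skype4Py'
#     sms.TargetNumbers = ('+15551234567',)
#     sms.Send()
#     for target in sms.Targets:
#         print target.Number, target.Status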
| gpl-3.0 |
toudi/katana | test/test_communication.py | 1 | 1067 | from unittest import TestCase
import socket
from json import loads, dumps
from uuid import UUID
INVALID_OPERATION = {
'status': 'error',
'message': 'Invalid operation'
}
class CommunicationTestCase(TestCase):
def send(self, message):
s = socket.socket(socket.AF_UNIX)
s.connect('katana.sock')
s.sendall(dumps(message) + '\n')
message = loads(s.recv(4096))
s.close()
if message['status'] == 'error':
raise Exception(message['message'])
return message['result']
def test_that_passing_invalid_json_results_in_error(self):
self.assertRaises(Exception, self.send)
def test_that_passing_invalid_operation_results_in_error(self):
self.assertRaises(Exception, self.send, {'action': 'inexisting'})
def test_that_initiated_transaction_has_a_valid_uuid(self):
transaction = self.send({
'action': 'begin_transaction',
})
self.assertIsNotNone(transaction)
x = UUID(transaction)
self.assertEquals(x.hex, transaction) | mit |
nagyistoce/photivo | scons-local-2.2.0/SCons/Tool/textfile.py | 14 | 6079 | # -*- python -*-
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
Textfile/Substfile builder for SCons.
Create file 'target' which typically is a textfile. The 'source'
may be any combination of strings, Nodes, or lists of same. A
'linesep' will be put between any part written and defaults to
os.linesep.
The only difference between the Textfile builder and the Substfile
builder is that strings are converted to Value() nodes for the
former and File() nodes for the latter. To insert files in the
former or strings in the latter, wrap them in a File() or Value(),
respectively.
The values of SUBST_DICT first have any construction variables
expanded (its keys are not expanded). If a value of SUBST_DICT is
a python callable function, it is called and the result is expanded
as the value. Values are substituted in a "random" order; if any
substitution could be further expanded by another substitution, it
is unpredictable whether the expansion will occur.
"""
__revision__ = "src/engine/SCons/Tool/textfile.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons
import os
import re
from SCons.Node import Node
from SCons.Node.Python import Value
from SCons.Util import is_String, is_Sequence, is_Dict
def _do_subst(node, subs):
"""
Fetch the node contents and replace all instances of the keys with
their values. For example, if subs is
{'%VERSION%': '1.2345', '%BASE%': 'MyProg', '%prefix%': '/bin'},
then all instances of %VERSION% in the file will be replaced with
1.2345 and so forth.
"""
contents = node.get_text_contents()
if not subs: return contents
for (k,v) in subs:
contents = re.sub(k, v, contents)
return contents
def _action(target, source, env):
# prepare the line separator
linesep = env['LINESEPARATOR']
if linesep is None:
linesep = os.linesep
elif is_String(linesep):
pass
elif isinstance(linesep, Value):
linesep = linesep.get_text_contents()
else:
raise SCons.Errors.UserError(
'unexpected type/class for LINESEPARATOR: %s'
% repr(linesep), None)
# create a dictionary to use for the substitutions
if 'SUBST_DICT' not in env:
subs = None # no substitutions
else:
d = env['SUBST_DICT']
if is_Dict(d):
d = list(d.items())
elif is_Sequence(d):
pass
else:
raise SCons.Errors.UserError('SUBST_DICT must be dict or sequence')
subs = []
for (k,v) in d:
if callable(v):
v = v()
if is_String(v):
v = env.subst(v)
else:
v = str(v)
subs.append((k,v))
# write the file
try:
fd = open(target[0].get_path(), "wb")
except (OSError,IOError), e:
raise SCons.Errors.UserError("Can't write target file %s" % target[0])
# separate lines by 'linesep' only if linesep is not empty
lsep = None
for s in source:
if lsep: fd.write(lsep)
fd.write(_do_subst(s, subs))
lsep = linesep
fd.close()
def _strfunc(target, source, env):
return "Creating '%s'" % target[0]
def _convert_list_R(newlist, sources):
for elem in sources:
if is_Sequence(elem):
_convert_list_R(newlist, elem)
elif isinstance(elem, Node):
newlist.append(elem)
else:
newlist.append(Value(elem))
def _convert_list(target, source, env):
if len(target) != 1:
raise SCons.Errors.UserError("Only one target file allowed")
newlist = []
_convert_list_R(newlist, source)
return target, newlist
_common_varlist = ['SUBST_DICT', 'LINESEPARATOR']
_text_varlist = _common_varlist + ['TEXTFILEPREFIX', 'TEXTFILESUFFIX']
_text_builder = SCons.Builder.Builder(
action = SCons.Action.Action(_action, _strfunc, varlist = _text_varlist),
source_factory = Value,
emitter = _convert_list,
prefix = '$TEXTFILEPREFIX',
suffix = '$TEXTFILESUFFIX',
)
_subst_varlist = _common_varlist + ['SUBSTFILEPREFIX', 'TEXTFILESUFFIX']
_subst_builder = SCons.Builder.Builder(
action = SCons.Action.Action(_action, _strfunc, varlist = _subst_varlist),
source_factory = SCons.Node.FS.File,
emitter = _convert_list,
prefix = '$SUBSTFILEPREFIX',
suffix = '$SUBSTFILESUFFIX',
src_suffix = ['.in'],
)
def generate(env):
env['LINESEPARATOR'] = os.linesep
env['BUILDERS']['Textfile'] = _text_builder
env['TEXTFILEPREFIX'] = ''
env['TEXTFILESUFFIX'] = '.txt'
env['BUILDERS']['Substfile'] = _subst_builder
env['SUBSTFILEPREFIX'] = ''
env['SUBSTFILESUFFIX'] = ''
def exists(env):
return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
Robert904/pymumble | callbacks.py | 3 | 3905 | # -*- coding: utf-8 -*-
from errors import UnknownCallbackError
from constants import *
class CallBacks(dict):
"""
Define the callbacks that can be registered by the application.
Multiple functions can be assigned to a callback using "add_callback"
The call is done from within the pymumble loop thread, it's important to
keep processing short to avoid delays on audio transmission
"""
def __init__(self):
self.update(
{PYMUMBLE_CLBK_CONNECTED: None, # Connection succeeded
PYMUMBLE_CLBK_CHANNELCREATED: None, # send the created channel object as parameter
PYMUMBLE_CLBK_CHANNELUPDATED: None, # send the updated channel object and a dict with all the modified fields as parameter
PYMUMBLE_CLBK_CHANNELREMOVED: None, # send the removed channel object as parameter
PYMUMBLE_CLBK_USERCREATED: None, # send the added user object as parameter
PYMUMBLE_CLBK_USERUPDATED: None, # send the updated user object and a dict with all the modified fields as parameter
PYMUMBLE_CLBK_USERREMOVED: None, # send the removed user object and the mumble message as parameter
PYMUMBLE_CLBK_SOUNDRECEIVED: None, # send the user object that received the sound and the SoundChunk object itself
PYMUMBLE_CLBK_TEXTMESSAGERECEIVED: None, # Send the received message
} )
def set_callback(self, callback, dest):
"""Define the function to call for a specific callback. Suppress any axisting callback function"""
if callback not in self:
raise UnknownCallbackError("Callback \"%s\" does not exists." % callback)
self[callback] = [dest]
def add_callback(self, callback, dest):
"""Add the function to call for a specific callback."""
if callback not in self:
raise UnknownCallbackError("Callback \"%s\" does not exists." % callback)
if self[callback] is None:
self[callback] = list()
self[callback].append(dest)
def get_callback(self, callback):
"""Get the functions assigned to a callback as a list. Return None if no callback defined"""
if callback not in self:
raise UnknownCallbackError("Callback \"%s\" does not exists." % callback)
return self[callback]
def remove_callback(self, callback, dest):
"""Remove a specific function from a specific callback. Function object must be the one added before."""
if callback not in self:
raise UnknownCallbackError("Callback \"%s\" does not exists." % callback)
if self[callback] is None or dest not in self[callback]:
raise UnknownCallbackError("Function not registered for callback \"%s\"." % callback)
self[callback].remove(dest)
if len(self[callback]) == 0:
self[callback] = None
def reset_callback(self, callback):
"""remove functions for a defined callback"""
if callback not in self:
raise UnknownCallbackError("Callback \"%s\" does not exists." % callback)
self[callback] = None
def call_callback(self, callback, *pos_parameters):
"""Call all the registered function for a specific callback."""
if callback not in self:
raise UnknownCallbackError("Callback \"%s\" does not exists." % callback)
if self[callback]:
for func in self[callback]:
func(*pos_parameters)
def __call__(self, callback, *pos_parameters):
"""shortcut to be able to call the dict element as a function"""
self.call_callback(callback, *pos_parameters)
def get_callbacks_list(self):
"""Get a list of all callbacks"""
return self.keys()
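# Editor's usage sketch (hedged; assumes a connected pymumble client named
# `mumble` that exposes this CallBacks instance as `mumble.callbacks`):
#
#     def sound_received(user, chunk):
#         pass  # keep this short -- it runs inside the pymumble loop thread
#
#     mumble.callbacks.set_callback(PYMUMBLE_CLBK_SOUNDRECEIVED, sound_received)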
| gpl-3.0 |
Xowap/ansible | lib/ansible/utils/vars.py | 55 | 2382 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible import constants as C
from ansible.parsing.splitter import parse_kv
from ansible.utils.unicode import to_unicode
def combine_vars(a, b):
if C.DEFAULT_HASH_BEHAVIOUR == "merge":
return merge_hash(a, b)
else:
return dict(a.items() + b.items())
def merge_hash(a, b):
''' recursively merges hash b into a
keys from b take precedence over keys from a '''
result = {}
for dicts in a, b:
# next, iterate over b keys and values
for k, v in dicts.iteritems():
# if there's already such key in a
# and that key contains dict
if k in result and isinstance(result[k], dict):
# merge those dicts recursively
result[k] = merge_hash(a[k], v)
else:
# otherwise, just copy a value from b to a
result[k] = v
return result
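# Editor's note -- a small worked example of the merge semantics above:
#
#     merge_hash({'a': {'x': 1}, 'b': 1}, {'a': {'y': 2}, 'b': 2})
#     == {'a': {'x': 1, 'y': 2}, 'b': 2}
#
# nested dicts are merged recursively, while scalar keys from b win.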
def load_extra_vars(loader, options):
extra_vars = {}
for extra_vars_opt in options.extra_vars:
extra_vars_opt = to_unicode(extra_vars_opt, errors='strict')
if extra_vars_opt.startswith(u"@"):
# Argument is a YAML file (JSON is a subset of YAML)
data = loader.load_from_file(extra_vars_opt[1:])
elif extra_vars_opt and extra_vars_opt[0] in u'[{':
# Arguments as YAML
data = loader.load(extra_vars_opt)
else:
# Arguments as Key-value
data = parse_kv(extra_vars_opt)
extra_vars = combine_vars(extra_vars, data)
return extra_vars
| gpl-3.0 |
mims2707/bite-project | deps/mrtaskman/server/mapreduce/lib/graphy/pie_chart.py | 77 | 6233 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for pie charts."""
import warnings
from mapreduce.lib.graphy import common
from mapreduce.lib.graphy import util
class Segment(common.DataSeries):
"""A single segment of the pie chart.
Object attributes:
size: relative size of the segment
label: label of the segment (if any)
color: color of the segment (if any)
"""
def __init__(self, size, label=None, color=None):
if label is not None and util._IsColor(label):
warnings.warn('Your code may be broken! '
'Label looks like a hex triplet; it might be a color. '
'The old argument order (color before label) is '
'deprecated.',
DeprecationWarning, stacklevel=2)
style = common._BasicStyle(color)
super(Segment, self).__init__([size], label=label, style=style)
assert size >= 0
def _GetSize(self):
return self.data[0]
def _SetSize(self, value):
assert value >= 0
self.data[0] = value
size = property(_GetSize, _SetSize,
doc = """The relative size of this pie segment.""")
# Since Segments are so simple, provide color for convenience.
def _GetColor(self):
return self.style.color
def _SetColor(self, color):
self.style.color = color
color = property(_GetColor, _SetColor,
doc = """The color of this pie segment.""")
class PieChart(common.BaseChart):
"""Represents a pie chart.
The pie chart consists of a single "pie" by default, but additional pies
may be added using the AddPie method. The Google Chart API will display
the pies as concentric circles, with pie #0 on the inside; other backends
may display the pies differently.
"""
def __init__(self, points=None, labels=None, colors=None):
"""Constructor for PieChart objects.
Creates a pie chart with a single pie.
Args:
points: A list of data points for the pie chart;
i.e., relative sizes of the pie segments
labels: A list of labels for the pie segments.
TODO: Allow the user to pass in None as one of
the labels in order to skip that label.
colors: A list of colors for the pie segments, as hex strings
(f.ex. '0000ff' for blue). If there are less colors than pie
segments, the Google Chart API will attempt to produce a smooth
color transition between segments by spreading the colors across
them.
"""
super(PieChart, self).__init__()
self.formatters = []
self._colors = None
if points:
self.AddPie(points, labels, colors)
def AddPie(self, points, labels=None, colors=None):
"""Add a whole pie to the chart.
Args:
points: A list of pie segment sizes
labels: A list of labels for the pie segments
colors: A list of colors for the segments. Missing colors will be chosen
automatically.
Return:
The index of the newly added pie.
"""
num_colors = len(colors or [])
num_labels = len(labels or [])
pie_index = len(self.data)
self.data.append([])
for i, pt in enumerate(points):
label = None
if i < num_labels:
label = labels[i]
color = None
if i < num_colors:
color = colors[i]
self.AddSegment(pt, label=label, color=color, pie_index=pie_index)
return pie_index
def AddSegments(self, points, labels, colors):
"""DEPRECATED."""
warnings.warn('PieChart.AddSegments is deprecated. Call AddPie instead. ',
DeprecationWarning, stacklevel=2)
num_colors = len(colors or [])
for i, pt in enumerate(points):
assert pt >= 0
label = labels[i]
color = None
if i < num_colors:
color = colors[i]
self.AddSegment(pt, label=label, color=color)
def AddSegment(self, size, label=None, color=None, pie_index=0):
"""Add a pie segment to this chart, and return the segment.
size: The size of the segment.
label: The label for the segment.
color: The color of the segment, or None to automatically choose the color.
pie_index: The index of the pie that will receive the new segment.
By default, the chart has one pie (pie #0); use the AddPie method to
add more pies.
"""
if isinstance(size, Segment):
warnings.warn("AddSegment(segment) is deprecated. Use AddSegment(size, "
"label, color) instead", DeprecationWarning, stacklevel=2)
segment = size
else:
segment = Segment(size, label=label, color=color)
assert segment.size >= 0
if pie_index == 0 and not self.data:
# Create the default pie
self.data.append([])
assert (pie_index >= 0 and pie_index < len(self.data))
self.data[pie_index].append(segment)
return segment
def AddSeries(self, points, color=None, style=None, markers=None, label=None):
"""DEPRECATED
Add a new segment to the chart and return it.
The segment must contain exactly one data point; all parameters
other than color and label are ignored.
"""
warnings.warn('PieChart.AddSeries is deprecated. Call AddSegment or '
'AddSegments instead.', DeprecationWarning)
return self.AddSegment(Segment(points[0], color=color, label=label))
def SetColors(self, *colors):
"""Change the colors of this chart to the specified list of colors.
Note that this will completely override the individual colors specified
in the pie segments. Missing colors will be interpolated, so that the
list of colors covers all segments in all the pies.
"""
self._colors = colors
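# Editor's usage sketch (hedged; segment sizes, labels and colors are
# illustrative only):
#
#     chart = PieChart([10, 20, 70], labels=['a', 'b', 'c'])
#     chart.AddSegment(5, label='d', color='0000ff')
#     outer = chart.AddPie([40, 60], labels=['x', 'y'])
#
# How the pies are drawn (e.g. as concentric rings) is left to the backend.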
| apache-2.0 |
arskom/pyubl | ubl/const/__init__.py | 1 | 1607 | # encoding: utf8
#
# Copyright (c), Arskom Ltd. and pyubl contributors, see CONTRIBUTORS file.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pyubl nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import ubl.const.schema | bsd-3-clause |
segmentio/npm | node_modules/node-gyp/gyp/pylib/gyp/MSVSUtil.py | 566 | 9386 | # Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility functions shared amongst the Windows generators."""
import copy
import os
_TARGET_TYPE_EXT = {
'executable': '.exe',
'loadable_module': '.dll',
'shared_library': '.dll',
}
def _GetLargePdbShimCcPath():
"""Returns the path of the large_pdb_shim.cc file."""
this_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.abspath(os.path.join(this_dir, '..', '..'))
win_data_dir = os.path.join(src_dir, 'data', 'win')
large_pdb_shim_cc = os.path.join(win_data_dir, 'large-pdb-shim.cc')
return large_pdb_shim_cc
def _DeepCopySomeKeys(in_dict, keys):
"""Performs a partial deep-copy on |in_dict|, only copying the keys in |keys|.
Arguments:
in_dict: The dictionary to copy.
keys: The keys to be copied. If a key is in this list and doesn't exist in
|in_dict| this is not an error.
Returns:
The partially deep-copied dictionary.
"""
d = {}
for key in keys:
if key not in in_dict:
continue
d[key] = copy.deepcopy(in_dict[key])
return d
def _SuffixName(name, suffix):
"""Add a suffix to the end of a target.
Arguments:
name: name of the target (foo#target)
suffix: the suffix to be added
Returns:
Target name with suffix added (foo_suffix#target)
"""
parts = name.rsplit('#', 1)
parts[0] = '%s_%s' % (parts[0], suffix)
return '#'.join(parts)
def _ShardName(name, number):
"""Add a shard number to the end of a target.
Arguments:
name: name of the target (foo#target)
number: shard number
Returns:
Target name with shard added (foo_1#target)
"""
return _SuffixName(name, str(number))
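# Illustrative only (not from the original module): the helpers above rewrite
# qualified target names of the form 'path/foo.gyp:name#target'.
#
#   _SuffixName('base/base.gyp:base#target', 'copy')  -> 'base/base.gyp:base_copy#target'
#   _ShardName('base/base.gyp:base#target', 1)        -> 'base/base.gyp:base_1#target'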
def ShardTargets(target_list, target_dicts):
"""Shard some targets apart to work around the linkers limits.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
Returns:
Tuple of the new sharded versions of the inputs.
"""
# Gather the targets to shard, and how many pieces.
targets_to_shard = {}
for t in target_dicts:
shards = int(target_dicts[t].get('msvs_shard', 0))
if shards:
targets_to_shard[t] = shards
# Shard target_list.
new_target_list = []
for t in target_list:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
new_target_list.append(_ShardName(t, i))
else:
new_target_list.append(t)
# Shard target_dict.
new_target_dicts = {}
for t in target_dicts:
if t in targets_to_shard:
for i in range(targets_to_shard[t]):
name = _ShardName(t, i)
new_target_dicts[name] = copy.copy(target_dicts[t])
new_target_dicts[name]['target_name'] = _ShardName(
new_target_dicts[name]['target_name'], i)
sources = new_target_dicts[name].get('sources', [])
new_sources = []
for pos in range(i, len(sources), targets_to_shard[t]):
new_sources.append(sources[pos])
new_target_dicts[name]['sources'] = new_sources
else:
new_target_dicts[t] = target_dicts[t]
# Shard dependencies.
for t in new_target_dicts:
dependencies = copy.copy(new_target_dicts[t].get('dependencies', []))
new_dependencies = []
for d in dependencies:
if d in targets_to_shard:
for i in range(targets_to_shard[d]):
new_dependencies.append(_ShardName(d, i))
else:
new_dependencies.append(d)
new_target_dicts[t]['dependencies'] = new_dependencies
return (new_target_list, new_target_dicts)
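# Hedged example (values hypothetical, not from the original source): a target
# that sets 'msvs_shard': 2 is split into two targets with its sources dealt
# out round-robin, and every dependent is rewired to depend on both shards.
#
#   target_list  = ['big.gyp:big#target']
#   target_dicts = {'big.gyp:big#target': {'target_name': 'big',
#                                          'msvs_shard': 2,
#                                          'sources': ['a.cc', 'b.cc']}}
#   ShardTargets(target_list, target_dicts)
#   # -> (['big.gyp:big_0#target', 'big.gyp:big_1#target'], {...})
#   #    big_0 keeps ['a.cc'], big_1 keeps ['b.cc'].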
def _GetPdbPath(target_dict, config_name, vars):
"""Returns the path to the PDB file that will be generated by a given
configuration.
The lookup proceeds as follows:
- Look for an explicit path in the VCLinkerTool configuration block.
- Look for an 'msvs_large_pdb_path' variable.
- Use '<(PRODUCT_DIR)/<(product_name).(exe|dll).pdb' if 'product_name' is
specified.
- Use '<(PRODUCT_DIR)/<(target_name).(exe|dll).pdb'.
Arguments:
target_dict: The target dictionary to be searched.
config_name: The name of the configuration of interest.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
The path of the corresponding PDB file.
"""
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.get('VCLinkerTool', {})
pdb_path = linker.get('ProgramDatabaseFile')
if pdb_path:
return pdb_path
variables = target_dict.get('variables', {})
pdb_path = variables.get('msvs_large_pdb_path', None)
if pdb_path:
return pdb_path
pdb_base = target_dict.get('product_name', target_dict['target_name'])
pdb_base = '%s%s.pdb' % (pdb_base, _TARGET_TYPE_EXT[target_dict['type']])
pdb_path = vars['PRODUCT_DIR'] + '/' + pdb_base
return pdb_path
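# Illustration (assumed inputs, not from the original file): with no explicit
# VCLinkerTool 'ProgramDatabaseFile' and no 'msvs_large_pdb_path' variable, a
# shared_library target named 'foo' falls through to the last rule and yields
# vars['PRODUCT_DIR'] + '/foo.dll.pdb'.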
def InsertLargePdbShims(target_list, target_dicts, vars):
"""Insert a shim target that forces the linker to use 4KB pagesize PDBs.
This is a workaround for targets with PDBs greater than 1GB in size, the
limit for the 1KB pagesize PDBs created by the linker by default.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
vars: A dictionary of common GYP variables with generator-specific values.
Returns:
Tuple of the shimmed version of the inputs.
"""
# Determine which targets need shimming.
targets_to_shim = []
for t in target_dicts:
target_dict = target_dicts[t]
# We only want to shim targets that have msvs_large_pdb enabled.
if not int(target_dict.get('msvs_large_pdb', 0)):
continue
# This is intended for executable, shared_library and loadable_module
# targets where every configuration is set up to produce a PDB output.
# If any of these conditions is not true then the shim logic will fail
# below.
targets_to_shim.append(t)
large_pdb_shim_cc = _GetLargePdbShimCcPath()
for t in targets_to_shim:
target_dict = target_dicts[t]
target_name = target_dict.get('target_name')
base_dict = _DeepCopySomeKeys(target_dict,
['configurations', 'default_configuration', 'toolset'])
# This is the dict for copying the source file (part of the GYP tree)
# to the intermediate directory of the project. This is necessary because
# we can't always build a relative path to the shim source file (on Windows
# GYP and the project may be on different drives), and Ninja hates absolute
# paths (it ends up generating the .obj and .obj.d alongside the source
    # file, polluting GYP's tree).
copy_suffix = 'large_pdb_copy'
copy_target_name = target_name + '_' + copy_suffix
full_copy_target_name = _SuffixName(t, copy_suffix)
shim_cc_basename = os.path.basename(large_pdb_shim_cc)
shim_cc_dir = vars['SHARED_INTERMEDIATE_DIR'] + '/' + copy_target_name
shim_cc_path = shim_cc_dir + '/' + shim_cc_basename
copy_dict = copy.deepcopy(base_dict)
copy_dict['target_name'] = copy_target_name
copy_dict['type'] = 'none'
copy_dict['sources'] = [ large_pdb_shim_cc ]
copy_dict['copies'] = [{
'destination': shim_cc_dir,
'files': [ large_pdb_shim_cc ]
}]
# This is the dict for the PDB generating shim target. It depends on the
# copy target.
shim_suffix = 'large_pdb_shim'
shim_target_name = target_name + '_' + shim_suffix
full_shim_target_name = _SuffixName(t, shim_suffix)
shim_dict = copy.deepcopy(base_dict)
shim_dict['target_name'] = shim_target_name
shim_dict['type'] = 'static_library'
shim_dict['sources'] = [ shim_cc_path ]
shim_dict['dependencies'] = [ full_copy_target_name ]
# Set up the shim to output its PDB to the same location as the final linker
# target.
for config_name, config in shim_dict.get('configurations').iteritems():
pdb_path = _GetPdbPath(target_dict, config_name, vars)
# A few keys that we don't want to propagate.
for key in ['msvs_precompiled_header', 'msvs_precompiled_source', 'test']:
config.pop(key, None)
msvs = config.setdefault('msvs_settings', {})
# Update the compiler directives in the shim target.
compiler = msvs.setdefault('VCCLCompilerTool', {})
compiler['DebugInformationFormat'] = '3'
compiler['ProgramDataBaseFileName'] = pdb_path
# Set the explicit PDB path in the appropriate configuration of the
# original target.
config = target_dict['configurations'][config_name]
msvs = config.setdefault('msvs_settings', {})
linker = msvs.setdefault('VCLinkerTool', {})
linker['GenerateDebugInformation'] = 'true'
linker['ProgramDatabaseFile'] = pdb_path
# Add the new targets. They must go to the beginning of the list so that
# the dependency generation works as expected in ninja.
target_list.insert(0, full_copy_target_name)
target_list.insert(0, full_shim_target_name)
target_dicts[full_copy_target_name] = copy_dict
target_dicts[full_shim_target_name] = shim_dict
# Update the original target to depend on the shim target.
target_dict.setdefault('dependencies', []).append(full_shim_target_name)
return (target_list, target_dicts) | artistic-2.0 |
oribaldi/graphite-intro | examples/example_graphite_pickle.py | 1 | 2824 | #!/usr/bin/python
"""
An application example that sends app metrics
to Graphite through the Pickle protocol.
Based on the example:
https://github.com/graphite-project/carbon/blob/master/examples/example-pickle-client.py
Author: Oriana Baldizan
Date: 21.12.16
"""
import re
import sys
import time
import socket
import pickle
import struct
import random
DELAY = 30
CARBON_SERVER = '127.0.0.1'
CARBON_PICKLE_PORT = 2004
def get_random_load():
""" Generates random load value """
return random.sample(xrange(10,300), 3)
def get_memcache(gsock):
""" """
data = []
lines = []
timestamp = int(time.time())
for line in open('/proc/meminfo').readlines():
bits = line.split()
        # We don't care about the pages.
if len(bits) == 2:
continue
# remove the : from the metric name
metric = bits[0]
metric = metric.replace(':', '')
        # Convert the default kB value into MB
value = int(bits[1])
value = value / 1024
data.append(("testapp." + metric, (timestamp, value)))
lines.append("testapp.%s %d %d" % (metric, value, timestamp))
message = '\n'.join(lines) + '\n'
print "Sending metrics to Graphite ..."
print message
# Send metrics
package = pickle.dumps(data, 2)
header = struct.pack('!L', len(package))
gsock.sendall(header + package)
def run_app(gsock):
""" Starts the app and metrics collection """
message = ""
while True:
now = int(time.time())
tuples = []
lines = []
# Gather metrics
load = get_random_load()
for u in xrange(1, 5):
# Format: (metric_name, (timestamp, value))
tuples.append( ('testapp.count', (now, u)) )
lines.append("testapp.count %d %d" % (u, now))
message = '\n'.join(lines) + '\n'
print "Sending metrics to Graphite ..."
print message
# Send metrics
package = pickle.dumps(tuples)
header = struct.pack('!L', len(package))
gsock.sendall(header + package)
time.sleep(DELAY)
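def decode_pickle_payload(blob):
    """ Hedged helper, not part of the original example: shows how a receiver
    (such as carbon's pickle listener) would unframe what run_app() sends --
    a 4-byte big-endian length header followed by a pickled list of
    (metric_path, (timestamp, value)) tuples. """
    (length,) = struct.unpack('!L', blob[:4])
    return pickle.loads(blob[4:4 + length])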
def main():
""" Starts the app and its connection with Graphite """
# Open Graphite connection
gsock = socket.socket()
try:
gsock.connect( (CARBON_SERVER, CARBON_PICKLE_PORT) )
except socket.error:
# Check if carbon-cache.py is running
raise SystemExit("Couldn't connect to %(server)s on port %(port)s" % {'server': CARBON_SERVER, 'port': CARBON_PICKLE_PORT})
try:
run_app(gsock)
#get_memcache(gsock)
except KeyboardInterrupt:
gsock.close()
sys.stderr.write("\nExiting on CTRL-c\n")
sys.exit(0)
if __name__ == "__main__":
main() | mit |
nealtodd/django | tests/messages_tests/urls.py | 320 | 2569 | from django import forms
from django.conf.urls import url
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseRedirect
from django.template import engines
from django.template.response import TemplateResponse
from django.views.decorators.cache import never_cache
from django.views.generic.edit import FormView
TEMPLATE = """{% if messages %}
<ul class="messages">
{% for message in messages %}
<li{% if message.tags %} class="{{ message.tags }}"{% endif %}>
{{ message }}
</li>
{% endfor %}
</ul>
{% endif %}
"""
@never_cache
def add(request, message_type):
# don't default to False here, because we want to test that it defaults
# to False if unspecified
fail_silently = request.POST.get('fail_silently', None)
for msg in request.POST.getlist('messages'):
if fail_silently is not None:
getattr(messages, message_type)(request, msg,
fail_silently=fail_silently)
else:
getattr(messages, message_type)(request, msg)
show_url = reverse('show_message')
return HttpResponseRedirect(show_url)
@never_cache
def add_template_response(request, message_type):
for msg in request.POST.getlist('messages'):
getattr(messages, message_type)(request, msg)
show_url = reverse('show_template_response')
return HttpResponseRedirect(show_url)
@never_cache
def show(request):
template = engines['django'].from_string(TEMPLATE)
return HttpResponse(template.render(request=request))
@never_cache
def show_template_response(request):
template = engines['django'].from_string(TEMPLATE)
return TemplateResponse(request, template)
class ContactForm(forms.Form):
name = forms.CharField(required=True)
slug = forms.SlugField(required=True)
class ContactFormViewWithMsg(SuccessMessageMixin, FormView):
form_class = ContactForm
success_url = show
success_message = "%(name)s was created successfully"
urlpatterns = [
url('^add/(debug|info|success|warning|error)/$', add, name='add_message'),
url('^add/msg/$', ContactFormViewWithMsg.as_view(), name='add_success_msg'),
url('^show/$', show, name='show_message'),
url('^template_response/add/(debug|info|success|warning|error)/$',
add_template_response, name='add_template_response'),
url('^template_response/show/$', show_template_response, name='show_template_response'),
]
| bsd-3-clause |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/encodings/cp1026.py | 593 | 13369 | """ Python Character Mapping Codec cp1026 generated from 'MAPPINGS/VENDORS/MICSFT/EBCDIC/CP1026.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
return codecs.charmap_encode(input,errors,encoding_table)
def decode(self,input,errors='strict'):
return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='cp1026',
encode=Codec().encode,
decode=Codec().decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x9c' # 0x04 -> CONTROL
u'\t' # 0x05 -> HORIZONTAL TABULATION
u'\x86' # 0x06 -> CONTROL
u'\x7f' # 0x07 -> DELETE
u'\x97' # 0x08 -> CONTROL
u'\x8d' # 0x09 -> CONTROL
u'\x8e' # 0x0A -> CONTROL
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x9d' # 0x14 -> CONTROL
u'\x85' # 0x15 -> CONTROL
u'\x08' # 0x16 -> BACKSPACE
u'\x87' # 0x17 -> CONTROL
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x92' # 0x1A -> CONTROL
u'\x8f' # 0x1B -> CONTROL
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u'\x80' # 0x20 -> CONTROL
u'\x81' # 0x21 -> CONTROL
u'\x82' # 0x22 -> CONTROL
u'\x83' # 0x23 -> CONTROL
u'\x84' # 0x24 -> CONTROL
u'\n' # 0x25 -> LINE FEED
u'\x17' # 0x26 -> END OF TRANSMISSION BLOCK
u'\x1b' # 0x27 -> ESCAPE
u'\x88' # 0x28 -> CONTROL
u'\x89' # 0x29 -> CONTROL
u'\x8a' # 0x2A -> CONTROL
u'\x8b' # 0x2B -> CONTROL
u'\x8c' # 0x2C -> CONTROL
u'\x05' # 0x2D -> ENQUIRY
u'\x06' # 0x2E -> ACKNOWLEDGE
u'\x07' # 0x2F -> BELL
u'\x90' # 0x30 -> CONTROL
u'\x91' # 0x31 -> CONTROL
u'\x16' # 0x32 -> SYNCHRONOUS IDLE
u'\x93' # 0x33 -> CONTROL
u'\x94' # 0x34 -> CONTROL
u'\x95' # 0x35 -> CONTROL
u'\x96' # 0x36 -> CONTROL
u'\x04' # 0x37 -> END OF TRANSMISSION
u'\x98' # 0x38 -> CONTROL
u'\x99' # 0x39 -> CONTROL
u'\x9a' # 0x3A -> CONTROL
u'\x9b' # 0x3B -> CONTROL
u'\x14' # 0x3C -> DEVICE CONTROL FOUR
u'\x15' # 0x3D -> NEGATIVE ACKNOWLEDGE
u'\x9e' # 0x3E -> CONTROL
u'\x1a' # 0x3F -> SUBSTITUTE
u' ' # 0x40 -> SPACE
u'\xa0' # 0x41 -> NO-BREAK SPACE
u'\xe2' # 0x42 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe4' # 0x43 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe0' # 0x44 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0x45 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe3' # 0x46 -> LATIN SMALL LETTER A WITH TILDE
u'\xe5' # 0x47 -> LATIN SMALL LETTER A WITH RING ABOVE
u'{' # 0x48 -> LEFT CURLY BRACKET
u'\xf1' # 0x49 -> LATIN SMALL LETTER N WITH TILDE
u'\xc7' # 0x4A -> LATIN CAPITAL LETTER C WITH CEDILLA
u'.' # 0x4B -> FULL STOP
u'<' # 0x4C -> LESS-THAN SIGN
u'(' # 0x4D -> LEFT PARENTHESIS
u'+' # 0x4E -> PLUS SIGN
u'!' # 0x4F -> EXCLAMATION MARK
u'&' # 0x50 -> AMPERSAND
u'\xe9' # 0x51 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0x52 -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0x53 -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xe8' # 0x54 -> LATIN SMALL LETTER E WITH GRAVE
u'\xed' # 0x55 -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0x56 -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0x57 -> LATIN SMALL LETTER I WITH DIAERESIS
u'\xec' # 0x58 -> LATIN SMALL LETTER I WITH GRAVE
u'\xdf' # 0x59 -> LATIN SMALL LETTER SHARP S (GERMAN)
u'\u011e' # 0x5A -> LATIN CAPITAL LETTER G WITH BREVE
u'\u0130' # 0x5B -> LATIN CAPITAL LETTER I WITH DOT ABOVE
u'*' # 0x5C -> ASTERISK
u')' # 0x5D -> RIGHT PARENTHESIS
u';' # 0x5E -> SEMICOLON
u'^' # 0x5F -> CIRCUMFLEX ACCENT
u'-' # 0x60 -> HYPHEN-MINUS
u'/' # 0x61 -> SOLIDUS
u'\xc2' # 0x62 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc4' # 0x63 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc0' # 0x64 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0x65 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc3' # 0x66 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc5' # 0x67 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'[' # 0x68 -> LEFT SQUARE BRACKET
u'\xd1' # 0x69 -> LATIN CAPITAL LETTER N WITH TILDE
u'\u015f' # 0x6A -> LATIN SMALL LETTER S WITH CEDILLA
u',' # 0x6B -> COMMA
u'%' # 0x6C -> PERCENT SIGN
u'_' # 0x6D -> LOW LINE
u'>' # 0x6E -> GREATER-THAN SIGN
u'?' # 0x6F -> QUESTION MARK
u'\xf8' # 0x70 -> LATIN SMALL LETTER O WITH STROKE
u'\xc9' # 0x71 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0x72 -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0x73 -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xc8' # 0x74 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xcd' # 0x75 -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0x76 -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0x77 -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\xcc' # 0x78 -> LATIN CAPITAL LETTER I WITH GRAVE
u'\u0131' # 0x79 -> LATIN SMALL LETTER DOTLESS I
u':' # 0x7A -> COLON
u'\xd6' # 0x7B -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u015e' # 0x7C -> LATIN CAPITAL LETTER S WITH CEDILLA
u"'" # 0x7D -> APOSTROPHE
u'=' # 0x7E -> EQUALS SIGN
u'\xdc' # 0x7F -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xd8' # 0x80 -> LATIN CAPITAL LETTER O WITH STROKE
u'a' # 0x81 -> LATIN SMALL LETTER A
u'b' # 0x82 -> LATIN SMALL LETTER B
u'c' # 0x83 -> LATIN SMALL LETTER C
u'd' # 0x84 -> LATIN SMALL LETTER D
u'e' # 0x85 -> LATIN SMALL LETTER E
u'f' # 0x86 -> LATIN SMALL LETTER F
u'g' # 0x87 -> LATIN SMALL LETTER G
u'h' # 0x88 -> LATIN SMALL LETTER H
u'i' # 0x89 -> LATIN SMALL LETTER I
u'\xab' # 0x8A -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0x8B -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'}' # 0x8C -> RIGHT CURLY BRACKET
u'`' # 0x8D -> GRAVE ACCENT
u'\xa6' # 0x8E -> BROKEN BAR
u'\xb1' # 0x8F -> PLUS-MINUS SIGN
u'\xb0' # 0x90 -> DEGREE SIGN
u'j' # 0x91 -> LATIN SMALL LETTER J
u'k' # 0x92 -> LATIN SMALL LETTER K
u'l' # 0x93 -> LATIN SMALL LETTER L
u'm' # 0x94 -> LATIN SMALL LETTER M
u'n' # 0x95 -> LATIN SMALL LETTER N
u'o' # 0x96 -> LATIN SMALL LETTER O
u'p' # 0x97 -> LATIN SMALL LETTER P
u'q' # 0x98 -> LATIN SMALL LETTER Q
u'r' # 0x99 -> LATIN SMALL LETTER R
u'\xaa' # 0x9A -> FEMININE ORDINAL INDICATOR
u'\xba' # 0x9B -> MASCULINE ORDINAL INDICATOR
u'\xe6' # 0x9C -> LATIN SMALL LIGATURE AE
u'\xb8' # 0x9D -> CEDILLA
u'\xc6' # 0x9E -> LATIN CAPITAL LIGATURE AE
u'\xa4' # 0x9F -> CURRENCY SIGN
u'\xb5' # 0xA0 -> MICRO SIGN
u'\xf6' # 0xA1 -> LATIN SMALL LETTER O WITH DIAERESIS
u's' # 0xA2 -> LATIN SMALL LETTER S
u't' # 0xA3 -> LATIN SMALL LETTER T
u'u' # 0xA4 -> LATIN SMALL LETTER U
u'v' # 0xA5 -> LATIN SMALL LETTER V
u'w' # 0xA6 -> LATIN SMALL LETTER W
u'x' # 0xA7 -> LATIN SMALL LETTER X
u'y' # 0xA8 -> LATIN SMALL LETTER Y
u'z' # 0xA9 -> LATIN SMALL LETTER Z
u'\xa1' # 0xAA -> INVERTED EXCLAMATION MARK
u'\xbf' # 0xAB -> INVERTED QUESTION MARK
u']' # 0xAC -> RIGHT SQUARE BRACKET
u'$' # 0xAD -> DOLLAR SIGN
u'@' # 0xAE -> COMMERCIAL AT
u'\xae' # 0xAF -> REGISTERED SIGN
u'\xa2' # 0xB0 -> CENT SIGN
u'\xa3' # 0xB1 -> POUND SIGN
u'\xa5' # 0xB2 -> YEN SIGN
u'\xb7' # 0xB3 -> MIDDLE DOT
u'\xa9' # 0xB4 -> COPYRIGHT SIGN
u'\xa7' # 0xB5 -> SECTION SIGN
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\xbc' # 0xB7 -> VULGAR FRACTION ONE QUARTER
u'\xbd' # 0xB8 -> VULGAR FRACTION ONE HALF
u'\xbe' # 0xB9 -> VULGAR FRACTION THREE QUARTERS
u'\xac' # 0xBA -> NOT SIGN
u'|' # 0xBB -> VERTICAL LINE
u'\xaf' # 0xBC -> MACRON
u'\xa8' # 0xBD -> DIAERESIS
u'\xb4' # 0xBE -> ACUTE ACCENT
u'\xd7' # 0xBF -> MULTIPLICATION SIGN
u'\xe7' # 0xC0 -> LATIN SMALL LETTER C WITH CEDILLA
u'A' # 0xC1 -> LATIN CAPITAL LETTER A
u'B' # 0xC2 -> LATIN CAPITAL LETTER B
u'C' # 0xC3 -> LATIN CAPITAL LETTER C
u'D' # 0xC4 -> LATIN CAPITAL LETTER D
u'E' # 0xC5 -> LATIN CAPITAL LETTER E
u'F' # 0xC6 -> LATIN CAPITAL LETTER F
u'G' # 0xC7 -> LATIN CAPITAL LETTER G
u'H' # 0xC8 -> LATIN CAPITAL LETTER H
u'I' # 0xC9 -> LATIN CAPITAL LETTER I
u'\xad' # 0xCA -> SOFT HYPHEN
u'\xf4' # 0xCB -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'~' # 0xCC -> TILDE
u'\xf2' # 0xCD -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xCE -> LATIN SMALL LETTER O WITH ACUTE
u'\xf5' # 0xCF -> LATIN SMALL LETTER O WITH TILDE
u'\u011f' # 0xD0 -> LATIN SMALL LETTER G WITH BREVE
u'J' # 0xD1 -> LATIN CAPITAL LETTER J
u'K' # 0xD2 -> LATIN CAPITAL LETTER K
u'L' # 0xD3 -> LATIN CAPITAL LETTER L
u'M' # 0xD4 -> LATIN CAPITAL LETTER M
u'N' # 0xD5 -> LATIN CAPITAL LETTER N
u'O' # 0xD6 -> LATIN CAPITAL LETTER O
u'P' # 0xD7 -> LATIN CAPITAL LETTER P
u'Q' # 0xD8 -> LATIN CAPITAL LETTER Q
u'R' # 0xD9 -> LATIN CAPITAL LETTER R
u'\xb9' # 0xDA -> SUPERSCRIPT ONE
u'\xfb' # 0xDB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\\' # 0xDC -> REVERSE SOLIDUS
u'\xf9' # 0xDD -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xDE -> LATIN SMALL LETTER U WITH ACUTE
u'\xff' # 0xDF -> LATIN SMALL LETTER Y WITH DIAERESIS
u'\xfc' # 0xE0 -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xf7' # 0xE1 -> DIVISION SIGN
u'S' # 0xE2 -> LATIN CAPITAL LETTER S
u'T' # 0xE3 -> LATIN CAPITAL LETTER T
u'U' # 0xE4 -> LATIN CAPITAL LETTER U
u'V' # 0xE5 -> LATIN CAPITAL LETTER V
u'W' # 0xE6 -> LATIN CAPITAL LETTER W
u'X' # 0xE7 -> LATIN CAPITAL LETTER X
u'Y' # 0xE8 -> LATIN CAPITAL LETTER Y
u'Z' # 0xE9 -> LATIN CAPITAL LETTER Z
u'\xb2' # 0xEA -> SUPERSCRIPT TWO
u'\xd4' # 0xEB -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'#' # 0xEC -> NUMBER SIGN
u'\xd2' # 0xED -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd5' # 0xEF -> LATIN CAPITAL LETTER O WITH TILDE
u'0' # 0xF0 -> DIGIT ZERO
u'1' # 0xF1 -> DIGIT ONE
u'2' # 0xF2 -> DIGIT TWO
u'3' # 0xF3 -> DIGIT THREE
u'4' # 0xF4 -> DIGIT FOUR
u'5' # 0xF5 -> DIGIT FIVE
u'6' # 0xF6 -> DIGIT SIX
u'7' # 0xF7 -> DIGIT SEVEN
u'8' # 0xF8 -> DIGIT EIGHT
u'9' # 0xF9 -> DIGIT NINE
u'\xb3' # 0xFA -> SUPERSCRIPT THREE
u'\xdb' # 0xFB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'"' # 0xFC -> QUOTATION MARK
u'\xd9' # 0xFD -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xFE -> LATIN CAPITAL LETTER U WITH ACUTE
u'\x9f' # 0xFF -> CONTROL
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
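### Usage sketch (not part of the generated module)
# When installed under the standard 'encodings' package the codec is found
# automatically; it can also be exercised directly. Python 2 semantics are
# assumed here: 0x51 maps to LATIN SMALL LETTER E WITH ACUTE in the table above.
if __name__ == '__main__':
    assert Codec().encode(u'\xe9')[0] == 'Q'
    assert Codec().decode('Q')[0] == u'\xe9'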
| apache-2.0 |
mlaitinen/odoo | addons/website/tests/test_converter.py | 280 | 8947 | # -*- coding: utf-8 -*-
import textwrap
import unittest2
from lxml import etree, html
from lxml.builder import E
from openerp.tests import common
from openerp.addons.base.ir import ir_qweb
from openerp.addons.website.models.ir_qweb import html_to_text
from openerp.addons.website.models.website import slugify, unslug
class TestUnslug(unittest2.TestCase):
def test_unslug(self):
tests = {
'': (None, None),
'foo': (None, None),
'foo-': (None, None),
'-': (None, None),
'foo-1': ('foo', 1),
'foo-bar-1': ('foo-bar', 1),
'foo--1': ('foo', -1),
'1': (None, 1),
'1-1': ('1', 1),
'--1': (None, None),
'foo---1': (None, None),
'foo1': (None, None),
}
for slug, expected in tests.iteritems():
self.assertEqual(unslug(slug), expected)
class TestHTMLToText(unittest2.TestCase):
def test_rawstring(self):
self.assertEqual(
"foobar",
html_to_text(E.div("foobar")))
def test_br(self):
self.assertEqual(
"foo\nbar",
html_to_text(E.div("foo", E.br(), "bar")))
self.assertEqual(
"foo\n\nbar\nbaz",
html_to_text(E.div(
"foo", E.br(), E.br(),
"bar", E.br(),
"baz")))
def test_p(self):
self.assertEqual(
"foo\n\nbar\n\nbaz",
html_to_text(E.div(
"foo",
E.p("bar"),
"baz")))
self.assertEqual(
"foo",
html_to_text(E.div(E.p("foo"))))
self.assertEqual(
"foo\n\nbar",
html_to_text(E.div("foo", E.p("bar"))))
self.assertEqual(
"foo\n\nbar",
html_to_text(E.div(E.p("foo"), "bar")))
self.assertEqual(
"foo\n\nbar\n\nbaz",
html_to_text(E.div(
E.p("foo"),
E.p("bar"),
E.p("baz"),
)))
def test_div(self):
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.div("bar"),
"baz"
)))
self.assertEqual(
"foo",
html_to_text(E.div(E.div("foo"))))
self.assertEqual(
"foo\nbar",
html_to_text(E.div("foo", E.div("bar"))))
self.assertEqual(
"foo\nbar",
html_to_text(E.div(E.div("foo"), "bar")))
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.div("bar"),
E.div("baz")
)))
def test_other_block(self):
self.assertEqual(
"foo\nbar\nbaz",
html_to_text(E.div(
"foo",
E.section("bar"),
"baz"
)))
def test_inline(self):
self.assertEqual(
"foobarbaz",
html_to_text(E.div("foo", E.span("bar"), "baz")))
def test_whitespace(self):
self.assertEqual(
"foo bar\nbaz",
html_to_text(E.div(
"foo\nbar",
E.br(),
"baz")
))
self.assertEqual(
"foo bar\nbaz",
html_to_text(E.div(
E.div(E.span("foo"), " bar"),
"baz")))
class TestConvertBack(common.TransactionCase):
def setUp(self):
super(TestConvertBack, self).setUp()
def field_rountrip_result(self, field, value, expected):
model = 'website.converter.test'
Model = self.registry(model)
id = Model.create(
self.cr, self.uid, {
field: value
})
[record] = Model.browse(self.cr, self.uid, [id])
e = etree.Element('span')
field_value = 'record.%s' % field
e.set('t-field', field_value)
rendered = self.registry('website.qweb').render_tag_field(
e, {'field': field_value}, '', ir_qweb.QWebContext(self.cr, self.uid, {
'record': record,
}, context={'inherit_branding': True}))
element = html.fromstring(
rendered, parser=html.HTMLParser(encoding='utf-8'))
converter = self.registry('website.qweb').get_converter_for(
element.get('data-oe-type'))
value_back = converter.from_html(
self.cr, self.uid, model, Model._fields[field], element)
if isinstance(expected, str):
expected = expected.decode('utf-8')
self.assertEqual(value_back, expected)
def field_roundtrip(self, field, value):
self.field_rountrip_result(field, value, value)
def test_integer(self):
self.field_roundtrip('integer', 42)
def test_float(self):
self.field_roundtrip('float', 42.567890)
self.field_roundtrip('float', 324542.567890)
def test_numeric(self):
self.field_roundtrip('numeric', 42.77)
def test_char(self):
self.field_roundtrip('char', "foo bar")
self.field_roundtrip('char', "ⒸⓄⓇⒼⒺ")
def test_selection(self):
self.field_roundtrip('selection', 3)
def test_selection_str(self):
self.field_roundtrip('selection_str', 'B')
def test_text(self):
self.field_roundtrip('text', textwrap.dedent("""\
You must obey the dance commander
Givin' out the order for fun
You must obey the dance commander
You know that he's the only one
Who gives the orders here,
Alright
Who gives the orders here,
Alright
It would be awesome
If we could dance-a
It would be awesome, yeah
Let's take the chance-a
It would be awesome, yeah
Let's start the show
Because you never know
You never know
You never know until you go"""))
def test_m2o(self):
""" the M2O field conversion (from html) is markedly different from
others as it directly writes into the m2o and returns nothing at all.
"""
model = 'website.converter.test'
field = 'many2one'
Sub = self.registry('website.converter.test.sub')
sub_id = Sub.create(self.cr, self.uid, {'name': "Foo"})
Model = self.registry(model)
id = Model.create(self.cr, self.uid, {field: sub_id})
[record] = Model.browse(self.cr, self.uid, [id])
e = etree.Element('span')
field_value = 'record.%s' % field
e.set('t-field', field_value)
rendered = self.registry('website.qweb').render_tag_field(
e, {'field': field_value}, '', ir_qweb.QWebContext(self.cr, self.uid, {
'record': record,
}, context={'inherit_branding': True}))
element = html.fromstring(rendered, parser=html.HTMLParser(encoding='utf-8'))
# emulate edition
element.text = "New content"
converter = self.registry('website.qweb').get_converter_for(
element.get('data-oe-type'))
value_back = converter.from_html(
self.cr, self.uid, model, Model._fields[field], element)
self.assertIsNone(
value_back, "the m2o converter should return None to avoid spurious"
" or useless writes on the parent record")
self.assertEqual(
Sub.browse(self.cr, self.uid, sub_id).name,
"New content",
"element edition should have been written directly to the m2o record"
)
class TestTitleToSlug(unittest2.TestCase):
"""
Those tests should pass with or without python-slugify
See website/models/website.py slugify method
"""
def test_spaces(self):
self.assertEqual(
"spaces",
slugify(u" spaces ")
)
def test_unicode(self):
self.assertEqual(
"heterogeneite",
slugify(u"hétérogénéité")
)
def test_underscore(self):
self.assertEqual(
"one-two",
slugify(u"one_two")
)
def test_caps(self):
self.assertEqual(
"camelcase",
slugify(u"CamelCase")
)
def test_special_chars(self):
self.assertEqual(
"o-d-o-o",
slugify(u"o!#d{|\o/@~o&%^?")
)
def test_str_to_unicode(self):
self.assertEqual(
"espana",
slugify("España")
)
def test_numbers(self):
self.assertEqual(
"article-1",
slugify(u"Article 1")
)
def test_all(self):
self.assertEqual(
"do-you-know-martine-a-la-plage",
slugify(u"Do YOU know 'Martine à la plage' ?")
)
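# Hedged reference sketch (not the implementation under test): a minimal
# slugify consistent with the expectations above. The real method lives in
# website/models/website.py and may instead delegate to python-slugify.
def naive_slugify(value):
    import re
    import unicodedata
    if isinstance(value, str):
        value = value.decode('utf-8')
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    value = re.sub(r'[\W_]+', '-', value.lower())
    return value.strip('-')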
| agpl-3.0 |
abridgett/boto | boto/kms/__init__.py | 113 | 1640 | # Copyright (c) 2014 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.regioninfo import RegionInfo, get_regions
def regions():
"""
Get all available regions for the AWS Key Management Service.
:rtype: list
:return: A list of :class:`boto.regioninfo.RegionInfo`
"""
from boto.kms.layer1 import KMSConnection
return get_regions('kms', connection_cls=KMSConnection)
def connect_to_region(region_name, **kw_params):
for region in regions():
if region.name == region_name:
return region.connect(**kw_params)
return None
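if __name__ == '__main__':
    # Hedged usage sketch (illustrative only): requires AWS credentials to be
    # configured for boto. list_keys() is provided by KMSConnection (layer1).
    conn = connect_to_region('us-east-1')
    if conn is not None:
        print(conn.list_keys())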
| mit |
shingonoide/odoo | addons/website_crm_partner_assign/controllers/main.py | 271 | 7541 | # -*- coding: utf-8 -*-
import werkzeug
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.addons.website.models.website import slug, unslug
from openerp.tools.translate import _
class WebsiteCrmPartnerAssign(http.Controller):
_references_per_page = 40
@http.route([
'/partners',
'/partners/page/<int:page>',
'/partners/grade/<model("res.partner.grade"):grade>',
'/partners/grade/<model("res.partner.grade"):grade>/page/<int:page>',
'/partners/country/<model("res.country"):country>',
'/partners/country/<model("res.country"):country>/page/<int:page>',
'/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>',
'/partners/grade/<model("res.partner.grade"):grade>/country/<model("res.country"):country>/page/<int:page>',
], type='http', auth="public", website=True)
def partners(self, country=None, grade=None, page=0, **post):
country_all = post.pop('country_all', False)
partner_obj = request.registry['res.partner']
country_obj = request.registry['res.country']
search = post.get('search', '')
base_partner_domain = [('is_company', '=', True), ('grade_id.website_published', '=', True), ('website_published', '=', True)]
if search:
base_partner_domain += ['|', ('name', 'ilike', search), ('website_description', 'ilike', search)]
# group by grade
grade_domain = list(base_partner_domain)
if not country and not country_all:
country_code = request.session['geoip'].get('country_code')
if country_code:
country_ids = country_obj.search(request.cr, request.uid, [('code', '=', country_code)], context=request.context)
if country_ids:
country = country_obj.browse(request.cr, request.uid, country_ids[0], context=request.context)
if country:
grade_domain += [('country_id', '=', country.id)]
grades = partner_obj.read_group(
request.cr, SUPERUSER_ID, grade_domain, ["id", "grade_id"],
groupby="grade_id", orderby="grade_id DESC", context=request.context)
grades_partners = partner_obj.search(
request.cr, SUPERUSER_ID, grade_domain,
context=request.context, count=True)
# flag active grade
for grade_dict in grades:
grade_dict['active'] = grade and grade_dict['grade_id'][0] == grade.id
grades.insert(0, {
'grade_id_count': grades_partners,
'grade_id': (0, _("All Categories")),
'active': bool(grade is None),
})
# group by country
country_domain = list(base_partner_domain)
if grade:
country_domain += [('grade_id', '=', grade.id)]
countries = partner_obj.read_group(
request.cr, SUPERUSER_ID, country_domain, ["id", "country_id"],
groupby="country_id", orderby="country_id", context=request.context)
countries_partners = partner_obj.search(
request.cr, SUPERUSER_ID, country_domain,
context=request.context, count=True)
# flag active country
for country_dict in countries:
country_dict['active'] = country and country_dict['country_id'] and country_dict['country_id'][0] == country.id
countries.insert(0, {
'country_id_count': countries_partners,
'country_id': (0, _("All Countries")),
'active': bool(country is None),
})
# current search
if grade:
base_partner_domain += [('grade_id', '=', grade.id)]
if country:
base_partner_domain += [('country_id', '=', country.id)]
# format pager
if grade and not country:
url = '/partners/grade/' + slug(grade)
elif country and not grade:
url = '/partners/country/' + slug(country)
elif country and grade:
url = '/partners/grade/' + slug(grade) + '/country/' + slug(country)
else:
url = '/partners'
url_args = {}
if search:
url_args['search'] = search
if country_all:
url_args['country_all'] = True
partner_count = partner_obj.search_count(
request.cr, SUPERUSER_ID, base_partner_domain,
context=request.context)
pager = request.website.pager(
url=url, total=partner_count, page=page, step=self._references_per_page, scope=7,
url_args=url_args)
# search partners matching current search parameters
partner_ids = partner_obj.search(
request.cr, SUPERUSER_ID, base_partner_domain,
order="grade_id DESC",
context=request.context) # todo in trunk: order="grade_id DESC, implemented_count DESC", offset=pager['offset'], limit=self._references_per_page
partners = partner_obj.browse(request.cr, SUPERUSER_ID, partner_ids, request.context)
# remove me in trunk
partners = sorted(partners, key=lambda x: (x.grade_id.sequence if x.grade_id else 0, len([i for i in x.implemented_partner_ids if i.website_published])), reverse=True)
partners = partners[pager['offset']:pager['offset'] + self._references_per_page]
google_map_partner_ids = ','.join(map(str, [p.id for p in partners]))
values = {
'countries': countries,
'current_country': country,
'grades': grades,
'current_grade': grade,
'partners': partners,
'google_map_partner_ids': google_map_partner_ids,
'pager': pager,
'searches': post,
'search_path': "%s" % werkzeug.url_encode(post),
}
return request.website.render("website_crm_partner_assign.index", values)
# Do not use semantic controller due to SUPERUSER_ID
@http.route(['/partners/<partner_id>'], type='http', auth="public", website=True)
def partners_detail(self, partner_id, partner_name='', **post):
_, partner_id = unslug(partner_id)
current_grade, current_country = None, None
grade_id = post.get('grade_id')
country_id = post.get('country_id')
if grade_id:
grade_ids = request.registry['res.partner.grade'].exists(request.cr, request.uid, int(grade_id), context=request.context)
if grade_ids:
current_grade = request.registry['res.partner.grade'].browse(request.cr, request.uid, grade_ids[0], context=request.context)
if country_id:
country_ids = request.registry['res.country'].exists(request.cr, request.uid, int(country_id), context=request.context)
if country_ids:
current_country = request.registry['res.country'].browse(request.cr, request.uid, country_ids[0], context=request.context)
if partner_id:
partner = request.registry['res.partner'].browse(request.cr, SUPERUSER_ID, partner_id, context=request.context)
if partner.exists() and partner.website_published:
values = {
'main_object': partner,
'partner': partner,
'current_grade': current_grade,
'current_country': current_country
}
return request.website.render("website_crm_partner_assign.partner", values)
return self.partners(**post)
| agpl-3.0 |
clonetwin26/buck | third-party/py/pex/pex/resolvable.py | 52 | 9765 | # Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
import re
from abc import abstractmethod, abstractproperty
from pkg_resources import Requirement, safe_extra
from .base import maybe_requirement, requirement_is_exact
from .compatibility import string as compatibility_string
from .compatibility import AbstractClass
from .installer import InstallerBase, Packager
from .package import Package
from .resolver_options import ResolverOptionsBuilder, ResolverOptionsInterface
# Extract extras as specified per "declaring extras":
# https://pythonhosted.org/setuptools/setuptools.html
_EXTRAS_PATTERN = re.compile(r'(?P<main>.*)\[(?P<extras>.*)\]$')
def strip_extras(resolvable_string):
match = _EXTRAS_PATTERN.match(resolvable_string)
if match:
resolvable_string, extras = match.groupdict()['main'], match.groupdict()['extras']
extras = [safe_extra(extra.strip()) for extra in extras.split(',')]
else:
extras = []
return resolvable_string, extras
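# For illustration (not in the original source): strip_extras() splits a
# requirement-style string into its base and its "extras" tags.
#
#   strip_extras('pex[whl,cachecontrol]')  ->  ('pex', ['whl', 'cachecontrol'])
#   strip_extras('pex')                    ->  ('pex', [])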
class Resolvable(AbstractClass):
"""An entity that can be resolved into a package."""
class Error(Exception): pass
class InvalidRequirement(Error): pass
_REGISTRY = []
@classmethod
def register(cls, implementation):
"""Register an implementation of a Resolvable.
:param implementation: The resolvable implementation.
:type implementation: :class:`Resolvable`
"""
cls._REGISTRY.append(implementation)
@classmethod
def get(cls, resolvable_string, options_builder=None):
"""Get a :class:`Resolvable` from a string.
:returns: A :class:`Resolvable` or ``None`` if no implementation was appropriate.
"""
options_builder = options_builder or ResolverOptionsBuilder()
for resolvable_impl in cls._REGISTRY:
try:
return resolvable_impl.from_string(resolvable_string, options_builder)
except cls.InvalidRequirement:
continue
raise cls.InvalidRequirement('Unknown requirement type: %s' % resolvable_string)
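  # Hedged example of the dispatch above (requirement string is illustrative):
  # a plain requirement falls through the registered implementations until
  # ResolvableRequirement accepts it.
  #
  #   r = Resolvable.get('setuptools>=2.2')
  #   isinstance(r, ResolvableRequirement)  # True
  #   r.name                                # 'setuptools'
  #   r.exact                               # False -- no '==' pin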
# @abstractmethod - Only available in Python 3.3+
@classmethod
def from_string(cls, requirement_string, options_builder):
"""Produce a resolvable from this requirement string.
:returns: Instance of the particular Resolvable implementation.
:raises InvalidRequirement: If requirement_string is not a valid string representation
of the resolvable.
"""
raise cls.InvalidRequirement('Resolvable is abstract.')
def __init__(self, options):
if not isinstance(options, ResolverOptionsInterface):
raise TypeError('Resolvable must be initialized with a ResolverOptionsInterface, got %s' % (
type(options)))
self._options = options
@property
def options(self):
"""The ResolverOptions for this Resolvable."""
return self._options
@abstractmethod
def compatible(self, iterator):
"""Given a finder of type :class:`Iterator` (possibly ignored), determine which packages
are compatible with this resolvable.
:returns: An iterable of compatible :class:`Package` objects.
"""
@abstractmethod
def packages(self):
"""Return a list of :class:`Package` objects that this resolvable resolves.
:returns: An iterable of compatible :class:`Package` objects.
"""
@abstractproperty
def name(self):
"""The distribution key associated with this resolvable, i.e. the name of the packages
this resolvable will produce."""
# TODO(wickman) Call this "cacheable" instead?
@abstractproperty
def exact(self):
"""Whether or not this resolvable specifies an exact (cacheable) requirement."""
# TODO(wickman) Currently 'interpreter' is unused but it is reserved for environment
# marker evaluation per PEP426 and:
# https://bitbucket.org/pypa/setuptools/issue/353/allow-distributionrequires-be-evaluated
def extras(self, interpreter=None):
"""Return the "extras" tags associated with this resolvable if any."""
return []
class ResolvableRepository(Resolvable):
"""A VCS repository resolvable, e.g. 'git+', 'svn+', 'hg+', 'bzr+' packages."""
COMPATIBLE_VCS = frozenset(['git', 'svn', 'hg', 'bzr'])
@classmethod
def from_string(cls, requirement_string, options_builder):
if any(requirement_string.startswith('%s+' % vcs) for vcs in cls.COMPATIBLE_VCS):
# further delegate
pass
# TODO(wickman) Implement: Issue #93.
raise cls.InvalidRequirement('Versioning system URLs not supported.')
def __init__(self, options):
super(ResolvableRepository, self).__init__(options)
def compatible(self, iterator):
return []
def packages(self):
return []
@property
def name(self):
    raise NotImplementedError
@property
def exact(self):
return True
class ResolvablePackage(Resolvable):
"""A package (.tar.gz, .egg, .whl, etc) resolvable."""
# TODO(wickman) Implement extras parsing for ResolvablePackage
@classmethod
def from_string(cls, requirement_string, options_builder):
requirement_string, extras = strip_extras(requirement_string)
package = Package.from_href(requirement_string)
if package is None:
raise cls.InvalidRequirement('Requirement string does not appear to be a package.')
return cls(package, options_builder.build(package.name), extras=extras)
def __init__(self, package, options, extras=None):
self.package = package
self._extras = extras
super(ResolvablePackage, self).__init__(options)
def compatible(self, iterator):
return []
def packages(self):
return [self.package]
@property
def name(self):
return self.package.name
@property
def exact(self):
return True
def extras(self, interpreter=None):
return self._extras
def __eq__(self, other):
return isinstance(other, ResolvablePackage) and self.package == other.package
def __hash__(self):
return hash(self.package)
def __str__(self):
return str(self.package)
class ResolvableRequirement(Resolvable):
"""A requirement (e.g. 'setuptools', 'Flask>=0.8,<0.9', 'pex[whl]')."""
@classmethod
def from_string(cls, requirement_string, options_builder):
try:
req = maybe_requirement(requirement_string)
except ValueError:
raise cls.InvalidRequirement('%s does not appear to be a requirement string.' %
requirement_string)
return cls(req, options_builder.build(req.key))
def __init__(self, requirement, options):
self.requirement = requirement
super(ResolvableRequirement, self).__init__(options)
def compatible(self, iterator):
sorter = self.options.get_sorter()
return sorter.sort(package for package in iterator.iter(self.requirement))
def packages(self):
iterator = self.options.get_iterator()
sorter = self.options.get_sorter()
return sorter.sort(iterator.iter(self.requirement))
@property
def name(self):
return self.requirement.key
@property
def exact(self):
return requirement_is_exact(self.requirement)
def extras(self, interpreter=None):
return list(self.requirement.extras)
def __eq__(self, other):
return isinstance(other, ResolvableRequirement) and self.requirement == other.requirement
def __hash__(self):
return hash(self.requirement)
def __str__(self):
return str(self.requirement)
class ResolvableDirectory(ResolvablePackage):
"""A source directory (with setup.py) resolvable."""
@classmethod
def is_installable(cls, requirement_string):
if not os.path.isdir(requirement_string):
return False
return os.path.isfile(os.path.join(requirement_string, 'setup.py'))
@classmethod
def from_string(cls, requirement_string, options_builder):
requirement_string, extras = strip_extras(requirement_string)
if cls.is_installable(requirement_string):
try:
# TODO(wickman) This is one case where interpreter is necessary to be fully correct. This
# may indicate that packages() should take interpreter like extras does. Once we have
# metadata in setup.cfg or whatever, then we can get the interpreter out of the equation.
sdist = Packager(requirement_string).sdist()
except InstallerBase.Error:
raise cls.InvalidRequirement('Could not create source distribution for %s' %
requirement_string)
package = Package.from_href(sdist)
return ResolvablePackage(package, options_builder.build(package.name), extras=extras)
else:
raise cls.InvalidRequirement('%s does not appear to be an installable directory.'
% requirement_string)
Resolvable.register(ResolvableDirectory)
Resolvable.register(ResolvableRepository)
Resolvable.register(ResolvablePackage)
Resolvable.register(ResolvableRequirement)
# TODO(wickman) Because we explicitly acknowledge all implementations of Resolvable here,
# perhaps move away from a registry pattern and integrate into Resolvable classmethod.
def resolvables_from_iterable(iterable, builder):
"""Given an iterable of resolvable-like objects, return list of Resolvable objects.
:param iterable: An iterable of :class:`Resolvable`, :class:`Requirement`, :class:`Package`,
or `str` to map into an iterable of :class:`Resolvable` objects.
:returns: A list of :class:`Resolvable` objects.
"""
def translate(obj):
if isinstance(obj, Resolvable):
return obj
elif isinstance(obj, Requirement):
return ResolvableRequirement(obj, builder.build(obj.key))
elif isinstance(obj, Package):
return ResolvablePackage(obj, builder.build(obj.name))
elif isinstance(obj, compatibility_string):
return Resolvable.get(obj, builder)
else:
raise ValueError('Do not know how to resolve %s' % type(obj))
return list(map(translate, iterable))
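# Hedged usage sketch (not in the original module): mixed inputs are
# normalised into Resolvable objects against a shared options builder.
#
#   builder = ResolverOptionsBuilder()
#   resolvables_from_iterable(
#       ['Flask>=0.8,<0.9', Requirement.parse('pex==1.0.0')], builder)
#   # -> [ResolvableRequirement(Flask>=0.8,<0.9), ResolvableRequirement(pex==1.0.0)]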
| apache-2.0 |
leighpauls/k2cro4 | third_party/python_26/Lib/distutils/bcppcompiler.py | 53 | 15091 | """distutils.bcppcompiler
Contains BorlandCCompiler, an implementation of the abstract CCompiler class
for the Borland C++ compiler.
"""
# This implementation by Lyle Johnson, based on the original msvccompiler.py
# module and using the directions originally published by Gordon Williams.
# XXX looks like there's a LOT of overlap between these two classes:
# someone should sit down and factor out the common code as
# WindowsCCompiler! --GPW
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: bcppcompiler.py 61000 2008-02-23 17:40:11Z christian.heimes $"
import os
from distutils.errors import \
DistutilsExecError, DistutilsPlatformError, \
CompileError, LibError, LinkError, UnknownFileError
from distutils.ccompiler import \
CCompiler, gen_preprocess_options, gen_lib_options
from distutils.file_util import write_file
from distutils.dep_util import newer
from distutils import log
class BCPPCompiler(CCompiler) :
"""Concrete class that implements an interface to the Borland C/C++
compiler, as defined by the CCompiler abstract class.
"""
compiler_type = 'bcpp'
# Just set this so CCompiler's constructor doesn't barf. We currently
# don't use the 'set_executables()' bureaucracy provided by CCompiler,
# as it really isn't necessary for this sort of single-compiler class.
# Would be nice to have a consistent interface with UnixCCompiler,
# though, so it's worth thinking about.
executables = {}
# Private class data (need to distinguish C from C++ source for compiler)
_c_extensions = ['.c']
_cpp_extensions = ['.cc', '.cpp', '.cxx']
# Needed for the filename generation methods provided by the
# base class, CCompiler.
src_extensions = _c_extensions + _cpp_extensions
obj_extension = '.obj'
static_lib_extension = '.lib'
shared_lib_extension = '.dll'
static_lib_format = shared_lib_format = '%s%s'
exe_extension = '.exe'
def __init__ (self,
verbose=0,
dry_run=0,
force=0):
CCompiler.__init__ (self, verbose, dry_run, force)
# These executables are assumed to all be in the path.
# Borland doesn't seem to use any special registry settings to
# indicate their installation locations.
self.cc = "bcc32.exe"
self.linker = "ilink32.exe"
self.lib = "tlib.exe"
self.preprocess_options = None
self.compile_options = ['/tWM', '/O2', '/q', '/g0']
self.compile_options_debug = ['/tWM', '/Od', '/q', '/g0']
self.ldflags_shared = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_shared_debug = ['/Tpd', '/Gn', '/q', '/x']
self.ldflags_static = []
self.ldflags_exe = ['/Gn', '/q', '/x']
self.ldflags_exe_debug = ['/Gn', '/q', '/x','/r']
# -- Worker methods ------------------------------------------------
def compile(self, sources,
output_dir=None, macros=None, include_dirs=None, debug=0,
extra_preargs=None, extra_postargs=None, depends=None):
macros, objects, extra_postargs, pp_opts, build = \
self._setup_compile(output_dir, macros, include_dirs, sources,
depends, extra_postargs)
compile_opts = extra_preargs or []
compile_opts.append ('-c')
if debug:
compile_opts.extend (self.compile_options_debug)
else:
compile_opts.extend (self.compile_options)
for obj in objects:
try:
src, ext = build[obj]
except KeyError:
continue
# XXX why do the normpath here?
src = os.path.normpath(src)
obj = os.path.normpath(obj)
# XXX _setup_compile() did a mkpath() too but before the normpath.
# Is it possible to skip the normpath?
self.mkpath(os.path.dirname(obj))
if ext == '.res':
# This is already a binary file -- skip it.
continue # the 'for' loop
if ext == '.rc':
# This needs to be compiled to a .res file -- do it now.
try:
self.spawn (["brcc32", "-fo", obj, src])
except DistutilsExecError, msg:
raise CompileError, msg
continue # the 'for' loop
# The next two are both for the real compiler.
if ext in self._c_extensions:
input_opt = ""
elif ext in self._cpp_extensions:
input_opt = "-P"
else:
# Unknown file type -- no extra options. The compiler
# will probably fail, but let it just in case this is a
# file the compiler recognizes even if we don't.
input_opt = ""
output_opt = "-o" + obj
# Compiler command line syntax is: "bcc32 [options] file(s)".
# Note that the source file names must appear at the end of
# the command line.
try:
self.spawn ([self.cc] + compile_opts + pp_opts +
[input_opt, output_opt] +
extra_postargs + [src])
except DistutilsExecError, msg:
raise CompileError, msg
return objects
# compile ()
def create_static_lib (self,
objects,
output_libname,
output_dir=None,
debug=0,
target_lang=None):
(objects, output_dir) = self._fix_object_args (objects, output_dir)
output_filename = \
self.library_filename (output_libname, output_dir=output_dir)
if self._need_link (objects, output_filename):
lib_args = [output_filename, '/u'] + objects
if debug:
pass # XXX what goes here?
try:
self.spawn ([self.lib] + lib_args)
except DistutilsExecError, msg:
raise LibError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# create_static_lib ()
def link (self,
target_desc,
objects,
output_filename,
output_dir=None,
libraries=None,
library_dirs=None,
runtime_library_dirs=None,
export_symbols=None,
debug=0,
extra_preargs=None,
extra_postargs=None,
build_temp=None,
target_lang=None):
# XXX this ignores 'build_temp'! should follow the lead of
# msvccompiler.py
(objects, output_dir) = self._fix_object_args (objects, output_dir)
(libraries, library_dirs, runtime_library_dirs) = \
self._fix_lib_args (libraries, library_dirs, runtime_library_dirs)
if runtime_library_dirs:
log.warn("I don't know what to do with 'runtime_library_dirs': %s",
str(runtime_library_dirs))
if output_dir is not None:
output_filename = os.path.join (output_dir, output_filename)
if self._need_link (objects, output_filename):
# Figure out linker args based on type of target.
if target_desc == CCompiler.EXECUTABLE:
startup_obj = 'c0w32'
if debug:
ld_args = self.ldflags_exe_debug[:]
else:
ld_args = self.ldflags_exe[:]
else:
startup_obj = 'c0d32'
if debug:
ld_args = self.ldflags_shared_debug[:]
else:
ld_args = self.ldflags_shared[:]
# Create a temporary exports file for use by the linker
if export_symbols is None:
def_file = ''
else:
head, tail = os.path.split (output_filename)
modname, ext = os.path.splitext (tail)
temp_dir = os.path.dirname(objects[0]) # preserve tree structure
def_file = os.path.join (temp_dir, '%s.def' % modname)
contents = ['EXPORTS']
for sym in (export_symbols or []):
contents.append(' %s=_%s' % (sym, sym))
self.execute(write_file, (def_file, contents),
"writing %s" % def_file)
# Borland C++ has problems with '/' in paths
objects2 = map(os.path.normpath, objects)
# split objects in .obj and .res files
# Borland C++ needs them at different positions in the command line
objects = [startup_obj]
resources = []
for file in objects2:
(base, ext) = os.path.splitext(os.path.normcase(file))
if ext == '.res':
resources.append(file)
else:
objects.append(file)
for l in library_dirs:
ld_args.append("/L%s" % os.path.normpath(l))
ld_args.append("/L.") # we sometimes use relative paths
# list of object files
ld_args.extend(objects)
# XXX the command-line syntax for Borland C++ is a bit wonky;
# certain filenames are jammed together in one big string, but
# comma-delimited. This doesn't mesh too well with the
# Unix-centric attitude (with a DOS/Windows quoting hack) of
# 'spawn()', so constructing the argument list is a bit
# awkward. Note that doing the obvious thing and jamming all
# the filenames and commas into one argument would be wrong,
# because 'spawn()' would quote any filenames with spaces in
# them. Arghghh!. Apparently it works fine as coded...
# name of dll/exe file
ld_args.extend([',',output_filename])
# no map file and start libraries
ld_args.append(',,')
for lib in libraries:
# see if we find it and if there is a bcpp specific lib
# (xxx_bcpp.lib)
libfile = self.find_library_file(library_dirs, lib, debug)
if libfile is None:
ld_args.append(lib)
# probably a BCPP internal library -- don't warn
else:
# full name which prefers bcpp_xxx.lib over xxx.lib
ld_args.append(libfile)
# some default libraries
ld_args.append ('import32')
ld_args.append ('cw32mt')
# def file for export symbols
ld_args.extend([',',def_file])
# add resource files
ld_args.append(',')
ld_args.extend(resources)
if extra_preargs:
ld_args[:0] = extra_preargs
if extra_postargs:
ld_args.extend(extra_postargs)
self.mkpath (os.path.dirname (output_filename))
try:
self.spawn ([self.linker] + ld_args)
except DistutilsExecError, msg:
raise LinkError, msg
else:
log.debug("skipping %s (up-to-date)", output_filename)
# link ()
# -- Miscellaneous methods -----------------------------------------
def find_library_file (self, dirs, lib, debug=0):
# List of effective library names to try, in order of preference:
# xxx_bcpp.lib is better than xxx.lib
# and xxx_d.lib is better than xxx.lib if debug is set
#
# The "_bcpp" suffix is to handle a Python installation for people
# with multiple compilers (primarily Distutils hackers, I suspect
# ;-). The idea is they'd have one static library for each
# compiler they care about, since (almost?) every Windows compiler
# seems to have a different format for static libraries.
if debug:
dlib = (lib + "_d")
try_names = (dlib + "_bcpp", lib + "_bcpp", dlib, lib)
else:
try_names = (lib + "_bcpp", lib)
for dir in dirs:
for name in try_names:
libfile = os.path.join(dir, self.library_filename(name))
if os.path.exists(libfile):
return libfile
else:
# Oops, didn't find it in *any* of 'dirs'
return None
# overwrite the one from CCompiler to support rc and res-files
def object_filenames (self,
source_filenames,
strip_dir=0,
output_dir=''):
if output_dir is None: output_dir = ''
obj_names = []
for src_name in source_filenames:
# use normcase to make sure '.rc' is really '.rc' and not '.RC'
(base, ext) = os.path.splitext (os.path.normcase(src_name))
if ext not in (self.src_extensions + ['.rc','.res']):
raise UnknownFileError, \
"unknown file type '%s' (from '%s')" % \
(ext, src_name)
if strip_dir:
base = os.path.basename (base)
if ext == '.res':
# these can go unchanged
obj_names.append (os.path.join (output_dir, base + ext))
elif ext == '.rc':
# these need to be compiled to .res-files
obj_names.append (os.path.join (output_dir, base + '.res'))
else:
obj_names.append (os.path.join (output_dir,
base + self.obj_extension))
return obj_names
# object_filenames ()
def preprocess (self,
source,
output_file=None,
macros=None,
include_dirs=None,
extra_preargs=None,
extra_postargs=None):
(_, macros, include_dirs) = \
self._fix_compile_args(None, macros, include_dirs)
pp_opts = gen_preprocess_options(macros, include_dirs)
pp_args = ['cpp32.exe'] + pp_opts
if output_file is not None:
pp_args.append('-o' + output_file)
if extra_preargs:
pp_args[:0] = extra_preargs
if extra_postargs:
pp_args.extend(extra_postargs)
pp_args.append(source)
# We need to preprocess: either we're being forced to, or the
# source file is newer than the target (or the target doesn't
# exist).
if self.force or output_file is None or newer(source, output_file):
if output_file:
self.mkpath(os.path.dirname(output_file))
try:
self.spawn(pp_args)
except DistutilsExecError, msg:
print msg
raise CompileError, msg
# preprocess()
| bsd-3-clause |
jnerin/ansible | lib/ansible/modules/web_infrastructure/ansible_tower/tower_job_template.py | 34 | 8207 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2017, Wayne Witzel III <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: tower_job_template
author: "Wayne Witzel III (@wwitzel3)"
version_added: "2.3"
short_description: create, update, or destroy Ansible Tower job_template.
description:
- Create, update, or destroy Ansible Tower job templates. See
U(https://www.ansible.com/tower) for an overview.
options:
name:
description:
- Name to use for the job_template.
required: True
description:
description:
- Description to use for the job_template.
required: False
default: null
job_type:
description:
- The job_type to use for the job_template.
required: True
choices: ["run", "check", "scan"]
inventory:
description:
- Inventory to use for the job_template.
required: False
default: null
project:
description:
- Project to use for the job_template.
required: True
playbook:
description:
- Playbook to use for the job_template.
required: True
machine_credential:
description:
- Machine_credential to use for the job_template.
required: False
default: null
cloud_credential:
description:
- Cloud_credential to use for the job_template.
required: False
default: null
network_credential:
description:
- The network_credential to use for the job_template.
required: False
default: null
forks:
description:
- The number of parallel or simultaneous processes to use while executing the playbook.
required: False
default: null
limit:
description:
- A host pattern to further constrain the list of hosts managed or affected by the playbook
required: False
default: null
verbosity:
description:
- Control the output level Ansible produces as the playbook runs.
required: False
choices: ["verbose", "debug"]
default: null
job_tags:
description:
- The job_tags to use for the job_template.
required: False
default: null
skip_tags:
description:
- The skip_tags to use for the job_template.
required: False
default: null
host_config_key:
description:
- Allow provisioning callbacks using this host config key.
required: False
default: null
extra_vars_path:
description:
- Path to the extra_vars yaml file.
required: False
default: null
ask_extra_vars:
description:
- Prompt user for extra_vars on launch.
required: False
default: False
ask_tags:
description:
- Prompt user for job tags on launch.
required: False
default: False
ask_job_type:
description:
- Prompt user for job type on launch.
required: False
default: False
ask_inventory:
description:
        - Prompt user for inventory on launch.
required: False
default: False
ask_credential:
description:
- Prompt user for credential on launch.
required: False
default: False
become_enabled:
description:
        - Enable privilege escalation (become) for jobs run from this job template.
required: False
default: False
state:
description:
- Desired state of the resource.
required: False
default: "present"
choices: ["present", "absent"]
extends_documentation_fragment: tower
'''
EXAMPLES = '''
- name: Create tower Ping job template
tower_job_template:
name: Ping
job_type: run
inventory: Local
project: Demo
playbook: ping.yml
machine_credential: Local
state: present
tower_config_file: "~/tower_cli.cfg"
'''
from ansible.module_utils.ansible_tower import tower_argument_spec, tower_auth_config, tower_check_mode, HAS_TOWER_CLI
try:
import tower_cli
import tower_cli.utils.exceptions as exc
from tower_cli.conf import settings
except ImportError:
pass
def update_fields(p):
'''This updates the module field names
to match the field names tower-cli expects to make
calling of the modify/delete methods easier.
'''
params = p.copy()
field_map = {
'ask_extra_vars': 'ask_variables_on_launch',
'ask_limit': 'ask_limit_on_launch',
'ask_tags': 'ask_tags_on_launch',
'ask_job_type': 'ask_job_type_on_launch',
'machine_credential': 'credential',
}
params_update = {}
for old_k, new_k in field_map.items():
v = params.pop(old_k)
params_update[new_k] = v
extra_vars = params.get('extra_vars_path')
if extra_vars is not None:
params_update['extra_vars'] = ['@' + extra_vars]
params.update(params_update)
return params
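# Illustrative sketch (not part of the original module): with hypothetical
# input parameters, update_fields() renames the Ansible-facing option names
# to the ones tower-cli expects, roughly:
#
#   update_fields({'ask_extra_vars': True, 'ask_limit': False,
#                  'ask_tags': False, 'ask_job_type': False,
#                  'machine_credential': 'Local',
#                  'extra_vars_path': 'vars.yml'})
#   # -> {'ask_variables_on_launch': True, 'ask_limit_on_launch': False,
#   #     'ask_tags_on_launch': False, 'ask_job_type_on_launch': False,
#   #     'credential': 'Local', 'extra_vars_path': 'vars.yml',
#   #     'extra_vars': ['@vars.yml']}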
def update_resources(module, p):
params = p.copy()
identity_map = {
'project': 'name',
'inventory': 'name',
'machine_credential': 'name',
'network_credential': 'name',
'cloud_credential': 'name',
}
for k, v in identity_map.items():
try:
if params[k]:
key = 'credential' if '_credential' in k else k
result = tower_cli.get_resource(key).get(**{v: params[k]})
params[k] = result['id']
except (exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
return params
def main():
argument_spec = tower_argument_spec()
argument_spec.update(dict(
name=dict(required=True),
description=dict(),
job_type=dict(choices=['run', 'check', 'scan'], required=True),
inventory=dict(),
project=dict(required=True),
playbook=dict(required=True),
machine_credential=dict(),
cloud_credential=dict(),
network_credential=dict(),
forks=dict(type='int'),
limit=dict(),
verbosity=dict(choices=['verbose', 'debug']),
job_tags=dict(),
skip_tags=dict(),
host_config_key=dict(),
extra_vars_path=dict(type='path', required=False),
ask_extra_vars=dict(type='bool', default=False),
ask_limit=dict(type='bool', default=False),
ask_tags=dict(type='bool', default=False),
ask_job_type=dict(type='bool', default=False),
ask_inventory=dict(type='bool', default=False),
ask_credential=dict(type='bool', default=False),
become_enabled=dict(type='bool', default=False),
state=dict(choices=['present', 'absent'], default='present'),
))
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
if not HAS_TOWER_CLI:
module.fail_json(msg='ansible-tower-cli required for this module')
name = module.params.get('name')
state = module.params.get('state')
json_output = {'job_template': name, 'state': state}
tower_auth = tower_auth_config(module)
with settings.runtime_values(**tower_auth):
tower_check_mode(module)
jt = tower_cli.get_resource('job_template')
params = update_resources(module, module.params)
params = update_fields(params)
params['create_on_missing'] = True
try:
if state == 'present':
result = jt.modify(**params)
json_output['id'] = result['id']
elif state == 'absent':
result = jt.delete(**params)
except (exc.ConnectionError, exc.BadRequest, exc.NotFound) as excinfo:
module.fail_json(msg='Failed to update job template: {0}'.format(excinfo), changed=False)
json_output['changed'] = result['changed']
module.exit_json(**json_output)
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
Zen-CODE/kivy | kivy/uix/vkeyboard.py | 3 | 29246 | '''
VKeyboard
=========
.. image:: images/vkeyboard.jpg
:align: right
.. versionadded:: 1.0.8
VKeyboard is an onscreen keyboard for Kivy. Its operation is intended to be
transparent to the user. Using the widget directly is NOT recommended. Read the
section `Request keyboard`_ first.
Modes
-----
This virtual keyboard has a docked and free mode:
* docked mode (:attr:`VKeyboard.docked` = True)
Generally used when only one person is using the computer, like a tablet or
personal computer etc.
* free mode: (:attr:`VKeyboard.docked` = False)
Mostly for multitouch surfaces. This mode allows multiple virtual
keyboards to be used on the screen.
If the docked mode changes, you need to manually call
:meth:`VKeyboard.setup_mode` otherwise the change will have no impact.
During that call, the VKeyboard, implemented on top of a
:class:`~kivy.uix.scatter.Scatter`, will change the
behavior of the scatter and position the keyboard near the target (if target
and docked mode is set).
Layouts
-------
The virtual keyboard is able to load a custom layout. If you create a new
layout and put the JSON in :file:`<kivy_data_dir>/keyboards/<layoutid>.json`,
you can load it by setting :attr:`VKeyboard.layout` to your layoutid.
The JSON must be structured like this::
{
"title": "Title of your layout",
"description": "Description of your layout",
"cols": 15,
"rows": 5,
...
}
Then, you need to describe the keys in each row, for either a "normal",
"shift" or a "special" (added in version 1.9.0) mode. Keys for this row
data must be named `normal_<row>`, `shift_<row>` and `special_<row>`.
Replace `row` with the row number.
Inside each row, you will describe the key. A key is a 4 element list in
the format::
[ <text displayed on the keyboard>, <text to put when the key is pressed>,
<text that represents the keycode>, <size of cols> ]
Here are example keys::
# f key
["f", "f", "f", 1]
    # tab key
["\u21B9", "\t", "tab", 1.5]
Finally, complete the JSON::
{
...
"normal_1": [
["`", "`", "`", 1], ["1", "1", "1", 1], ["2", "2", "2", 1],
["3", "3", "3", 1], ["4", "4", "4", 1], ["5", "5", "5", 1],
["6", "6", "6", 1], ["7", "7", "7", 1], ["8", "8", "8", 1],
["9", "9", "9", 1], ["0", "0", "0", 1], ["+", "+", "+", 1],
["=", "=", "=", 1], ["\u232b", null, "backspace", 2]
],
"shift_1": [ ... ],
"normal_2": [ ... ],
"special_2": [ ... ],
...
}
Request Keyboard
----------------
The instantiation of the virtual keyboard is controlled by the configuration.
Check `keyboard_mode` and `keyboard_layout` in the :doc:`api-kivy.config`.
If you intend to create a widget that requires a keyboard, do not use the
virtual keyboard directly, but prefer to use the best method available on
the platform. Check the :meth:`~kivy.core.window.WindowBase.request_keyboard`
method in the :doc:`api-kivy.core.window`.
If you want a specific layout when you request the keyboard, you should write
something like this (from 1.8.0, numeric.json can be in the same directory as
your main.py)::
keyboard = Window.request_keyboard(
self._keyboard_close, self)
if keyboard.widget:
        vkeyboard = keyboard.widget
vkeyboard.layout = 'numeric.json'
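If you create a :class:`VKeyboard` directly instead (again, not the
recommended path), you can listen to its key events. A minimal sketch, where
the callback name is only illustrative::
    def on_vkey_down(keyboard, keycode, text, modifiers):
        print(keycode, text, modifiers)
    vkeyboard = VKeyboard()
    vkeyboard.bind(on_key_down=on_vkey_down)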
'''
__all__ = ('VKeyboard', )
from kivy import kivy_data_dir
from kivy.vector import Vector
from kivy.config import Config
from kivy.uix.scatter import Scatter
from kivy.uix.label import Label
from kivy.properties import ObjectProperty, NumericProperty, StringProperty, \
BooleanProperty, DictProperty, OptionProperty, ListProperty
from kivy.logger import Logger
from kivy.graphics import Color, BorderImage, Canvas
from kivy.core.image import Image
from kivy.resources import resource_find
from kivy.clock import Clock
from os.path import join, splitext, basename
from os import listdir
from json import loads
default_layout_path = join(kivy_data_dir, 'keyboards')
class VKeyboard(Scatter):
'''
VKeyboard is an onscreen keyboard with multitouch support.
Its layout is entirely customizable and you can switch between available
layouts using a button in the bottom right of the widget.
:Events:
`on_key_down`: keycode, internal, modifiers
Fired when the keyboard received a key down event (key press).
`on_key_up`: keycode, internal, modifiers
Fired when the keyboard received a key up event (key release).
'''
target = ObjectProperty(None, allownone=True)
'''Target widget associated with the VKeyboard. If set, it will be used to
send keyboard events. If the VKeyboard mode is "free", it will also be used
to set the initial position.
:attr:`target` is an :class:`~kivy.properties.ObjectProperty` instance and
defaults to None.
'''
callback = ObjectProperty(None, allownone=True)
'''Callback can be set to a function that will be called if the
VKeyboard is closed by the user.
:attr:`target` is an :class:`~kivy.properties.ObjectProperty` instance and
defaults to None.
'''
layout = StringProperty(None)
'''Layout to use for the VKeyboard. By default, it will be the
layout set in the configuration, according to the `keyboard_layout`
in `[kivy]` section.
.. versionchanged:: 1.8.0
If layout is a .json filename, it will loaded and added to the
available_layouts.
:attr:`layout` is a :class:`~kivy.properties.StringProperty` and defaults
to None.
'''
layout_path = StringProperty(default_layout_path)
'''Path from which layouts are read.
:attr:`layout` is a :class:`~kivy.properties.StringProperty` and
defaults to :file:`<kivy_data_dir>/keyboards/`
'''
available_layouts = DictProperty({})
'''Dictionary of all available layouts. Keys are the layout ID, and the
value is the JSON (translated into a Python object).
:attr:`available_layouts` is a :class:`~kivy.properties.DictProperty` and
defaults to {}.
'''
docked = BooleanProperty(False)
'''Indicate whether the VKeyboard is docked on the screen or not. If you
change it, you must manually call :meth:`setup_mode` otherwise it will have
no impact. If the VKeyboard is created by the Window, the docked mode will
be automatically set by the configuration, using the `keyboard_mode` token
in `[kivy]` section.
:attr:`docked` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
margin_hint = ListProperty([.05, .06, .05, .06])
'''Margin hint, used as spacing between keyboard background and keys
content. The margin is composed of four values, between 0 and 1::
margin_hint = [top, right, bottom, left]
The margin hints will be multiplied by width and height, according to their
position.
:attr:`margin_hint` is a :class:`~kivy.properties.ListProperty` and
defaults to [.05, .06, .05, .06]
'''
key_margin = ListProperty([2, 2, 2, 2])
'''Key margin, used to create space between keys. The margin is composed of
four values, in pixels::
key_margin = [top, right, bottom, left]
:attr:`key_margin` is a :class:`~kivy.properties.ListProperty` and defaults
to [2, 2, 2, 2]
'''
background_color = ListProperty([1, 1, 1, 1])
'''Background color, in the format (r, g, b, a). If a background is
set, the color will be combined with the background texture.
:attr:`background_color` is a :class:`~kivy.properties.ListProperty` and
defaults to [1, 1, 1, 1].
'''
background = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_background')
'''Filename of the background image.
:attr:`background` a :class:`~kivy.properties.StringProperty` and defaults
to :file:`atlas://data/images/defaulttheme/vkeyboard_background`.
'''
background_disabled = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_disabled_background')
'''Filename of the background image when vkeyboard is disabled.
.. versionadded:: 1.8.0
:attr:`background_disabled` is a
:class:`~kivy.properties.StringProperty` and defaults to
    :file:`atlas://data/images/defaulttheme/vkeyboard_disabled_background`.
'''
key_background_color = ListProperty([1, 1, 1, 1])
'''Key background color, in the format (r, g, b, a). If a key background is
set, the color will be combined with the key background texture.
:attr:`key_background_color` is a :class:`~kivy.properties.ListProperty`
and defaults to [1, 1, 1, 1].
'''
key_background_normal = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_key_normal')
'''Filename of the key background image for use when no touches are active
on the widget.
:attr:`key_background_normal` a :class:`~kivy.properties.StringProperty`
and defaults to
:file:`atlas://data/images/defaulttheme/vkeyboard_key_normal`.
'''
key_disabled_background_normal = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_key_normal')
'''Filename of the key background image for use when no touches are active
on the widget and vkeyboard is disabled.
    .. versionadded:: 1.8.0
:attr:`key_disabled_background_normal` a
:class:`~kivy.properties.StringProperty` and defaults to
    :file:`atlas://data/images/defaulttheme/vkeyboard_key_normal`.
'''
key_background_down = StringProperty(
'atlas://data/images/defaulttheme/vkeyboard_key_down')
'''Filename of the key background image for use when a touch is active
on the widget.
:attr:`key_background_down` a :class:`~kivy.properties.StringProperty`
and defaults to
:file:`atlas://data/images/defaulttheme/vkeyboard_key_down`.
'''
background_border = ListProperty([16, 16, 16, 16])
'''Background image border. Used for controlling the
:attr:`~kivy.graphics.vertex_instructions.BorderImage.border` property of
the background.
:attr:`background_border` is a :class:`~kivy.properties.ListProperty` and
defaults to [16, 16, 16, 16]
'''
key_border = ListProperty([8, 8, 8, 8])
'''Key image border. Used for controlling the
:attr:`~kivy.graphics.vertex_instructions.BorderImage.border` property of
the key.
:attr:`key_border` is a :class:`~kivy.properties.ListProperty` and
    defaults to [8, 8, 8, 8]
'''
# XXX internal variables
layout_mode = OptionProperty('normal',
options=('normal', 'shift', 'special'))
layout_geometry = DictProperty({})
have_capslock = BooleanProperty(False)
have_shift = BooleanProperty(False)
have_special = BooleanProperty(False)
active_keys = DictProperty({})
font_size = NumericProperty('20dp')
font_name = StringProperty('data/fonts/DejaVuSans.ttf')
repeat_touch = ObjectProperty(allownone=True)
__events__ = ('on_key_down', 'on_key_up')
def __init__(self, **kwargs):
# XXX move to style.kv
kwargs.setdefault('size_hint', (None, None))
kwargs.setdefault('scale_min', .4)
kwargs.setdefault('scale_max', 1.6)
kwargs.setdefault('size', (700, 200))
kwargs.setdefault('docked', False)
self._trigger_update_layout_mode = Clock.create_trigger(
self._update_layout_mode)
self._trigger_load_layouts = Clock.create_trigger(
self._load_layouts)
self._trigger_load_layout = Clock.create_trigger(
self._load_layout)
self.bind(
docked=self.setup_mode,
have_shift=self._trigger_update_layout_mode,
have_capslock=self._trigger_update_layout_mode,
have_special=self._trigger_update_layout_mode,
layout_path=self._trigger_load_layouts,
layout=self._trigger_load_layout)
super(VKeyboard, self).__init__(**kwargs)
# load all the layouts found in the layout_path directory
self._load_layouts()
# ensure we have default layouts
available_layouts = self.available_layouts
if not available_layouts:
Logger.critical('VKeyboard: unable to load default layouts')
# load the default layout from configuration
if self.layout is None:
self.layout = Config.get('kivy', 'keyboard_layout')
else:
# ensure the current layout is found on the available layout
self._trigger_load_layout()
# update layout mode (shift or normal)
self._trigger_update_layout_mode()
# create a top layer to draw active keys on
with self.canvas:
self.background_key_layer = Canvas()
self.active_keys_layer = Canvas()
    def on_disabled(self, instance, value):
self.refresh_keys()
def _update_layout_mode(self, *l):
# update mode according to capslock and shift key
mode = self.have_capslock != self.have_shift
mode = 'shift' if mode else 'normal'
if self.have_special:
mode = "special"
if mode != self.layout_mode:
self.layout_mode = mode
self.refresh(False)
def _load_layout(self, *largs):
# ensure new layouts are loaded first
if self._trigger_load_layouts.is_triggered:
self._load_layouts()
self._trigger_load_layouts.cancel()
value = self.layout
available_layouts = self.available_layouts
# it's a filename, try to load it directly
if self.layout[-5:] == '.json':
if value not in available_layouts:
fn = resource_find(self.layout)
self._load_layout_fn(fn, self.layout)
if not available_layouts:
return
if value not in available_layouts and value != 'qwerty':
Logger.error(
'Vkeyboard: <%s> keyboard layout mentioned in '
'conf file was not found, fallback on qwerty' %
value)
self.layout = 'qwerty'
self.refresh(True)
def _load_layouts(self, *largs):
# first load available layouts from json files
# XXX fix to be able to reload layout when path is changing
value = self.layout_path
for fn in listdir(value):
self._load_layout_fn(join(value, fn),
basename(splitext(fn)[0]))
def _load_layout_fn(self, fn, name):
available_layouts = self.available_layouts
if fn[-5:] != '.json':
return
with open(fn, 'r') as fd:
json_content = fd.read()
layout = loads(json_content)
available_layouts[name] = layout
def setup_mode(self, *largs):
'''Call this method when you want to readjust the keyboard according to
options: :attr:`docked` or not, with attached :attr:`target` or not:
* If :attr:`docked` is True, it will call :meth:`setup_mode_dock`
* If :attr:`docked` is False, it will call :meth:`setup_mode_free`
Feel free to overload these methods to create new
positioning behavior.
'''
if self.docked:
self.setup_mode_dock()
else:
self.setup_mode_free()
def setup_mode_dock(self, *largs):
'''Setup the keyboard in docked mode.
Dock mode will reset the rotation, disable translation, rotation and
scale. Scale and position will be automatically adjusted to attach the
keyboard to the bottom of the screen.
.. note::
Don't call this method directly, use :meth:`setup_mode` instead.
'''
self.do_translation = False
self.do_rotation = False
self.do_scale = False
self.rotation = 0
win = self.get_parent_window()
scale = win.width / float(self.width)
self.scale = scale
self.pos = 0, 0
win.bind(on_resize=self._update_dock_mode)
def _update_dock_mode(self, win, *largs):
scale = win.width / float(self.width)
self.scale = scale
self.pos = 0, 0
def setup_mode_free(self):
'''Setup the keyboard in free mode.
Free mode is designed to let the user control the position and
orientation of the keyboard. The only real usage is for a multiuser
        environment, but you might find other ways to use it.
If a :attr:`target` is set, it will place the vkeyboard under the
target.
.. note::
Don't call this method directly, use :meth:`setup_mode` instead.
'''
self.do_translation = True
self.do_rotation = True
self.do_scale = True
target = self.target
if not target:
return
# NOTE all math will be done in window point of view
# determine rotation of the target
a = Vector(1, 0)
b = Vector(target.to_window(0, 0))
c = Vector(target.to_window(1, 0)) - b
self.rotation = -a.angle(c)
# determine the position of center/top of the keyboard
dpos = Vector(self.to_window(self.width / 2., self.height))
# determine the position of center/bottom of the target
cpos = Vector(target.to_window(target.center_x, target.y))
# the goal now is to map both point, calculate the diff between them
diff = dpos - cpos
# we still have an issue, self.pos represent the bounding box,
# not the 0,0 coordinate of the scatter. we need to apply also
# the diff between them (inside and outside coordinate matrix).
        # It's hard to explain, but sketch it on paper, write out all
        # the vectors being calculated, and you'll understand. :)
diff2 = Vector(self.x + self.width / 2., self.y + self.height) - \
Vector(self.to_parent(self.width / 2., self.height))
diff -= diff2
# now we have a good "diff", set it as a pos.
self.pos = -diff
def change_layout(self):
# XXX implement popup with all available layouts
pass
def refresh(self, force=False):
'''(internal) Recreate the entire widget and graphics according to the
selected layout.
'''
self.clear_widgets()
if force:
self.refresh_keys_hint()
self.refresh_keys()
self.refresh_active_keys_layer()
def refresh_active_keys_layer(self):
self.active_keys_layer.clear()
active_keys = self.active_keys
layout_geometry = self.layout_geometry
background = resource_find(self.key_background_down)
texture = Image(background, mipmap=True).texture
with self.active_keys_layer:
Color(1, 1, 1)
for line_nb, index in active_keys.values():
pos, size = layout_geometry['LINE_%d' % line_nb][index]
BorderImage(texture=texture, pos=pos, size=size,
border=self.key_border)
def refresh_keys_hint(self):
layout = self.available_layouts[self.layout]
layout_cols = layout['cols']
layout_rows = layout['rows']
layout_geometry = self.layout_geometry
mtop, mright, mbottom, mleft = self.margin_hint
# get relative EFFICIENT surface of the layout without external margins
el_hint = 1. - mleft - mright
eh_hint = 1. - mtop - mbottom
ex_hint = 0 + mleft
ey_hint = 0 + mbottom
# get relative unit surface
uw_hint = (1. / layout_cols) * el_hint
uh_hint = (1. / layout_rows) * eh_hint
layout_geometry['U_HINT'] = (uw_hint, uh_hint)
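        # Worked example (illustrative, not from the original source): with
        # the default margin_hint [.05, .06, .05, .06] and a 15 x 5 layout,
        # el_hint = 1 - .06 - .06 = 0.88 and eh_hint = 1 - .05 - .05 = 0.90,
        # so uw_hint = 0.88 / 15 ~= 0.0587 and uh_hint = 0.90 / 5 = 0.18.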
# calculate individual key RELATIVE surface and pos (without key
# margin)
current_y_hint = ey_hint + eh_hint
for line_nb in range(1, layout_rows + 1):
current_y_hint -= uh_hint
# get line_name
line_name = '%s_%d' % (self.layout_mode, line_nb)
line_hint = 'LINE_HINT_%d' % line_nb
layout_geometry[line_hint] = []
current_x_hint = ex_hint
# go through the list of keys (tuples of 4)
for key in layout[line_name]:
# calculate relative pos, size
layout_geometry[line_hint].append([
(current_x_hint, current_y_hint),
(key[3] * uw_hint, uh_hint)])
current_x_hint += key[3] * uw_hint
self.layout_geometry = layout_geometry
def refresh_keys(self):
layout = self.available_layouts[self.layout]
layout_rows = layout['rows']
layout_geometry = self.layout_geometry
w, h = self.size
kmtop, kmright, kmbottom, kmleft = self.key_margin
uw_hint, uh_hint = layout_geometry['U_HINT']
for line_nb in range(1, layout_rows + 1):
llg = layout_geometry['LINE_%d' % line_nb] = []
llg_append = llg.append
for key in layout_geometry['LINE_HINT_%d' % line_nb]:
x_hint, y_hint = key[0]
w_hint, h_hint = key[1]
kx = x_hint * w
ky = y_hint * h
kw = w_hint * w
kh = h_hint * h
# now adjust, considering the key margin
kx = int(kx + kmleft)
ky = int(ky + kmbottom)
kw = int(kw - kmleft - kmright)
kh = int(kh - kmbottom - kmtop)
pos = (kx, ky)
size = (kw, kh)
llg_append((pos, size))
self.layout_geometry = layout_geometry
self.draw_keys()
def draw_keys(self):
layout = self.available_layouts[self.layout]
layout_rows = layout['rows']
layout_geometry = self.layout_geometry
layout_mode = self.layout_mode
# draw background
w, h = self.size
background = resource_find(self.background_disabled
if self.disabled else
self.background)
texture = Image(background, mipmap=True).texture
self.background_key_layer.clear()
with self.background_key_layer:
Color(*self.background_color)
BorderImage(texture=texture, size=self.size,
border=self.background_border)
        # XXX separate drawing the keys and the fonts to avoid
# XXX reloading the texture each time
# first draw keys without the font
        key_normal = resource_find(self.key_disabled_background_normal
if self.disabled else
self.key_background_normal)
texture = Image(key_normal, mipmap=True).texture
with self.background_key_layer:
for line_nb in range(1, layout_rows + 1):
for pos, size in layout_geometry['LINE_%d' % line_nb]:
BorderImage(texture=texture, pos=pos, size=size,
border=self.key_border)
# then draw the text
# calculate font_size
font_size = int(w) / 46
# draw
for line_nb in range(1, layout_rows + 1):
key_nb = 0
for pos, size in layout_geometry['LINE_%d' % line_nb]:
# retrieve the relative text
text = layout[layout_mode + '_' + str(line_nb)][key_nb][0]
l = Label(text=text, font_size=font_size, pos=pos, size=size,
font_name=self.font_name)
self.add_widget(l)
key_nb += 1
def on_key_down(self, *largs):
pass
def on_key_up(self, *largs):
pass
def get_key_at_pos(self, x, y):
w, h = self.size
x_hint = x / w
# focus on the surface without margins
layout_geometry = self.layout_geometry
layout = self.available_layouts[self.layout]
layout_rows = layout['rows']
mtop, mright, mbottom, mleft = self.margin_hint
# get the line of the layout
e_height = h - (mbottom + mtop) * h # efficient height in pixels
line_height = e_height / layout_rows # line height in px
y = y - mbottom * h
line_nb = layout_rows - int(y / line_height)
if line_nb > layout_rows:
line_nb = layout_rows
if line_nb < 1:
line_nb = 1
# get the key within the line
key_index = ''
current_key_index = 0
for key in layout_geometry['LINE_HINT_%d' % line_nb]:
if x_hint >= key[0][0] and x_hint < key[0][0] + key[1][0]:
key_index = current_key_index
break
else:
current_key_index += 1
if key_index == '':
return None
# get the full character
key = layout['%s_%d' % (self.layout_mode, line_nb)][key_index]
return [key, (line_nb, key_index)]
def collide_margin(self, x, y):
'''Do a collision test, and return True if the (x, y) is inside the
vkeyboard margin.
'''
mtop, mright, mbottom, mleft = self.margin_hint
x_hint = x / self.width
y_hint = y / self.height
if x_hint > mleft and x_hint < 1. - mright \
and y_hint > mbottom and y_hint < 1. - mtop:
return False
return True
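    # Illustrative note (not from the original source): with the default
    # margin_hint [.05, .06, .05, .06], a touch at the widget's centre
    # (x_hint = 0.5, y_hint = 0.5) lands on the key area and collide_margin
    # returns False, while a touch with x_hint = 0.01 falls inside the left
    # margin and collide_margin returns True.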
def process_key_on(self, touch):
x, y = self.to_local(*touch.pos)
key = self.get_key_at_pos(x, y)
if not key:
return
key_data = key[0]
displayed_char, internal, special_char, size = key_data
line_nb, key_index = key[1]
# save pressed key on the touch
ud = touch.ud[self.uid] = {}
ud['key'] = key
# for caps lock or shift only:
uid = touch.uid
if special_char is not None:
# Do not repeat special keys
if special_char in ('capslock', 'shift', 'layout', 'special'):
Clock.unschedule(self._start_repeat_key)
self.repeat_touch = None
if special_char == 'capslock':
self.have_capslock = not self.have_capslock
uid = -1
elif special_char == 'shift':
self.have_shift = True
elif special_char == 'special':
self.have_special = True
elif special_char == 'layout':
self.change_layout()
# send info to the bus
b_keycode = special_char
b_modifiers = self._get_modifiers()
self.dispatch('on_key_down', b_keycode, internal, b_modifiers)
# save key as an active key for drawing
self.active_keys[uid] = key[1]
self.refresh_active_keys_layer()
def process_key_up(self, touch):
uid = touch.uid
if self.uid not in touch.ud:
return
# save pressed key on the touch
key_data, key = touch.ud[self.uid]['key']
displayed_char, internal, special_char, size = key_data
# send info to the bus
b_keycode = special_char
b_modifiers = self._get_modifiers()
self.dispatch('on_key_up', b_keycode, internal, b_modifiers)
if special_char == 'capslock':
uid = -1
if uid in self.active_keys:
self.active_keys.pop(uid, None)
if special_char == 'shift':
self.have_shift = False
elif special_char == 'special':
self.have_special = False
if special_char == 'capslock' and self.have_capslock:
self.active_keys[-1] = key
self.refresh_active_keys_layer()
def _get_modifiers(self):
ret = []
if self.have_shift:
ret.append('shift')
if self.have_capslock:
ret.append('capslock')
return ret
def _start_repeat_key(self, *kwargs):
Clock.schedule_interval(self._repeat_key, 0.05)
def _repeat_key(self, *kwargs):
self.process_key_on(self.repeat_touch)
def on_touch_down(self, touch):
x, y = touch.pos
if not self.collide_point(x, y):
return
if self.disabled:
return True
x, y = self.to_local(x, y)
if not self.collide_margin(x, y):
if self.repeat_touch is None:
Clock.schedule_once(self._start_repeat_key, 0.5)
self.repeat_touch = touch
self.process_key_on(touch)
touch.grab(self, exclusive=True)
else:
super(VKeyboard, self).on_touch_down(touch)
return True
def on_touch_up(self, touch):
if touch.grab_current is self:
self.process_key_up(touch)
Clock.unschedule(self._start_repeat_key)
if touch == self.repeat_touch:
Clock.unschedule(self._repeat_key)
self.repeat_touch = None
return super(VKeyboard, self).on_touch_up(touch)
if __name__ == '__main__':
from kivy.base import runTouchApp
vk = VKeyboard(layout='azerty')
runTouchApp(vk)
| mit |
lddubeau/wed | selenium_test/steps/typing.py | 2 | 4498 | import re
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from nose.tools import assert_equal # pylint: disable=E0611
from behave import step_matcher
import wedutil
# Don't complain about redefined functions
# pylint: disable=E0102
@when(u'the user deletes all text letter by letter in an element')
def step_impl(context):
driver = context.driver
element, context.emptied_element = driver.execute_script("""
var el = document.querySelector(".__start_label._title_label");
return [el, el.parentNode];
""")
context.element_to_test_for_text = context.emptied_element
keys = [Keys.ARROW_RIGHT] + [Keys.DELETE] * 20
ActionChains(driver)\
.click(element)\
.send_keys(*keys)\
.perform()
@given(u'that the user has deleted all the text in an element')
def step_impl(context):
context.execute_steps(u"""
When the user deletes all text letter by letter in an element
Then a placeholder is present in the element
""")
#
# This was an attempt at turning on an input method in the browser. As
# of 20130926, does not seem possible to do.
#
# @when(u'the user turns on the input method')
# def step_impl(context):
# driver = context.driver
# ActionChains(driver)\
# .key_down(Keys.CONTROL)\
# .key_down(Keys.ALT)\
# .send_keys(Keys.SPACE)\
# .key_up(Keys.ALT)\
# .key_up(Keys.CONTROL)\
# .perform()
@when(u'the user types "{text}"')
def step_impl(context, text):
driver = context.driver
ActionChains(driver)\
.send_keys(text)\
.perform()
@then(u'a placeholder is present in the element')
def step_impl(context):
driver = context.driver
util = context.util
element = context.emptied_element
util.wait(lambda *_: element.find_element(By.CLASS_NAME, "_placeholder"))
@then(u'"{text}" is in the text')
def step_impl(context, text):
driver = context.driver
util = context.util
def condition(*_):
el_text = util.get_text_excluding_children(
context.element_to_test_for_text)
return el_text.find(text) != -1
util.wait(condition)
step_matcher('re')
@then(u'ESCAPE is not in the text')
def step_impl(context):
util = context.util
def condition(*_):
el_text = util.get_text_excluding_children(
context.element_to_test_for_text)
return el_text.find(u"\u001b") == -1
util.wait(condition)
@when(u'the user types (?P<choice>ENTER|ESCAPE|DELETE|BACKSPACE|F1)')
def step_impl(context, choice):
driver = context.driver
key = getattr(Keys, choice)
ActionChains(driver)\
.send_keys(key)\
.perform()
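# Illustrative note (not from the original source): with the 're' step
# matcher active, this step matches feature lines such as
#   When the user types ESCAPE
# and sends the corresponding selenium Keys constant to the browser.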
@when(u'the user undoes')
def step_impl(context):
context.util.ctrl_equivalent_x("z")
@then(u'the last letter of the element\'s text is deleted')
def step_impl(context):
driver = context.driver
util = context.util
initial_pos = context.caret_position_before_arrow
util.wait(lambda *_: initial_pos != wedutil.caret_screen_pos(driver))
initial = context.clicked_element_parent_initial_text
parent = context.clicked_element_parent
final = util.get_text_excluding_children(parent)
assert_equal(initial[:-1], final, "edited text")
@then(ur'the (?P<ordinal>first|second) (?P<what>".*?"|paragraph) in body has '
ur'the text "(?P<text>.*)"')
def step_impl(context, ordinal, what, text):
util = context.util
index = 0 if ordinal == "first" else 1
if what == "paragraph":
what = "p"
else:
what = what[1:-1] # drop the quotes.
els = util.find_elements((By.CSS_SELECTOR, ".body ." +
what.replace(":", ur"\:")))
def cond(*_):
return util.get_text_excluding_children(els[index]) == text
util.wait(cond)
@when(ur'the user closes the pasting modal by accepting it')
def step_impl(context):
button = context.util.find_element(
(By.CSS_SELECTOR, ".modal.in .btn-primary"))
button.click()
@then(ur'the text is pasted after the last paragraph')
def step_impl(context):
def cond(driver):
text = driver.execute_script("""
var ps = wed_editor.dataRoot.querySelectorAll("body>p");
var p = ps[ps.length - 1];
return p.nextSibling.textContent;
""")
return text == context.expected_selection_serialization
context.util.wait(cond)
| mpl-2.0 |
stanchan/jenkins-job-builder | tests/triggers/test_triggers.py | 37 | 1098 | # Joint copyright:
# - Copyright 2012,2013 Wikimedia Foundation
# - Copyright 2012,2013 Antoine "hashar" Musso
# - Copyright 2013 Arnaud Fabre
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from testtools import TestCase
from testscenarios.testcase import TestWithScenarios
from jenkins_jobs.modules import triggers
from tests.base import get_scenarios, BaseTestCase
class TestCaseModuleTriggers(TestWithScenarios, TestCase, BaseTestCase):
fixtures_path = os.path.join(os.path.dirname(__file__), 'fixtures')
scenarios = get_scenarios(fixtures_path)
klass = triggers.Triggers
| apache-2.0 |
tbinjiayou/Odoo | addons/hr_attendance/__openerp__.py | 52 | 2119 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Attendances',
'version': '1.1',
'category': 'Human Resources',
'description': """
This module aims to manage employees' attendance.
==================================================
Keeps track of employee attendance based on the actions (Sign in/Sign out)
they perform.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'images': ['images/hr_attendances.jpeg'],
'depends': ['hr', 'report'],
'data': [
'security/ir_rule.xml',
'security/ir.model.access.csv',
'hr_attendance_view.xml',
'hr_attendance_report.xml',
'wizard/hr_attendance_error_view.xml',
'res_config_view.xml',
'views/report_attendanceerrors.xml',
'views/hr_attendance.xml',
],
'demo': ['hr_attendance_demo.xml'],
'test': [
'test/attendance_process.yml',
'test/hr_attendance_report.yml',
],
'installable': True,
'auto_install': False,
#web
'qweb': ["static/src/xml/attendance.xml"],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
bobeirasa/virtualenvs | pygeckozabbix/lib/python2.7/site-packages/setuptools/tests/test_bdist_egg.py | 286 | 1954 | """develop tests
"""
import sys
import os, re, shutil, tempfile, unittest
import tempfile
import site
from distutils.errors import DistutilsError
from setuptools.compat import StringIO
from setuptools.command.bdist_egg import bdist_egg
from setuptools.command import easy_install as easy_install_pkg
from setuptools.dist import Distribution
SETUP_PY = """\
from setuptools import setup
setup(name='foo', py_modules=['hi'])
"""
class TestDevelopTest(unittest.TestCase):
def setUp(self):
self.dir = tempfile.mkdtemp()
self.old_cwd = os.getcwd()
os.chdir(self.dir)
f = open('setup.py', 'w')
f.write(SETUP_PY)
f.close()
f = open('hi.py', 'w')
f.write('1\n')
f.close()
if sys.version >= "2.6":
self.old_base = site.USER_BASE
site.USER_BASE = tempfile.mkdtemp()
self.old_site = site.USER_SITE
site.USER_SITE = tempfile.mkdtemp()
def tearDown(self):
os.chdir(self.old_cwd)
shutil.rmtree(self.dir)
if sys.version >= "2.6":
shutil.rmtree(site.USER_BASE)
shutil.rmtree(site.USER_SITE)
site.USER_BASE = self.old_base
site.USER_SITE = self.old_site
def test_bdist_egg(self):
dist = Distribution(dict(
script_name='setup.py',
script_args=['bdist_egg'],
name='foo',
py_modules=['hi']
))
os.makedirs(os.path.join('build', 'src'))
old_stdout = sys.stdout
sys.stdout = o = StringIO()
try:
dist.parse_command_line()
dist.run_commands()
finally:
sys.stdout = old_stdout
# let's see if we got our egg link at the right place
[content] = os.listdir('dist')
self.assertTrue(re.match('foo-0.0.0-py[23].\d.egg$', content))
def test_suite():
return unittest.makeSuite(TestDevelopTest)
| mit |
madhanrm/kubernetes | hack/boilerplate/boilerplate_test.py | 629 | 1362 | #!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boilerplate
import unittest
import StringIO
import os
import sys
class TestBoilerplate(unittest.TestCase):
"""
Note: run this test from the hack/boilerplate directory.
$ python -m unittest boilerplate_test
"""
def test_boilerplate(self):
os.chdir("test/")
class Args(object):
def __init__(self):
self.filenames = []
self.rootdir = "."
self.boilerplate_dir = "../"
self.verbose = True
# capture stdout
old_stdout = sys.stdout
sys.stdout = StringIO.StringIO()
boilerplate.args = Args()
ret = boilerplate.main()
output = sorted(sys.stdout.getvalue().split())
sys.stdout = old_stdout
self.assertEquals(
output, ['././fail.go', '././fail.py'])
| apache-2.0 |
xin3liang/platform_external_chromium_org | build/android/install_emulator_deps.py | 43 | 10192 | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Installs deps for using SDK emulator for testing.
The script will download the SDK and system images, if they are not present, and
install and enable KVM, if virtualization has been enabled in the BIOS.
"""
import logging
import optparse
import os
import re
import shutil
import sys
from pylib import cmd_helper
from pylib import constants
from pylib import pexpect
from pylib.utils import run_tests_helper
# Android API level
DEFAULT_ANDROID_API_LEVEL = constants.ANDROID_SDK_VERSION
# From the Android Developer's website.
# Keep this up to date; the user can install older API levels as necessary.
SDK_BASE_URL = 'http://dl.google.com/android/adt'
SDK_ZIP = 'adt-bundle-linux-x86_64-20131030.zip'
# pylint: disable=C0301
# Android x86 system image from the Intel website:
# http://software.intel.com/en-us/articles/intel-eula-x86-android-4-2-jelly-bean-bin
# These don't exist prior to Android-15.
# As of 08 Nov 2013, Android-19 is not yet available either.
X86_IMG_URLS = {
15: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-15_r01.zip',
16: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-16_r01.zip',
17: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-17_r01.zip',
18: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-18_r01.zip',
19: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-19_r01.zip'}
#pylint: enable=C0301
def CheckSDK():
"""Check if SDK is already installed.
Returns:
True if the emulator SDK directory (src/android_emulator_sdk/) exists.
"""
return os.path.exists(constants.EMULATOR_SDK_ROOT)
def CheckSDKPlatform(api_level=DEFAULT_ANDROID_API_LEVEL):
"""Check if the "SDK Platform" for the specified API level is installed.
This is necessary in order for the emulator to run when the target
is specified.
Args:
api_level: the Android API level to check; defaults to the latest API.
Returns:
True if the platform is already installed.
"""
android_binary = os.path.join(constants.EMULATOR_SDK_ROOT,
'sdk', 'tools', 'android')
pattern = re.compile('id: [0-9]+ or "android-%d"' % api_level)
try:
exit_code, stdout = cmd_helper.GetCmdStatusAndOutput(
[android_binary, 'list'])
if exit_code != 0:
raise Exception('\'android list\' command failed')
for line in stdout.split('\n'):
if pattern.match(line):
return True
return False
except OSError:
logging.exception('Unable to execute \'android list\'')
return False
def CheckX86Image(api_level=DEFAULT_ANDROID_API_LEVEL):
"""Check if Android system images have been installed.
Args:
api_level: the Android API level to check for; defaults to the latest API.
Returns:
True if sdk/system-images/android-<api_level>/x86 exists inside
EMULATOR_SDK_ROOT.
"""
api_target = 'android-%d' % api_level
return os.path.exists(os.path.join(constants.EMULATOR_SDK_ROOT,
'sdk', 'system-images',
api_target, 'x86'))
def CheckKVM():
"""Quickly check whether KVM is enabled.
Returns:
True iff /dev/kvm exists (Linux only).
"""
return os.path.exists('/dev/kvm')
def RunKvmOk():
"""Run kvm-ok as root to check that KVM is properly enabled after installation
of the required packages.
Returns:
True iff KVM is enabled (/dev/kvm exists). On failure, returns False
but also print detailed information explaining why KVM isn't enabled
(e.g. CPU doesn't support it, or BIOS disabled it).
"""
try:
# Note: kvm-ok is in /usr/sbin, so always use 'sudo' to run it.
return not cmd_helper.RunCmd(['sudo', 'kvm-ok'])
except OSError:
logging.info('kvm-ok not installed')
return False
def GetSDK():
"""Download the SDK and unzip it into EMULATOR_SDK_ROOT."""
logging.info('Download Android SDK.')
sdk_url = '%s/%s' % (SDK_BASE_URL, SDK_ZIP)
try:
cmd_helper.RunCmd(['curl', '-o', '/tmp/sdk.zip', sdk_url])
print 'curled unzipping...'
rc = cmd_helper.RunCmd(['unzip', '-o', '/tmp/sdk.zip', '-d', '/tmp/'])
if rc:
raise Exception('ERROR: could not download/unzip Android SDK.')
# Get the name of the sub-directory that everything will be extracted to.
dirname, _ = os.path.splitext(SDK_ZIP)
zip_dir = '/tmp/%s' % dirname
# Move the extracted directory to EMULATOR_SDK_ROOT
shutil.move(zip_dir, constants.EMULATOR_SDK_ROOT)
finally:
os.unlink('/tmp/sdk.zip')
def InstallKVM():
"""Installs KVM packages."""
rc = cmd_helper.RunCmd(['sudo', 'apt-get', 'install', 'kvm'])
if rc:
logging.critical('ERROR: Did not install KVM. Make sure hardware '
'virtualization is enabled in BIOS (i.e. Intel VT-x or '
'AMD SVM).')
# TODO(navabi): Use modprobe kvm-amd on AMD processors.
rc = cmd_helper.RunCmd(['sudo', 'modprobe', 'kvm-intel'])
if rc:
logging.critical('ERROR: Did not add KVM module to Linux Kernel. Make sure '
'hardware virtualization is enabled in BIOS.')
# Now check to ensure KVM acceleration can be used.
if not RunKvmOk():
logging.critical('ERROR: Can not use KVM acceleration. Make sure hardware '
'virtualization is enabled in BIOS (i.e. Intel VT-x or '
'AMD SVM).')
def GetX86Image(api_level=DEFAULT_ANDROID_API_LEVEL):
"""Download x86 system image from Intel's website.
Args:
api_level: the Android API level to download for.
"""
logging.info('Download x86 system image directory into sdk directory.')
# TODO(andrewhayden): Use python tempfile lib instead
temp_file = '/tmp/x86_img_android-%d.zip' % api_level
if api_level not in X86_IMG_URLS:
raise Exception('ERROR: no URL known for x86 image for android-%s' %
api_level)
try:
cmd_helper.RunCmd(['curl', '-o', temp_file, X86_IMG_URLS[api_level]])
rc = cmd_helper.RunCmd(['unzip', '-o', temp_file, '-d', '/tmp/'])
if rc:
raise Exception('ERROR: Could not download/unzip image zip.')
api_target = 'android-%d' % api_level
sys_imgs = os.path.join(constants.EMULATOR_SDK_ROOT, 'sdk',
'system-images', api_target, 'x86')
logging.info('Deploying system image to %s' % sys_imgs)
shutil.move('/tmp/x86', sys_imgs)
finally:
os.unlink(temp_file)
def GetSDKPlatform(api_level=DEFAULT_ANDROID_API_LEVEL):
"""Update the SDK to include the platform specified.
Args:
api_level: the Android API level to download
"""
android_binary = os.path.join(constants.EMULATOR_SDK_ROOT,
'sdk', 'tools', 'android')
pattern = re.compile('\s*([0-9]+)- SDK Platform Android [\.,0-9]+, API %d.*' %
api_level)
# Example:
# 2- SDK Platform Android 4.3, API 18, revision 2
exit_code, stdout = cmd_helper.GetCmdStatusAndOutput(
[android_binary, 'list', 'sdk'])
if exit_code != 0:
    raise Exception('\'android list sdk\' command returned %d' % exit_code)
for line in stdout.split('\n'):
match = pattern.match(line)
if match:
index = match.group(1)
print('package %s corresponds to platform level %d' % (index, api_level))
# update sdk --no-ui --filter $INDEX
update_command = [android_binary,
'update', 'sdk', '--no-ui', '--filter', index]
update_command_str = ' '.join(update_command)
logging.info('running update command: %s' % update_command_str)
update_process = pexpect.spawn(update_command_str)
# TODO(andrewhayden): Do we need to bug the user about this?
if update_process.expect('Do you accept the license') != 0:
raise Exception('License agreement check failed')
update_process.sendline('y')
if update_process.expect('Done. 1 package installed.') == 0:
print('Successfully installed platform for API level %d' % api_level)
return
else:
raise Exception('Failed to install platform update')
raise Exception('Could not find android-%d update for the SDK!' % api_level)
def main(argv):
opt_parser = optparse.OptionParser(
description='Install dependencies for running the Android emulator')
opt_parser.add_option('--api-level', dest='api_level',
help='The API level (e.g., 19 for Android 4.4) to ensure is available',
type='int', default=DEFAULT_ANDROID_API_LEVEL)
opt_parser.add_option('-v', dest='verbose', action='store_true',
help='enable verbose logging')
options, _ = opt_parser.parse_args(argv[1:])
# run_tests_helper will set logging to INFO or DEBUG
# We achieve verbose output by configuring it with 2 (==DEBUG)
verbosity = 1
if (options.verbose):
verbosity = 2
logging.basicConfig(level=logging.INFO,
format='# %(asctime)-15s: %(message)s')
run_tests_helper.SetLogLevel(verbose_count=verbosity)
# Calls below will download emulator SDK and/or system images only if needed.
if CheckSDK():
logging.info('android_emulator_sdk/ already exists, skipping download.')
else:
GetSDK()
# Check target. The target has to be installed in order to run the emulator.
if CheckSDKPlatform(options.api_level):
logging.info('SDK platform android-%d already present, skipping.' %
options.api_level)
else:
logging.info('SDK platform android-%d not present, installing.' %
options.api_level)
GetSDKPlatform(options.api_level)
# Download the x86 system image only if needed.
if CheckX86Image(options.api_level):
logging.info('x86 image for android-%d already present, skipping.' %
options.api_level)
else:
GetX86Image(options.api_level)
# Make sure KVM packages are installed and enabled.
if CheckKVM():
logging.info('KVM already installed and enabled.')
else:
InstallKVM()
if __name__ == '__main__':
sys.exit(main(sys.argv))
| bsd-3-clause |
Alwnikrotikz/l5rcm | sinks/sink_2.py | 3 | 4477 | # Copyright (C) 2011 Daniele Simonetti
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
from PySide import QtCore, QtGui
import dialogs
class Sink2(QtCore.QObject):
def __init__(self, parent = None):
super(Sink2, self).__init__(parent)
self.form = parent
def act_buy_merit(self):
form = self.form
dlg = dialogs.BuyPerkDialog(form.pc, 'merit',
form.dstore, form)
dlg.exec_()
form.update_from_model()
def act_buy_flaw(self):
form = self.form
dlg = dialogs.BuyPerkDialog(form.pc, 'flaw',
form.dstore, form)
dlg.exec_()
form.update_from_model()
def act_edit_merit(self):
form = self.form
sel_idx = form.merit_view.selectionModel().currentIndex()
if not sel_idx.isValid():
return
sel_itm = form.merit_view.model().data(sel_idx, QtCore.Qt.UserRole)
dlg = dialogs.BuyPerkDialog(form.pc, 'merit',
form.dstore, form)
dlg.set_edit_mode(True)
dlg.load_item(sel_itm)
dlg.exec_()
form.update_from_model()
def act_edit_flaw(self):
form = self.form
sel_idx = form.flaw_view.selectionModel().currentIndex()
if not sel_idx.isValid():
return
sel_itm = form.flaw_view.model().data(sel_idx, QtCore.Qt.UserRole)
dlg = dialogs.BuyPerkDialog(form.pc, 'flaw',
form.dstore, form)
dlg.set_edit_mode(True)
dlg.load_item(sel_itm)
dlg.exec_()
form.update_from_model()
def act_del_merit(self):
form = self.form
sel_idx = form.merit_view.selectionModel().currentIndex()
if not sel_idx.isValid():
return
sel_itm = form.merit_view.model().data(sel_idx, QtCore.Qt.UserRole)
form.remove_advancement_item(sel_itm.adv)
def act_del_flaw(self):
form = self.form
sel_idx = form.flaw_view.selectionModel().currentIndex()
if not sel_idx.isValid():
return
sel_itm = form.flaw_view.model().data(sel_idx, QtCore.Qt.UserRole)
form.remove_advancement_item(sel_itm.adv)
def act_buy_kata(self):
form = self.form
dlg = dialogs.KataDialog( form.pc, form.dstore, form )
dlg.exec_()
form.update_from_model()
def act_buy_kiho(self):
form = self.form
dlg = dialogs.KihoDialog( form.pc, form.dstore, form )
dlg.exec_()
form.update_from_model()
def act_buy_tattoo(self):
form = self.form
dlg = dialogs.TattooDialog( form.pc, form.dstore, form )
dlg.exec_()
form.update_from_model()
def act_del_kata(self):
form = self.form
sel_idx = form.kata_view.selectionModel().currentIndex()
if not sel_idx.isValid():
return
sel_itm = form.ka_table_view.model().data(sel_idx, QtCore.Qt.UserRole)
form.remove_advancement_item(sel_itm.adv)
def act_del_kiho(self):
form = self.form
print('act_del_kiho')
sel_idx = form.kiho_view.selectionModel().currentIndex()
if not sel_idx.isValid():
print('index not valid')
return
sel_itm = form.ki_table_view.model().data(sel_idx, QtCore.Qt.UserRole)
print('to remove', sel_itm)
form.remove_advancement_item(sel_itm.adv)
| gpl-3.0 |
Maximilian-Reuter/SickRage | lib/markupsafe/tests.py | 674 | 6107 | # -*- coding: utf-8 -*-
import gc
import sys
import unittest
from markupsafe import Markup, escape, escape_silent
from markupsafe._compat import text_type
class MarkupTestCase(unittest.TestCase):
def test_adding(self):
# adding two strings should escape the unsafe one
unsafe = '<script type="application/x-some-script">alert("foo");</script>'
safe = Markup('<em>username</em>')
assert unsafe + safe == text_type(escape(unsafe)) + text_type(safe)
def test_string_interpolation(self):
# string interpolations are safe to use too
assert Markup('<em>%s</em>') % '<bad user>' == \
'<em><bad user></em>'
assert Markup('<em>%(username)s</em>') % {
'username': '<bad user>'
} == '<em><bad user></em>'
assert Markup('%i') % 3.14 == '3'
assert Markup('%.2f') % 3.14 == '3.14'
def test_type_behavior(self):
# an escaped object is markup too
assert type(Markup('foo') + 'bar') is Markup
# and it implements __html__ by returning itself
x = Markup("foo")
assert x.__html__() is x
def test_html_interop(self):
# it also knows how to treat __html__ objects
class Foo(object):
def __html__(self):
return '<em>awesome</em>'
def __unicode__(self):
return 'awesome'
__str__ = __unicode__
assert Markup(Foo()) == '<em>awesome</em>'
assert Markup('<strong>%s</strong>') % Foo() == \
'<strong><em>awesome</em></strong>'
def test_tuple_interpol(self):
self.assertEqual(Markup('<em>%s:%s</em>') % (
'<foo>',
'<bar>',
), Markup(u'<em><foo>:<bar></em>'))
def test_dict_interpol(self):
self.assertEqual(Markup('<em>%(foo)s</em>') % {
'foo': '<foo>',
}, Markup(u'<em><foo></em>'))
self.assertEqual(Markup('<em>%(foo)s:%(bar)s</em>') % {
'foo': '<foo>',
'bar': '<bar>',
}, Markup(u'<em><foo>:<bar></em>'))
def test_escaping(self):
# escaping and unescaping
assert escape('"<>&\'') == '"<>&''
assert Markup("<em>Foo & Bar</em>").striptags() == "Foo & Bar"
assert Markup("<test>").unescape() == "<test>"
def test_formatting(self):
for actual, expected in (
(Markup('%i') % 3.14, '3'),
(Markup('%.2f') % 3.14159, '3.14'),
(Markup('%s %s %s') % ('<', 123, '>'), '< 123 >'),
(Markup('<em>{awesome}</em>').format(awesome='<awesome>'),
'<em><awesome></em>'),
(Markup('{0[1][bar]}').format([0, {'bar': '<bar/>'}]),
'<bar/>'),
(Markup('{0[1][bar]}').format([0, {'bar': Markup('<bar/>')}]),
'<bar/>')):
assert actual == expected, "%r should be %r!" % (actual, expected)
# This is new in 2.7
if sys.version_info >= (2, 7):
def test_formatting_empty(self):
formatted = Markup('{}').format(0)
assert formatted == Markup('0')
def test_custom_formatting(self):
class HasHTMLOnly(object):
def __html__(self):
return Markup('<foo>')
class HasHTMLAndFormat(object):
def __html__(self):
return Markup('<foo>')
def __html_format__(self, spec):
return Markup('<FORMAT>')
assert Markup('{0}').format(HasHTMLOnly()) == Markup('<foo>')
assert Markup('{0}').format(HasHTMLAndFormat()) == Markup('<FORMAT>')
def test_complex_custom_formatting(self):
class User(object):
def __init__(self, id, username):
self.id = id
self.username = username
def __html_format__(self, format_spec):
if format_spec == 'link':
return Markup('<a href="/user/{0}">{1}</a>').format(
self.id,
self.__html__(),
)
elif format_spec:
raise ValueError('Invalid format spec')
return self.__html__()
def __html__(self):
return Markup('<span class=user>{0}</span>').format(self.username)
user = User(1, 'foo')
assert Markup('<p>User: {0:link}').format(user) == \
Markup('<p>User: <a href="/user/1"><span class=user>foo</span></a>')
def test_all_set(self):
import markupsafe as markup
for item in markup.__all__:
getattr(markup, item)
def test_escape_silent(self):
assert escape_silent(None) == Markup()
assert escape(None) == Markup(None)
assert escape_silent('<foo>') == Markup(u'&lt;foo&gt;')
def test_splitting(self):
self.assertEqual(Markup('a b').split(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a b').rsplit(), [
Markup('a'),
Markup('b')
])
self.assertEqual(Markup('a\nb').splitlines(), [
Markup('a'),
Markup('b')
])
def test_mul(self):
self.assertEqual(Markup('a') * 3, Markup('aaa'))
class MarkupLeakTestCase(unittest.TestCase):
def test_markup_leaks(self):
counts = set()
for count in range(20):
for item in range(1000):
escape("foo")
escape("<foo>")
escape(u"foo")
escape(u"<foo>")
counts.add(len(gc.get_objects()))
assert len(counts) == 1, 'ouch, c extension seems to leak objects'
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MarkupTestCase))
# this test only tests the c extension
if not hasattr(escape, 'func_code'):
suite.addTest(unittest.makeSuite(MarkupLeakTestCase))
return suite
if __name__ == '__main__':
unittest.main(defaultTest='suite')
# vim:sts=4:sw=4:et:
| gpl-3.0 |
superchilli/webapp | venv/lib/python2.7/site-packages/sqlalchemy/dialects/mysql/oursql.py | 33 | 8125 | # mysql/oursql.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+oursql
:name: OurSQL
:dbapi: oursql
:connectstring: mysql+oursql://<user>:<password>@<host>[:<port>]/<dbname>
:url: http://packages.python.org/oursql/
Unicode
-------
Please see :ref:`mysql_unicode` for current recommendations on unicode
handling.
"""
import re
from .base import (BIT, MySQLDialect, MySQLExecutionContext)
from ... import types as sqltypes, util
class _oursqlBIT(BIT):
def result_processor(self, dialect, coltype):
"""oursql already converts mysql bits, so."""
return None
class MySQLExecutionContext_oursql(MySQLExecutionContext):
@property
def plain_query(self):
return self.execution_options.get('_oursql_plain_query', False)
class MySQLDialect_oursql(MySQLDialect):
driver = 'oursql'
if util.py2k:
supports_unicode_binds = True
supports_unicode_statements = True
supports_native_decimal = True
supports_sane_rowcount = True
supports_sane_multi_rowcount = True
execution_ctx_cls = MySQLExecutionContext_oursql
colspecs = util.update_copy(
MySQLDialect.colspecs,
{
sqltypes.Time: sqltypes.Time,
BIT: _oursqlBIT,
}
)
@classmethod
def dbapi(cls):
return __import__('oursql')
def do_execute(self, cursor, statement, parameters, context=None):
"""Provide an implementation of
*cursor.execute(statement, parameters)*."""
if context and context.plain_query:
cursor.execute(statement, plain_query=True)
else:
cursor.execute(statement, parameters)
def do_begin(self, connection):
connection.cursor().execute('BEGIN', plain_query=True)
def _xa_query(self, connection, query, xid):
if util.py2k:
arg = connection.connection._escape_string(xid)
else:
charset = self._connection_charset
arg = connection.connection._escape_string(
xid.encode(charset)).decode(charset)
arg = "'%s'" % arg
connection.execution_options(
_oursql_plain_query=True).execute(query % arg)
# Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
# refuse to return any data if they're run through
# the parameterized query API, or refuse to be parameterized
# in the first place.
def do_begin_twophase(self, connection, xid):
self._xa_query(connection, 'XA BEGIN %s', xid)
def do_prepare_twophase(self, connection, xid):
self._xa_query(connection, 'XA END %s', xid)
self._xa_query(connection, 'XA PREPARE %s', xid)
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self._xa_query(connection, 'XA END %s', xid)
self._xa_query(connection, 'XA ROLLBACK %s', xid)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_prepare_twophase(connection, xid)
self._xa_query(connection, 'XA COMMIT %s', xid)
# Q: why didn't we need all these "plain_query" overrides earlier ?
# am i on a newer/older version of OurSQL ?
def has_table(self, connection, table_name, schema=None):
return MySQLDialect.has_table(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema
)
def get_table_options(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_table_options(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_columns(self, connection, table_name, schema=None, **kw):
return MySQLDialect.get_columns(
self,
connection.connect().execution_options(_oursql_plain_query=True),
table_name,
schema=schema,
**kw
)
def get_view_names(self, connection, schema=None, **kw):
return MySQLDialect.get_view_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema=schema,
**kw
)
def get_table_names(self, connection, schema=None, **kw):
return MySQLDialect.get_table_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
schema
)
def get_schema_names(self, connection, **kw):
return MySQLDialect.get_schema_names(
self,
connection.connect().execution_options(_oursql_plain_query=True),
**kw
)
def initialize(self, connection):
return MySQLDialect.initialize(
self,
connection.execution_options(_oursql_plain_query=True)
)
def _show_create_table(self, connection, table, charset=None,
full_name=None):
return MySQLDialect._show_create_table(
self,
connection.contextual_connect(close_with_result=True).
execution_options(_oursql_plain_query=True),
table, charset, full_name
)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.ProgrammingError):
return e.errno is None and 'cursor' not in e.args[1] \
and e.args[1].endswith('closed')
else:
return e.errno in (2006, 2013, 2014, 2045, 2055)
def create_connect_args(self, url):
opts = url.translate_connect_args(database='db', username='user',
password='passwd')
opts.update(url.query)
util.coerce_kw_type(opts, 'port', int)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'autoping', bool)
util.coerce_kw_type(opts, 'raise_on_warnings', bool)
util.coerce_kw_type(opts, 'default_charset', bool)
if opts.pop('default_charset', False):
opts['charset'] = None
else:
util.coerce_kw_type(opts, 'charset', str)
opts['use_unicode'] = opts.get('use_unicode', True)
util.coerce_kw_type(opts, 'use_unicode', bool)
# FOUND_ROWS must be set in CLIENT_FLAGS to enable
# supports_sane_rowcount.
opts.setdefault('found_rows', True)
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]
util.coerce_kw_type(ssl, key[4:], str)
del opts[key]
if ssl:
opts['ssl'] = ssl
return [[], opts]
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
version = []
r = re.compile(r'[.\-]')
for n in r.split(dbapi_con.server_info):
try:
version.append(int(n))
except ValueError:
version.append(n)
return tuple(version)
def _extract_error_code(self, exception):
return exception.errno
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return connection.connection.charset
def _compat_fetchall(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchall()
def _compat_fetchone(self, rp, charset=None):
"""oursql isn't super-broken like MySQLdb, yaaay."""
return rp.fetchone()
def _compat_first(self, rp, charset=None):
return rp.first()
dialect = MySQLDialect_oursql
| mit |
2014c2g5/2014c2 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/xml/sax/xmlreader.py | 824 | 12612 | """An XML Reader is the SAX 2 name for an XML parser. XML Parsers
should be based on this code. """
from . import handler
from ._exceptions import SAXNotSupportedException, SAXNotRecognizedException
# ===== XMLREADER =====
class XMLReader:
"""Interface for reading an XML document using callbacks.
XMLReader is the interface that an XML parser's SAX2 driver must
implement. This interface allows an application to set and query
features and properties in the parser, to register event handlers
for document processing, and to initiate a document parse.
All SAX interfaces are assumed to be synchronous: the parse
methods must not return until parsing is complete, and readers
must wait for an event-handler callback to return before reporting
the next event."""
def __init__(self):
self._cont_handler = handler.ContentHandler()
self._dtd_handler = handler.DTDHandler()
self._ent_handler = handler.EntityResolver()
self._err_handler = handler.ErrorHandler()
def parse(self, source):
"Parse an XML document from a system identifier or an InputSource."
raise NotImplementedError("This method must be implemented!")
def getContentHandler(self):
"Returns the current ContentHandler."
return self._cont_handler
def setContentHandler(self, handler):
"Registers a new object to receive document content events."
self._cont_handler = handler
def getDTDHandler(self):
"Returns the current DTD handler."
return self._dtd_handler
def setDTDHandler(self, handler):
"Register an object to receive basic DTD-related events."
self._dtd_handler = handler
def getEntityResolver(self):
"Returns the current EntityResolver."
return self._ent_handler
def setEntityResolver(self, resolver):
"Register an object to resolve external entities."
self._ent_handler = resolver
def getErrorHandler(self):
"Returns the current ErrorHandler."
return self._err_handler
def setErrorHandler(self, handler):
"Register an object to receive error-message events."
self._err_handler = handler
def setLocale(self, locale):
"""Allow an application to set the locale for errors and warnings.
SAX parsers are not required to provide localization for errors
and warnings; if they cannot support the requested locale,
however, they must raise a SAX exception. Applications may
request a locale change in the middle of a parse."""
raise SAXNotSupportedException("Locale support not implemented")
def getFeature(self, name):
"Looks up and returns the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def setFeature(self, name, state):
"Sets the state of a SAX2 feature."
raise SAXNotRecognizedException("Feature '%s' not recognized" % name)
def getProperty(self, name):
"Looks up and returns the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
def setProperty(self, name, value):
"Sets the value of a SAX2 property."
raise SAXNotRecognizedException("Property '%s' not recognized" % name)
class IncrementalParser(XMLReader):
"""This interface adds three extra methods to the XMLReader
interface that allow XML parsers to support incremental
parsing. Support for this interface is optional, since not all
underlying XML parsers support this functionality.
When the parser is instantiated it is ready to begin accepting
data from the feed method immediately. After parsing has been
finished with a call to close the reset method must be called to
make the parser ready to accept new data, either from feed or
using the parse method.
Note that these methods must _not_ be called during parsing, that
is, after parse has been called and before it returns.
By default, the class also implements the parse method of the XMLReader
interface using the feed, close and reset methods of the
IncrementalParser interface as a convenience to SAX 2.0 driver
writers."""
def __init__(self, bufsize=2**16):
self._bufsize = bufsize
XMLReader.__init__(self)
def parse(self, source):
from . import saxutils
source = saxutils.prepare_input_source(source)
self.prepareParser(source)
file = source.getByteStream()
buffer = file.read(self._bufsize)
while buffer:
self.feed(buffer)
buffer = file.read(self._bufsize)
self.close()
def feed(self, data):
"""This method gives the raw XML data in the data parameter to
the parser and makes it parse the data, emitting the
corresponding events. It is allowed for XML constructs to be
split across several calls to feed.
feed may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def prepareParser(self, source):
"""This method is called by the parse implementation to allow
the SAX 2.0 driver to prepare itself for parsing."""
raise NotImplementedError("prepareParser must be overridden!")
def close(self):
"""This method is called when the entire XML document has been
passed to the parser through the feed method, to notify the
parser that there are no more data. This allows the parser to
do the final checks on the document and empty the internal
data buffer.
The parser will not be ready to parse another document until
the reset method has been called.
close may raise SAXException."""
raise NotImplementedError("This method must be implemented!")
def reset(self):
"""This method is called after close has been called to reset
the parser so that it is ready to parse new documents. The
results of calling parse or feed after close without calling
reset are undefined."""
raise NotImplementedError("This method must be implemented!")
# ===== LOCATOR =====
class Locator:
"""Interface for associating a SAX event with a document
location. A locator object will return valid results only during
calls to DocumentHandler methods; at any other time, the
results are unpredictable."""
def getColumnNumber(self):
"Return the column number where the current event ends."
return -1
def getLineNumber(self):
"Return the line number where the current event ends."
return -1
def getPublicId(self):
"Return the public identifier for the current event."
return None
def getSystemId(self):
"Return the system identifier for the current event."
return None
# ===== INPUTSOURCE =====
class InputSource:
"""Encapsulation of the information needed by the XMLReader to
read entities.
This class may include information about the public identifier,
system identifier, byte stream (possibly with character encoding
information) and/or the character stream of an entity.
Applications will create objects of this class for use in the
XMLReader.parse method and for returning from
EntityResolver.resolveEntity.
An InputSource belongs to the application, the XMLReader is not
allowed to modify InputSource objects passed to it from the
application, although it may make copies and modify those."""
def __init__(self, system_id = None):
self.__system_id = system_id
self.__public_id = None
self.__encoding = None
self.__bytefile = None
self.__charfile = None
def setPublicId(self, public_id):
"Sets the public identifier of this InputSource."
self.__public_id = public_id
def getPublicId(self):
"Returns the public identifier of this InputSource."
return self.__public_id
def setSystemId(self, system_id):
"Sets the system identifier of this InputSource."
self.__system_id = system_id
def getSystemId(self):
"Returns the system identifier of this InputSource."
return self.__system_id
def setEncoding(self, encoding):
"""Sets the character encoding of this InputSource.
The encoding must be a string acceptable for an XML encoding
declaration (see section 4.3.3 of the XML recommendation).
The encoding attribute of the InputSource is ignored if the
InputSource also contains a character stream."""
self.__encoding = encoding
def getEncoding(self):
"Get the character encoding of this InputSource."
return self.__encoding
def setByteStream(self, bytefile):
"""Set the byte stream (a Python file-like object which does
not perform byte-to-character conversion) for this input
source.
The SAX parser will ignore this if there is also a character
stream specified, but it will use a byte stream in preference
to opening a URI connection itself.
If the application knows the character encoding of the byte
stream, it should set it with the setEncoding method."""
self.__bytefile = bytefile
def getByteStream(self):
"""Get the byte stream for this input source.
The getEncoding method will return the character encoding for
this byte stream, or None if unknown."""
return self.__bytefile
def setCharacterStream(self, charfile):
"""Set the character stream for this input source. (The stream
must be a Python 2.0 Unicode-wrapped file-like that performs
conversion to Unicode strings.)
If there is a character stream specified, the SAX parser will
ignore any byte stream and will not attempt to open a URI
connection to the system identifier."""
self.__charfile = charfile
def getCharacterStream(self):
"Get the character stream for this input source."
return self.__charfile
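# A minimal usage sketch (the file name and parser variable are hypothetical):
# hand the parser an already-open byte stream instead of letting it resolve the
# system identifier itself:
#
#   source = InputSource("doc.xml")
#   source.setByteStream(open("doc.xml", "rb"))
#   source.setEncoding("utf-8")
#   parser.parse(source)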
# ===== ATTRIBUTESIMPL =====
class AttributesImpl:
def __init__(self, attrs):
"""Non-NS-aware implementation.
attrs should be of the form {name : value}."""
self._attrs = attrs
def getLength(self):
return len(self._attrs)
def getType(self, name):
return "CDATA"
def getValue(self, name):
return self._attrs[name]
def getValueByQName(self, name):
return self._attrs[name]
def getNameByQName(self, name):
if name not in self._attrs:
raise KeyError(name)
return name
def getQNameByName(self, name):
if name not in self._attrs:
raise KeyError(name)
return name
def getNames(self):
return list(self._attrs.keys())
def getQNames(self):
return list(self._attrs.keys())
def __len__(self):
return len(self._attrs)
def __getitem__(self, name):
return self._attrs[name]
def keys(self):
return list(self._attrs.keys())
def __contains__(self, name):
return name in self._attrs
def get(self, name, alternative=None):
return self._attrs.get(name, alternative)
def copy(self):
return self.__class__(self._attrs)
def items(self):
return list(self._attrs.items())
def values(self):
return list(self._attrs.values())
# ===== ATTRIBUTESNSIMPL =====
class AttributesNSImpl(AttributesImpl):
def __init__(self, attrs, qnames):
"""NS-aware implementation.
attrs should be of the form {(ns_uri, lname): value, ...}.
qnames of the form {(ns_uri, lname): qname, ...}."""
self._attrs = attrs
self._qnames = qnames
def getValueByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return self._attrs[nsname]
raise KeyError(name)
def getNameByQName(self, name):
for (nsname, qname) in self._qnames.items():
if qname == name:
return nsname
raise KeyError(name)
def getQNameByName(self, name):
return self._qnames[name]
def getQNames(self):
return list(self._qnames.values())
def copy(self):
return self.__class__(self._attrs, self._qnames)
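# Illustrative construction (the namespace URI is made up): attrs maps
# (ns_uri, lname) pairs to values, qnames maps the same keys to prefixed names:
#
#   attrs = AttributesNSImpl({(u'http://example.org/ns', u'id'): u'42'},
#                            {(u'http://example.org/ns', u'id'): u'ns:id'})
#   attrs.getValueByQName(u'ns:id')                           # -> u'42'
#   attrs.getQNameByName((u'http://example.org/ns', u'id'))   # -> u'ns:id'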
def _test():
XMLReader()
IncrementalParser()
Locator()
if __name__ == "__main__":
_test()
| gpl-2.0 |
StartupsPoleEmploi/labonneboite | labonneboite/scripts/data_scripts/get_nb_clic_per_siret_pse.py | 1 | 3258 | import os
import json
from datetime import date
import pandas as pd
from labonneboite.scripts.impact_retour_emploi.daily_json_activity_parser import ActivityLogParser, siret
class NoDataException(Exception):
pass
def get_clics_per_siret_main(activity_log_parser, file_path):
'''
Main function: iterate over the activity-log JSON files, build a dataframe
from each one, and append the per-siret click counts to the output CSV
'''
for file_name in activity_log_parser.json_logs_files_names_to_parse:
# Create dataframe from json file
print(f'activity dataframe for file {file_name}: start')
activity_df = activity_log_parser.get_activity_log_dataframe(file_name)
print(f'activity dataframe for file {file_name}: end')
# Compute clicks per siret per day and append them to the output CSV
print(f'Get clic per siret per day for file {file_name}: start')
df = get_clics_per_siret(activity_df)
insert_in_csv(df, file_path)
print(f'Get clic per siret per day for file {file_name}: end')
# PSE school wanted to know how many 'clics' on companies have been made
# We consider a "clic on a company" to be an event in: details, afficher-details, ajout-favori
# So for these 3 categories, we group by siret and date the number of occurrences of each event
def get_clics_per_siret(activity_df):
'''
details = viewing a company page
afficher-details = expanding the company details card
'''
clics_of_interest = ['details', 'afficher-details', 'ajout-favori']
df = activity_df[activity_df['nom'].isin(clics_of_interest)]
df['siret'] = df.apply(lambda row: siret(row), axis=1)
df = df.groupby(['siret', 'date'])['nom'].apply(lambda x: list(x)).reset_index()
df['nb-clics-afficher-details'] = df['nom'].apply(lambda x: len([_ for _ in x if _ == 'afficher-details']))
df['nb-clics-details'] = df['nom'].apply(lambda x: len([_ for _ in x if _ == 'details']))
df['nb-clics-ajout-favori'] = df['nom'].apply(lambda x: len([_ for _ in x if _ == 'ajout-favori']))
cols_of_interest = [
"date",
"siret",
"nb-clics-details",
"nb-clics-afficher-details",
"nb-clics-ajout-favori"
]
df = df[cols_of_interest]
nb_lines = df.shape[0]
print(f'Number of lines to insert into logs_activity : {nb_lines}')
return df
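# Illustrative result (values are invented): one row per (siret, date) pair with
# the three per-event counts; insert_in_csv later appends these rows to the CSV
# using '|' as the separator, e.g.
#
#   date        siret           nb-clics-details  nb-clics-afficher-details  nb-clics-ajout-favori
#   2020-01-15  12345678901234  3                 1                          0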
def insert_in_csv(df, file_path):
# save file after each chunk of the dpae file used
file_exists = os.path.isfile(file_path)
if file_exists:
with open(file_path, 'a') as f:
df.to_csv(f, header=False, sep='|')
else:
df.to_csv(file_path, encoding='utf-8', sep='|')
def get_filepath(activity_log_parser):
today = date.today()
clean_date = today.strftime("%Y-%m-%d")
file_path = f'{activity_log_parser.json_logs_folder_path}/clics_per_siret_pse-{clean_date}.csv'
if os.path.isfile(file_path):
os.remove(file_path)
return file_path
def run_main():
activity_log = ActivityLogParser()
activity_log.get_json_logs_activity(need_all_files=True)
filepath = get_filepath(activity_log)
get_clics_per_siret_main(activity_log, filepath)
if __name__ == '__main__':
run_main()
| agpl-3.0 |
shoelzer/buildbot | master/buildbot/__init__.py | 7 | 2037 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
#
# Keep in sync with slave/buildslave/__init__.py
#
# We can't put this method in utility modules, because they import dependency packages
from __future__ import division
from __future__ import print_function
import os
import re
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
def getVersion(init_file):
"""
Return BUILDBOT_VERSION environment variable, content of VERSION file, git
tag or 'latest'
"""
try:
return os.environ['BUILDBOT_VERSION']
except KeyError:
pass
try:
cwd = os.path.dirname(os.path.abspath(init_file))
fn = os.path.join(cwd, 'VERSION')
with open(fn) as f:
return f.read().strip()
except IOError:
pass
# accept version to be coded with 2 or 3 parts (X.Y or X.Y.Z),
# no matter the number of digits for X, Y and Z
VERSION_MATCH = re.compile(r'(\d+\.\d+(\.\d+)?(\w|-)*)')
try:
p = Popen(['git', 'describe', '--tags', '--always'], stdout=PIPE, stderr=STDOUT, cwd=cwd)
out = p.communicate()[0]
if (not p.returncode) and out:
v = VERSION_MATCH.search(str(out))
if v:
return v.group(1)
except OSError:
pass
return "latest"
version = getVersion(__file__)
__version__ = version
| gpl-2.0 |
koblitz/django-guardian | guardian/shortcuts.py | 10 | 18949 | """
Convenient shortcuts to manage or check object permissions.
"""
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import Q
from django.shortcuts import _get_queryset
from itertools import groupby
from guardian.core import ObjectPermissionChecker
from guardian.exceptions import MixedContentTypeError
from guardian.exceptions import WrongAppError
from guardian.models import UserObjectPermission, GroupObjectPermission
from guardian.utils import get_identity
from guardian.models import Permission, User, Group
def assign(perm, user_or_group, obj=None):
"""
Assigns permission to user/group and object pair.
:param perm: proper permission for given ``obj``, as string (in format:
``app_label.codename`` or ``codename``). If ``obj`` is not given, must
be in format ``app_label.codename``.
:param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param obj: persisted Django's ``Model`` instance or ``None`` if assigning
global permission. Default is ``None``.
We can assign permission for ``Model`` instance for specific user:
>>> from django.contrib.sites.models import Site
>>> from guardian.models import User, Group
>>> from guardian.shortcuts import assign
>>> site = Site.objects.get_current()
>>> user = User.objects.create(username='joe')
>>> assign("change_site", user, site)
<UserObjectPermission: example.com | joe | change_site>
>>> user.has_perm("change_site", site)
True
... or we can assign permission for group:
>>> group = Group.objects.create(name='joe-group')
>>> user.groups.add(group)
>>> assign("delete_site", group, site)
<GroupObjectPermission: example.com | joe-group | delete_site>
>>> user.has_perm("delete_site", site)
True
**Global permissions**
This function may also be used to assign standard, *global* permissions if
``obj`` parameter is omitted. Added Permission would be returned in that
case:
>>> assign("sites.change_site", user)
<Permission: sites | site | Can change site>
"""
user, group = get_identity(user_or_group)
# If obj is None we try to operate on global permissions
if obj is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
user.user_permissions.add(perm)
return perm
if group:
group.permissions.add(perm)
return perm
perm = perm.split('.')[-1]
if user:
return UserObjectPermission.objects.assign(perm, user, obj)
if group:
return GroupObjectPermission.objects.assign(perm, group, obj)
def remove_perm(perm, user_or_group=None, obj=None):
"""
Removes permission from user/group and object pair.
:param perm: proper permission for given ``obj``, as string (in format:
``app_label.codename`` or ``codename``). If ``obj`` is not given, must
be in format ``app_label.codename``.
:param user_or_group: instance of ``User``, ``AnonymousUser`` or ``Group``;
passing any other object would raise
``guardian.exceptions.NotUserNorGroup`` exception
:param obj: persisted Django's ``Model`` instance or ``None`` if assigning
global permission. Default is ``None``.
"""
user, group = get_identity(user_or_group)
if obj is None:
try:
app_label, codename = perm.split('.', 1)
except ValueError:
raise ValueError("For global permissions, first argument must be in"
" format: 'app_label.codename' (is %r)" % perm)
perm = Permission.objects.get(content_type__app_label=app_label,
codename=codename)
if user:
user.user_permissions.remove(perm)
return
elif group:
group.permissions.remove(perm)
return
perm = perm.split('.')[-1]
if user:
UserObjectPermission.objects.remove_perm(perm, user, obj)
if group:
GroupObjectPermission.objects.remove_perm(perm, group, obj)
def get_perms(user_or_group, obj):
"""
Returns permissions for given user/group and object pair, as list of
strings.
"""
check = ObjectPermissionChecker(user_or_group)
return check.get_perms(obj)
def get_perms_for_model(cls):
"""
Returns queryset of all Permission objects for the given class. It is
possible to pass Model as class or instance.
"""
if isinstance(cls, str):
app_label, model_name = cls.split('.')
model = models.get_model(app_label, model_name)
else:
model = cls
ctype = ContentType.objects.get_for_model(model)
return Permission.objects.filter(content_type=ctype)
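# Illustrative use (model names borrowed from the examples further below): the
# model class and the "app_label.model_name" string resolve to the same
# ContentType and return the same Permission queryset, e.g.
#
#   get_perms_for_model(FlatPage)
#   get_perms_for_model('flatpages.flatpage')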
def get_users_with_perms(obj, attach_perms=False, with_superusers=False,
with_group_users=True):
"""
Returns queryset of all ``User`` objects with *any* object permissions for
the given ``obj``.
:param obj: persisted Django's ``Model`` instance
:param attach_perms: Default: ``False``. If set to ``True`` result would be
dictionary of ``User`` instances with permissions' codenames list as
values. This would fetch users eagerly!
:param with_superusers: Default: ``False``. If set to ``True`` result would
contain all superusers.
:param with_group_users: Default: ``True``. If set to ``False`` result would
**not** contain those users who have only group permissions for given
``obj``.
Example::
>>> from django.contrib.flatpages.models import FlatPage
>>> from guardian.models import User
>>> from guardian.shortcuts import assign, get_users_with_perms
>>>
>>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
>>> joe = User.objects.create_user('joe', '[email protected]', 'joesecret')
>>> assign('change_flatpage', joe, page)
>>>
>>> get_users_with_perms(page)
[<User: joe>]
>>>
>>> get_users_with_perms(page, attach_perms=True)
{<User: joe>: [u'change_flatpage']}
"""
ctype = ContentType.objects.get_for_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is
# the case
qset = Q(
userobjectpermission__content_type=ctype,
userobjectpermission__object_pk=obj.pk)
if with_group_users:
qset = qset | Q(
groups__groupobjectpermission__content_type=ctype,
groups__groupobjectpermission__object_pk=obj.pk,
)
if with_superusers:
qset = qset | Q(is_superuser=True)
return User.objects.filter(qset).distinct()
else:
# TODO: Do not hit db for each user!
users = {}
for user in get_users_with_perms(obj,
with_group_users=with_group_users):
users[user] = get_perms(user, obj)
return users
def get_groups_with_perms(obj, attach_perms=False):
"""
Returns queryset of all ``Group`` objects with *any* object permissions for
the given ``obj``.
:param obj: persisted Django's ``Model`` instance
:param attach_perms: Default: ``False``. If set to ``True`` result would be
dictionary of ``Group`` instances with permissions' codenames list as
values. This would fetch groups eagerly!
Example::
>>> from django.contrib.flatpages.models import FlatPage
>>> from guardian.shortcuts import assign, get_groups_with_perms
>>> from guardian.models import Group
>>>
>>> page = FlatPage.objects.create(title='Some page', path='/some/page/')
>>> admins = Group.objects.create(name='Admins')
>>> assign('change_flatpage', admins, page)
>>>
>>> get_groups_with_perms(page)
[<Group: admins>]
>>>
>>> get_groups_with_perms(page, attach_perms=True)
{<Group: admins>: [u'change_flatpage']}
"""
ctype = ContentType.objects.get_for_model(obj)
if not attach_perms:
# It's much easier without attached perms so we do it first if that is
# the case
groups = Group.objects\
.filter(
groupobjectpermission__content_type=ctype,
groupobjectpermission__object_pk=obj.pk,
)\
.distinct()
return groups
else:
# TODO: Do not hit db for each group!
groups = {}
for group in get_groups_with_perms(obj):
if not group in groups:
groups[group] = get_perms(group, obj)
return groups
def get_objects_for_user(user, perms, klass=None, use_groups=True, any_perm=False):
"""
Returns queryset of objects for which a given ``user`` has *all*
permissions present at ``perms``.
:param user: ``User`` instance for which objects would be returned
:param perms: single permission string, or sequence of permission strings
which should be checked.
If ``klass`` parameter is not given, those should be full permission
names rather than only codenames (i.e. ``auth.change_user``). If more than
one permission is present within sequence, their content type **must** be
the same or ``MixedContentTypeError`` exception would be raised.
:param klass: may be a Model, Manager or QuerySet object. If not given
this parameter would be computed based on given ``params``.
:param use_groups: if ``False``, wouldn't check user's groups object
permissions. Default is ``True``.
:param any_perm: if True, any of permission in sequence is accepted
:raises MixedContentTypeError: when computed content type for ``perms``
and/or ``klass`` clashes.
:raises WrongAppError: if cannot compute app label for given ``perms``/
``klass``.
Example::
>>> from guardian.shortcuts import get_objects_for_user
>>> joe = User.objects.get(username='joe')
>>> get_objects_for_user(joe, 'auth.change_group')
[]
>>> from guardian.shortcuts import assign
>>> group = Group.objects.create('some group')
>>> assign('auth.change_group', joe, group)
>>> get_objects_for_user(joe, 'auth.change_group')
[<Group some group>]
The permission string can also be an iterable. Continuing with the previous example:
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
[]
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'], any_perm=True)
[<Group some group>]
>>> assign('auth.delete_group', joe, group)
>>> get_objects_for_user(joe, ['auth.change_group', 'auth.delete_group'])
[<Group some group>]
"""
if isinstance(perms, basestring):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError("Given perms must have same app "
"label (%s != %s)" % (app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError("ContentType was once computed "
"to be %s and another one %s" % (ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is None:
raise WrongAppError("Cannot determine content type")
elif ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model:
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have ``codenames`` list
# First check if user is superuser and if so, return queryset immediately
if user.is_superuser:
return queryset
# Now we should extract list of pk values for which we would filter queryset
user_obj_perms = UserObjectPermission.objects\
.filter(user=user)\
.filter(permission__content_type=ctype)\
.filter(permission__codename__in=codenames)\
.values_list('object_pk', 'permission__codename')
data = list(user_obj_perms)
if use_groups:
groups_obj_perms = GroupObjectPermission.objects\
.filter(group__user=user)\
.filter(permission__content_type=ctype)\
.filter(permission__codename__in=codenames)\
.values_list('object_pk', 'permission__codename')
data += list(groups_obj_perms)
keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
data = sorted(data, key=keyfunc)
pk_list = []
for pk, group in groupby(data, keyfunc):
obj_codenames = set((e[1] for e in group))
if any_perm or codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(pk__in=pk_list)
return objects
def get_objects_for_group(group, perms, klass=None, any_perm=False):
"""
Returns queryset of objects for which a given ``group`` has *all*
permissions present at ``perms``.
:param group: ``Group`` instance for which objects would be returned.
:param perms: single permission string, or sequence of permission strings
which should be checked.
If ``klass`` parameter is not given, those should be full permission
names rather than only codenames (i.e. ``auth.change_user``). If more than
one permission is present within sequence, their content type **must** be
the same or ``MixedContentTypeError`` exception would be raised.
:param klass: may be a Model, Manager or QuerySet object. If not given
this parameter would be computed based on given ``params``.
:param any_perm: if True, any of permission in sequence is accepted
:raises MixedContentTypeError: when computed content type for ``perms``
and/or ``klass`` clashes.
:raises WrongAppError: if cannot compute app label for given ``perms``/
``klass``.
Example:
Let's assume we have a ``Task`` model belonging to the ``tasker`` app with
the default add_task, change_task and delete_task permissions provided
by Django::
>>> from guardian.shortcuts import get_objects_for_group
>>> from tasker import Task
>>> group = Group.objects.create('some group')
>>> task = Task.objects.create('some task')
>>> get_objects_for_group(group, 'tasker.add_task')
[]
>>> from guardian.shortcuts import assign
>>> assign('tasker.add_task', group, task)
>>> get_objects_for_group(group, 'tasker.add_task')
[<Task some task>]
The permission string can also be an iterable. Continuing with the previous example:
>>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
[]
>>> assign('tasker.delete_task', group, task)
>>> get_objects_for_group(group, ['tasker.add_task', 'tasker.delete_task'])
[<Task some task>]
"""
if isinstance(perms, basestring):
perms = [perms]
ctype = None
app_label = None
codenames = set()
# Compute codenames set and ctype if possible
for perm in perms:
if '.' in perm:
new_app_label, codename = perm.split('.', 1)
if app_label is not None and app_label != new_app_label:
raise MixedContentTypeError("Given perms must have same app "
"label (%s != %s)" % (app_label, new_app_label))
else:
app_label = new_app_label
else:
codename = perm
codenames.add(codename)
if app_label is not None:
new_ctype = ContentType.objects.get(app_label=app_label,
permission__codename=codename)
if ctype is not None and ctype != new_ctype:
raise MixedContentTypeError("ContentType was once computed "
"to be %s and another one %s" % (ctype, new_ctype))
else:
ctype = new_ctype
# Compute queryset and ctype if still missing
if ctype is None and klass is None:
raise WrongAppError("Cannot determine content type")
elif ctype is None and klass is not None:
queryset = _get_queryset(klass)
ctype = ContentType.objects.get_for_model(queryset.model)
elif ctype is not None and klass is None:
queryset = _get_queryset(ctype.model_class())
else:
queryset = _get_queryset(klass)
if ctype.model_class() != queryset.model:
raise MixedContentTypeError("Content type for given perms and "
"klass differs")
# At this point, we should have both ctype and queryset and they should
# match which means: ctype.model_class() == queryset.model
# we should also have ``codenames`` list
# Now we should extract list of pk values for which we would filter queryset
groups_obj_perms = GroupObjectPermission.objects\
.filter(group=group)\
.filter(permission__content_type=ctype)\
.filter(permission__codename__in=codenames)\
.values_list('object_pk', 'permission__codename')
data = list(groups_obj_perms)
keyfunc = lambda t: t[0] # sorting/grouping by pk (first in result tuple)
data = sorted(data, key=keyfunc)
pk_list = []
for pk, group in groupby(data, keyfunc):
obj_codenames = set((e[1] for e in group))
if any_perm or codenames.issubset(obj_codenames):
pk_list.append(pk)
objects = queryset.filter(pk__in=pk_list)
return objects
| bsd-2-clause |
danmar/cppcheck | htmlreport/test_htmlreport.py | 2 | 3595 | #!/usr/bin/env python
"""Test cppcheck-htmlreport."""
import os
import contextlib
import shutil
import subprocess
import sys
import tempfile
if sys.version_info < (2, 7):
# For TestCase.assertIn().
import unittest2 as unittest
else:
import unittest
ROOT_DIR = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
CPPCHECK_BIN = os.path.join(ROOT_DIR, 'cppcheck')
HTML_REPORT_BIN = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'cppcheck-htmlreport')
class TestHTMLReport(unittest.TestCase):
def testReportError(self):
for xml_version in ['2']:
self.checkReportError(xml_version)
def checkReportError(self, xml_version):
with runCheck(
os.path.join(ROOT_DIR, 'samples', 'memleak', 'bad.c'),
xml_version=xml_version
) as (report, output_directory):
self.assertIn('<html', report)
self.assertIn('Memory leak:', report)
self.assertIn('bad.c', report)
detail_filename = os.path.join(output_directory, '0.html')
self.assertTrue(
os.path.exists(detail_filename))
with open(detail_filename) as input_file:
detail_contents = input_file.read()
self.assertIn('<html', detail_contents)
self.assertIn('Memory leak:', detail_contents)
def testReportNoError(self):
for xml_version in ['2']:
self.checkReportNoError(xml_version)
def checkReportNoError(self, xml_version):
with runCheck(
os.path.join(ROOT_DIR, 'samples', 'memleak', 'good.c'),
xml_version=xml_version
) as (report, output_directory):
self.assertIn('<html', report)
self.assertNotIn('Memory leak:', report)
self.assertNotIn('good.c', report)
self.assertFalse(
os.path.exists(os.path.join(output_directory, '0.html')))
def testMissingInclude(self):
with runCheck(
xml_filename=os.path.join(ROOT_DIR, 'htmlreport', 'example.xml'),
) as (report, output_directory):
self.assertIn('<html', report)
self.assertIn('Uninitialized variable:', report)
self.assertIn('example.cc', report)
self.assertTrue(
os.path.exists(os.path.join(output_directory, '0.html')))
@contextlib.contextmanager
def runCheck(source_filename=None, xml_version='1', xml_filename=None):
"""Run cppcheck and cppcheck-htmlreport.
Yield a tuple containing the resulting HTML report index and the directory
path.
"""
output_directory = tempfile.mkdtemp(dir='.')
if xml_filename is None:
assert source_filename
xml_filename = os.path.join(output_directory, 'output.xml')
with open(xml_filename, 'w') as output_file:
subprocess.check_call(
[CPPCHECK_BIN, '--xml', source_filename,
'--xml-version=' + xml_version],
stderr=output_file)
assert os.path.exists(xml_filename)
subprocess.check_call(
[sys.executable, HTML_REPORT_BIN,
'--file=' + os.path.realpath(xml_filename),
'--report-dir=' + os.path.realpath(output_directory)],
cwd=os.path.join(ROOT_DIR, 'htmlreport'))
with open(os.path.join(output_directory, 'index.html')) as index_file:
index_contents = index_file.read()
yield index_contents, output_directory
shutil.rmtree(output_directory)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
MangoMangoDevelopment/neptune | lib/ros_comm-1.12.0/tools/roslaunch/test/unit/test_roslaunch_launch.py | 2 | 7592 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import unittest
## Test roslaunch.launch
class TestRoslaunchLaunch(unittest.TestCase):
def setUp(self):
self.printerrlog_msg = None
def my_printerrlog(self, msg):
self.printerrlog_msg = msg
def test_validate_master_launch(self):
import roslaunch.launch
from roslaunch.core import Master
from roslaunch.launch import validate_master_launch
roslaunch.launch.printerrlog = self.my_printerrlog
# Good configurations
os.environ['ROS_MASTER_URI'] = 'http://localhost:11311'
m = Master(uri='http://localhost:11311')
validate_master_launch(m, True)
self.assertEquals(None, self.printerrlog_msg)
validate_master_launch(m, False)
self.assertEquals(None, self.printerrlog_msg)
# roscore with mismatched port in environment
os.environ['ROS_MASTER_URI'] = 'http://localhost:11312'
validate_master_launch(m, True)
self.assert_('port' in self.printerrlog_msg)
self.printerrlog_msg = None
# roscore with mismatched hostname in environment
os.environ['ROS_MASTER_URI'] = 'http://fake:11311'
validate_master_launch(m, True)
self.assert_('host' in self.printerrlog_msg)
self.printerrlog_msg = None
# roslaunch with remote master that cannot be contacted
os.environ['ROS_MASTER_URI'] = 'http://fake:11311'
self.assertEquals(None, self.printerrlog_msg)
# environment doesn't matter for remaining tests
os.environ['ROS_MASTER_URI'] = 'http://localhost:11311'
m = Master(uri="http://fake:11311")
# roscore with hostname that points elsewhere, warn user. This
# generally could only happen if the user has a bad local host
# config.
validate_master_launch(m, True)
self.assert_("WARNING" in self.printerrlog_msg)
self.printerrlog_msg = None
# roscore with host that is not ours
m = Master(uri="http://willowgarage.com:11311")
validate_master_launch(m, True)
self.assert_("WARNING" in self.printerrlog_msg)
self.printerrlog_msg = None
# roslaunch with remote master that is out of contact, fail
try:
validate_master_launch(m, False)
self.fail("should not pass if remote master cannot be contacted")
except roslaunch.RLException:
pass
def test__unify_clear_params(self):
from roslaunch.launch import _unify_clear_params
self.assertEquals([], _unify_clear_params([]))
for t in [['/foo'], ['/foo/'], ['/foo/', '/foo'],
['/foo/', '/foo/'], ['/foo/', '/foo/bar', '/foo/'],
['/foo/', '/foo/bar', '/foo/bar/baz']]:
self.assertEquals(['/foo/'], _unify_clear_params(t))
for t in [['/'], ['/', '/foo/'], ['/foo/', '/', '/baz', '/car/dog']]:
self.assertEquals(['/'], _unify_clear_params(t))
self.assertEquals(['/foo/', '/bar/', '/baz/'], _unify_clear_params(['/foo', '/bar', '/baz']))
self.assertEquals(['/foo/', '/bar/', '/baz/'], _unify_clear_params(['/foo', '/bar', '/baz', '/bar/delta', '/baz/foo']))
self.assertEquals(['/foo/bar/'], _unify_clear_params(['/foo/bar', '/foo/bar/baz']))
def test__hostname_to_rosname(self):
from roslaunch.launch import _hostname_to_rosname
self.assertEquals("host_ann", _hostname_to_rosname('ann'))
self.assertEquals("host_ann", _hostname_to_rosname('ANN'))
self.assertEquals("host_", _hostname_to_rosname(''))
self.assertEquals("host_1", _hostname_to_rosname('1'))
self.assertEquals("host__", _hostname_to_rosname('_'))
self.assertEquals("host__", _hostname_to_rosname('-'))
self.assertEquals("host_foo_laptop", _hostname_to_rosname('foo-laptop'))
def test_roslaunchListeners(self):
import roslaunch.launch
class L(roslaunch.launch.ROSLaunchListener):
def process_died(self, process_name, exit_code):
self.process_name = process_name
self.exit_code = exit_code
class LBad(roslaunch.launch.ROSLaunchListener):
def process_died(self, process_name, exit_code):
raise Exception("foo")
listeners = roslaunch.launch._ROSLaunchListeners()
l1 = L()
l2 = L()
lbad = L()
l3 = L()
# test with no listeners
listeners.process_died('p0', 0)
# test with 1 listener
listeners.add_process_listener(l1)
listeners.process_died('p1', 1)
self.assertEquals(l1.process_name, 'p1')
self.assertEquals(l1.exit_code, 1)
# test with 2 listeners
listeners.add_process_listener(l2)
listeners.process_died('p2', 2)
for l in [l1, l2]:
self.assertEquals(l.process_name, 'p2')
self.assertEquals(l.exit_code, 2)
listeners.add_process_listener(lbad)
# make sure that this catches errors
listeners.process_died('p3', 3)
for l in [l1, l2]:
self.assertEquals(l.process_name, 'p3')
self.assertEquals(l.exit_code, 3)
# also add a third listener to make sure that listeners continues after lbad throws
listeners.add_process_listener(l3)
listeners.process_died('p4', 4)
for l in [l1, l2, l3]:
self.assertEquals(l.process_name, 'p4')
self.assertEquals(l.exit_code, 4)
# this is just to get coverage, it's an empty class
def test_ROSRemoteRunnerIF():
from roslaunch.launch import ROSRemoteRunnerIF
r = ROSRemoteRunnerIF()
r.setup()
r.add_process_listener(1)
r.launch_remote_nodes()
def test_ROSLaunchListener():
from roslaunch.launch import ROSLaunchListener
r = ROSLaunchListener()
r.process_died(1, 2)
| bsd-3-clause |
google-code-export/dojango | dojango/conf/settings.py | 3 | 7697 | import os
from django.conf import settings
DEBUG = getattr(settings, "DEBUG", False)
DEFAULT_CHARSET = getattr(settings, 'DEFAULT_CHARSET', 'utf-8')
DOJO_VERSION = getattr(settings, "DOJANGO_DOJO_VERSION", "1.6.0")
DOJO_PROFILE = getattr(settings, "DOJANGO_DOJO_PROFILE", "google")
DOJO_MEDIA_URL = getattr(settings, "DOJANGO_DOJO_MEDIA_URL", 'dojo-media')
BASE_MEDIA_URL = getattr(settings, "DOJANGO_BASE_MEDIA_URL", '/dojango/%s' % DOJO_MEDIA_URL)
BUILD_MEDIA_URL = getattr(settings, "DOJANGO_BUILD_MEDIA_URL", '%s/release' % BASE_MEDIA_URL)
BASE_MEDIA_ROOT = getattr(settings, "DOJANGO_BASE_MEDIA_ROOT", os.path.abspath(os.path.dirname(__file__)+'/../dojo-media/'))
BASE_DOJO_ROOT = getattr(settings, "DOJANGO_BASE_DOJO_ROOT", BASE_MEDIA_ROOT + "/src")
# as default the dijit theme folder is used
DOJO_THEME_URL = getattr(settings, "DOJANGO_DOJO_THEME_URL", False)
DOJO_THEME = getattr(settings, "DOJANGO_DOJO_THEME", "claro")
DOJO_DEBUG = getattr(settings, "DOJANGO_DOJO_DEBUG", DEBUG) # using the default django DEBUG setting
DOJO_SECURE_JSON = getattr(settings, "DOJANGO_DOJO_SECURE_JSON", True) # if you are using dojo version < 1.2.0 you have set it to False
CDN_USE_SSL = getattr(settings, "DOJANGO_CDN_USE_SSL", False) # is dojo served via https from google? doesn't work for aol!
# set the urls for actual possible paths for dojo
# one dojo profile must at least contain a path that defines the base url of a dojo installation
# the following settings can be set for each dojo profile:
# - base_url: where do the dojo files reside (without the version folder!)
# - use_xd: use the crossdomain-build? used to build the correct filename (e.g. dojo.xd.js)
# - versions: this list defines all possible versions that are available in the defined profile
# - uncompressed: use the uncompressed version of dojo (dojo.xd.js.uncompressed.js)
# - use_gfx: there is a special case, when using dojox.gfx from aol (see http://dev.aol.com/dojo)
# - is_local: marks a profile being local. this is needed when using the dojo module loader
# - is_local_build: marks a profile as a locally built version
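# A hypothetical project-level override (sketch, the CDN URL is made up): entries
# defined as DOJANGO_DOJO_PROFILES in the project's settings.py are merged into
# DOJO_PROFILES further below, e.g.
#
#   DOJANGO_DOJO_PROFILES = {
#       'my_cdn': {'base_url': 'http://cdn.example.com/dojo', 'use_xd': True,
#                  'versions': ('1.6.0',)},
#   }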
_aol_versions = ('0.9.0', '1.0.0', '1.0.2', '1.1.0', '1.1.1', '1.2.0', '1.2.3', '1.3', '1.3.0', '1.3.1', '1.3.2', '1.4', '1.4.0', '1.4.1', '1.4.3', '1.5', '1.5.0', '1.6', '1.6.0')
_aol_gfx_versions = ('0.9.0', '1.0.0', '1.0.2', '1.1.0', '1.1.1',)
_google_versions = ('1.1.1', '1.2', '1.2.0', '1.2.3', '1.3', '1.3.0', '1.3.1', '1.3.2', '1.4', '1.4.0', '1.4.1', '1.4.3', '1.5', '1.5.0', '1.6', '1.6.0')
DOJO_PROFILES = {
'google': {'base_url':(CDN_USE_SSL and 'https' or 'http') + '://ajax.googleapis.com/ajax/libs/dojo', 'use_xd':True, 'versions':_google_versions}, # google just supports version >= 1.1.1
'google_uncompressed': {'base_url':(CDN_USE_SSL and 'https' or 'http') + '://ajax.googleapis.com/ajax/libs/dojo', 'use_xd':True, 'uncompressed':True, 'versions':_google_versions},
'aol': {'base_url':'http://o.aolcdn.com/dojo', 'use_xd':True, 'versions':_aol_versions},
'aol_uncompressed': {'base_url':'http://o.aolcdn.com/dojo', 'use_xd':True, 'uncompressed':True, 'versions':_aol_versions},
'aol_gfx': {'base_url':'http://o.aolcdn.com/dojo', 'use_xd':True, 'use_gfx':True, 'versions':_aol_gfx_versions},
'aol_gfx-uncompressed': {'base_url':'http://o.aolcdn.com/dojo', 'use_xd':True, 'use_gfx':True, 'uncompressed':True, 'versions':_aol_gfx_versions},
'local': {'base_url': '%(BASE_MEDIA_URL)s', 'is_local':True}, # we don't have a restriction on version names, name them as you like
'local_release': {'base_url': '%(BUILD_MEDIA_URL)s', 'is_local':True, 'is_local_build':True}, # this will be available after the first dojo build!
'local_release_uncompressed': {'base_url': '%(BUILD_MEDIA_URL)s', 'uncompressed':True, 'is_local':True, 'is_local_build':True} # same here
}
# we just want users to append/overwrite own profiles
DOJO_PROFILES.update(getattr(settings, "DOJANGO_DOJO_PROFILES", {}))
# =============================================================================================
# =================================== NEEDED FOR DOJO BUILD ===================================
# =============================================================================================
# general doc: http://dojotoolkit.org/book/dojo-book-0-9/part-4-meta-dojo/package-system-and-custom-builds
# see http://www.sitepen.com/blog/2008/04/02/dojo-mini-optimization-tricks-with-the-dojo-toolkit/ for details
DOJO_BUILD_VERSION = getattr(settings, "DOJANGO_DOJO_BUILD_VERSION", '1.6.0')
# this is the default build profile, that is used, when calling "./manage.py dojobuild"
# "./manage.py dojobuild dojango" would have the same effect
DOJO_BUILD_PROFILE = getattr(settings, "DOJANGO_DOJO_BUILD_PROFILE", "dojango")
# This dictionary defines your build profiles you can use within the custom command "./manage.py dojobuild
# You can set your own build profile within the main settings.py of the project by defining a dictionary
# DOJANGO_DOJO_BUILD_PROFILES, that sets the following key/value pairs for each defined profile name:
# profile_file: which dojo profile file is used for the build (see dojango.profile.js how it has to look)
# options: these are the options that are passed to the build command (see the dojo doc for details)
# OPTIONAL SETTINGS (see DOJO_BUILD_PROFILES_DEFAULT):
# base_root: in which directory will the dojo version be builded to?
# used_src_version: which version should be used for the dojo build (e.g. 1.1.1)
# build_version: what is the version name of the builded release (e.g. dojango1.1.1) - this option can be overwritten by the commandline parameter --build_version=...
# minify_extreme_skip_files: a tuple of files/folders (each expressed as a regular expression) that should be kept when doing a minify extreme (useful when you have several layers and don't want some files)
# this tuple will be appended to the default folders/files that are skipped: see SKIP_FILES in management/commands/dojobuild.py
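# A hypothetical override (sketch, profile file path and name are made up): a
# project can register its own build profile via DOJANGO_DOJO_BUILD_PROFILES in
# settings.py; it is merged into DOJO_BUILD_PROFILES below and completed with
# DOJO_BUILD_PROFILES_DEFAULT, e.g.
#
#   DOJANGO_DOJO_BUILD_PROFILES = {
#       'my_release': {
#           'options': 'profileFile="/path/to/my.profile.js" action=release optimize=shrinksafe',
#           'build_version': '%(DOJO_BUILD_VERSION)s-my-release',
#       },
#   }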
DOJO_BUILD_PROFILES = {
'dojango': {
'options': 'profileFile="%(BASE_MEDIA_ROOT)s/dojango.profile.js" action=release optimize=shrinksafe.keepLines cssOptimize=comments.keepLines',
},
'dojango_optimized': {
'options': 'profileFile="%(BASE_MEDIA_ROOT)s/dojango_optimized.profile.js" action=release optimize=shrinksafe.keepLines cssOptimize=comments.keepLines',
'build_version': '%(DOJO_BUILD_VERSION)s-dojango-optimized-with-dojo',
},
}
# these defaults are mixed into each DOJO_BUILD_PROFILES element
# but you can overwrite each attribute within your own build profile element
# e.g. DOJANGO_BUILD_PROFILES = {'used_src_version': '1.2.2', ....}
DOJO_BUILD_PROFILES_DEFAULT = getattr(settings, "DOJANGO_DOJO_BUILD_PROFILES_DEFAULT", {
# build the release in the media directory of dojango
# use a formatting string, so this can be set in the project's settings.py without getting the dojango settings
'base_root': '%(BASE_MEDIA_ROOT)s/release',
'used_src_version': '%(DOJO_BUILD_VERSION)s',
'build_version': '%(DOJO_BUILD_VERSION)s-dojango-with-dojo',
})
# TODO: we should also enable the already pre-delivered dojo default profiles
# you can add/overwrite your own build profiles
DOJO_BUILD_PROFILES.update(getattr(settings, "DOJANGO_DOJO_BUILD_PROFILES", {}))
DOJO_BUILD_JAVA_EXEC = getattr(settings, 'DOJANGO_DOJO_BUILD_JAVA_EXEC', 'java')
# a version string that must have the following form: '1.0.0', '1.2.1', ....
# this setting is used within the dojobuild, because the build process changed since version 1.2.0
DOJO_BUILD_USED_VERSION = getattr(settings, 'DOJANGO_DOJO_BUILD_USED_VERSION', DOJO_BUILD_VERSION)
| bsd-3-clause |
BillWang139967/Starfish | x-luo/run_daemon.py | 1 | 1767 | #!/usr/bin/env python
import os
import sys
import importlib
from xlib import daemon
root_path = os.path.split(os.path.realpath(__file__))[0]
os.chdir(root_path)
module = ["agent","server"]
def usage():
print "usage: run_daemon.py agent|server start|stop|restart|status"
sys.exit(2)
def main():
if len(sys.argv) != 3:
usage()
daemon_name = sys.argv[1]
if daemon_name not in module:
usage()
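    # Import handlers.agent or handlers.server dynamically; the chosen module
    # must expose a main() function, which the daemon's run() method calls.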
pkg = importlib.import_module('handlers.{pkg_name}'.format(pkg_name=daemon_name))
class MyDaemon(daemon.Daemon):
def run(self):
pkg.main()
######################################
# edit this code
cur_dir = os.getcwd()
if not os.path.exists("{cur_dir}/run/".format(cur_dir=cur_dir)):
os.makedirs("./run")
if not os.path.exists("{cur_dir}/log/".format(cur_dir=cur_dir)):
os.makedirs("./log")
my_daemon = MyDaemon(
pidfile="{cur_dir}/run/{daemon_name}.pid".format(cur_dir=cur_dir,daemon_name=daemon_name),
stdout="{cur_dir}/log/{daemon_name}_stdout.log".format(cur_dir=cur_dir,daemon_name=daemon_name),
stderr="{cur_dir}/log/{daemon_name}_stderr.log".format(cur_dir=cur_dir,daemon_name=daemon_name)
)
if 'start' == sys.argv[2]:
my_daemon.start()
elif 'stop' == sys.argv[2]:
my_daemon.stop()
elif 'restart' == sys.argv[2]:
my_daemon.restart()
elif 'status' == sys.argv[2]:
alive = my_daemon.is_running()
if alive:
print('process [%s] is running ......' % my_daemon.get_pid())
else:
print('daemon process [%s] stopped' % daemon_name)
else:
print "Unknown command"
sys.exit(2)
sys.exit(0)
if __name__ == "__main__":
main()
| gpl-3.0 |
Dandandan/wikiprogramming | jsrepl/extern/python/closured/lib/python2.7/_weakrefset.py | 135 | 6389 | # Access WeakSet through the weakref module.
# This code is separated-out because it is needed
# by abc.py to load everything else at startup.
from _weakref import ref
__all__ = ['WeakSet']
class _IterationGuard(object):
# This context manager registers itself in the current iterators of the
# weak container, such as to delay all removals until the context manager
# exits.
# This technique should be relatively thread-safe (since sets are).
def __init__(self, weakcontainer):
# Don't create cycles
self.weakcontainer = ref(weakcontainer)
def __enter__(self):
w = self.weakcontainer()
if w is not None:
w._iterating.add(self)
return self
def __exit__(self, e, t, b):
w = self.weakcontainer()
if w is not None:
s = w._iterating
s.remove(self)
if not s:
w._commit_removals()
class WeakSet(object):
def __init__(self, data=None):
self.data = set()
def _remove(item, selfref=ref(self)):
self = selfref()
if self is not None:
if self._iterating:
self._pending_removals.append(item)
else:
self.data.discard(item)
self._remove = _remove
# A list of keys to be removed
self._pending_removals = []
self._iterating = set()
if data is not None:
self.update(data)
def _commit_removals(self):
l = self._pending_removals
discard = self.data.discard
while l:
discard(l.pop())
def __iter__(self):
with _IterationGuard(self):
for itemref in self.data:
item = itemref()
if item is not None:
yield item
def __len__(self):
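        # Count only items whose referents are still alive; dead weak
        # references may remain in self.data until pending removals are
        # committed.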
return sum(x() is not None for x in self.data)
def __contains__(self, item):
try:
wr = ref(item)
except TypeError:
return False
return wr in self.data
def __reduce__(self):
return (self.__class__, (list(self),),
getattr(self, '__dict__', None))
__hash__ = None
def add(self, item):
if self._pending_removals:
self._commit_removals()
self.data.add(ref(item, self._remove))
def clear(self):
if self._pending_removals:
self._commit_removals()
self.data.clear()
def copy(self):
return self.__class__(self)
def pop(self):
if self._pending_removals:
self._commit_removals()
while True:
try:
itemref = self.data.pop()
except KeyError:
raise KeyError('pop from empty WeakSet')
item = itemref()
if item is not None:
return item
def remove(self, item):
if self._pending_removals:
self._commit_removals()
self.data.remove(ref(item))
def discard(self, item):
if self._pending_removals:
self._commit_removals()
self.data.discard(ref(item))
def update(self, other):
if self._pending_removals:
self._commit_removals()
if isinstance(other, self.__class__):
self.data.update(other.data)
else:
for element in other:
self.add(element)
def __ior__(self, other):
self.update(other)
return self
# Helper functions for simple delegating methods.
def _apply(self, other, method):
if not isinstance(other, self.__class__):
other = self.__class__(other)
newdata = method(other.data)
newset = self.__class__()
newset.data = newdata
return newset
def difference(self, other):
return self._apply(other, self.data.difference)
__sub__ = difference
def difference_update(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
def __isub__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.difference_update(ref(item) for item in other)
return self
def intersection(self, other):
return self._apply(other, self.data.intersection)
__and__ = intersection
def intersection_update(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
def __iand__(self, other):
if self._pending_removals:
self._commit_removals()
self.data.intersection_update(ref(item) for item in other)
return self
def issubset(self, other):
return self.data.issubset(ref(item) for item in other)
__lt__ = issubset
def __le__(self, other):
return self.data <= set(ref(item) for item in other)
def issuperset(self, other):
return self.data.issuperset(ref(item) for item in other)
__gt__ = issuperset
def __ge__(self, other):
return self.data >= set(ref(item) for item in other)
def __eq__(self, other):
if not isinstance(other, self.__class__):
return NotImplemented
return self.data == set(ref(item) for item in other)
def symmetric_difference(self, other):
return self._apply(other, self.data.symmetric_difference)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item) for item in other)
def __ixor__(self, other):
if self._pending_removals:
self._commit_removals()
if self is other:
self.data.clear()
else:
self.data.symmetric_difference_update(ref(item) for item in other)
return self
def union(self, other):
return self._apply(other, self.data.union)
__or__ = union
def isdisjoint(self, other):
return len(self.intersection(other)) == 0
| mit |
0312birdzhang/opencc-for-sailfish | deps/gtest-1.7.0/test/gtest_catch_exceptions_test.py | 2139 | 9901 | #!/usr/bin/env python
#
# Copyright 2010 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's exception catching behavior.
This script invokes gtest_catch_exceptions_test_ and
gtest_catch_exceptions_ex_test_ (programs written with
Google Test) and verifies their output.
"""
__author__ = '[email protected] (Vlad Losev)'
import os
import gtest_test_utils
# Constants.
FLAG_PREFIX = '--gtest_'
LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests'
NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0'
FILTER_FLAG = FLAG_PREFIX + 'filter'
# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with
# exceptions enabled.
EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_ex_test_')
# Path to the gtest_catch_exceptions_test_ binary, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_catch_exceptions_no_ex_test_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
TEST_LIST = gtest_test_utils.Subprocess(
[EXE_PATH, LIST_TESTS_FLAG], env=environ).output
SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST
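# SEH (structured exception handling) is Windows-specific, so these tests are
# only listed by test binaries built on Windows.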
if SUPPORTS_SEH_EXCEPTIONS:
BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output
EX_BINARY_OUTPUT = gtest_test_utils.Subprocess(
[EX_EXE_PATH], env=environ).output
# The tests.
if SUPPORTS_SEH_EXCEPTIONS:
# pylint:disable-msg=C6302
class CatchSehExceptionsTest(gtest_test_utils.TestCase):
"""Tests exception-catching behavior."""
def TestSehExceptions(self, test_output):
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s constructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown '
'in the test fixture\'s destructor'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in SetUp()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in TearDown()'
in test_output)
self.assert_('SEH exception with code 0x2a thrown in the test body'
in test_output)
def testCatchesSehExceptionsWithCxxExceptionsEnabled(self):
self.TestSehExceptions(EX_BINARY_OUTPUT)
def testCatchesSehExceptionsWithCxxExceptionsDisabled(self):
self.TestSehExceptions(BINARY_OUTPUT)
class CatchCxxExceptionsTest(gtest_test_utils.TestCase):
"""Tests C++ exception-catching behavior.
Tests in this test case verify that:
* C++ exceptions are caught and logged as C++ (not SEH) exceptions
    * Exceptions thrown affect the remainder of the test workflow in the
expected manner.
"""
def testCatchesCxxExceptionsInFixtureConstructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s constructor'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInConstructorTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in
EX_BINARY_OUTPUT):
def testCatchesCxxExceptionsInFixtureDestructor(self):
self.assert_('C++ exception with description '
'"Standard C++ exception" thrown '
'in the test fixture\'s destructor'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUpTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUpTestCase()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest constructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTestCaseTest test body '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTearDownTestCase(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDownTestCase()'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInSetUp(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in SetUp()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInSetUpTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('unexpected' not in EX_BINARY_OUTPUT,
'This failure belongs in this test only if '
'"CxxExceptionInSetUpTest" (no quotes) '
'appears on the same line as words "called unexpectedly"')
def testCatchesCxxExceptionsInTearDown(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in TearDown()'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTearDownTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesCxxExceptionsInTestBody(self):
self.assert_('C++ exception with description "Standard C++ exception"'
' thrown in the test body'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest destructor '
'called as expected.'
in EX_BINARY_OUTPUT)
self.assert_('CxxExceptionInTestBodyTest::TearDown() '
'called as expected.'
in EX_BINARY_OUTPUT)
def testCatchesNonStdCxxExceptions(self):
self.assert_('Unknown C++ exception thrown in the test body'
in EX_BINARY_OUTPUT)
def testUnhandledCxxExceptionsAbortTheProgram(self):
# Filters out SEH exception tests on Windows. Unhandled SEH exceptions
# cause tests to show pop-up windows there.
FITLER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*'
# By default, Google Test doesn't catch the exceptions.
uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess(
[EX_EXE_PATH,
NO_CATCH_EXCEPTIONS_FLAG,
FITLER_OUT_SEH_TESTS_FLAG],
env=environ).output
self.assert_('Unhandled C++ exception terminating the program'
in uncaught_exceptions_ex_binary_output)
self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output)
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
JVillella/tensorflow | tensorflow/contrib/boosted_trees/python/kernel_tests/stats_accumulator_ops_test.py | 34 | 17040 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for checking stats accumulator related ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.python.ops import stats_accumulator_ops
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.platform import googletest
class StatsAccumulatorScalarTest(test_util.TensorFlowTestCase):
"""Tests for scalar gradients and hessians accumulator."""
def testSimpleAcculumator(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
op2 = accumulator.add(0, [1], [2], [0.1], [0.2])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)], [0.2, 0.4])
self.assertAllClose(result[(2, 3)], [0.3, 0.4])
def testDropStaleUpdate(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
op2 = accumulator.add(
stamp_token=-1,
partition_ids=[1],
feature_ids=[2],
gradients=[0.1],
hessians=[0.2])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 1)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)], [0.1, 0.2])
self.assertAllClose(result[(2, 3)], [0.3, 0.4])
def testSerialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
with ops.control_dependencies([op1]):
(stamp_token, num_updates, partition_1, feature_1, grads_1,
hessians_1) = accumulator.serialize()
# Make sure that the accumulator hasn't changed during serialization.
with ops.control_dependencies([stamp_token]):
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
accumulator.flush(stamp_token=0, next_stamp_token=1))
(stamp_token, num_updates, partition_1, feature_1, grads_1, hessians_1,
num_updates_2, partition_2, feature_2, grads_2, hessians_2) = sess.run(
[
stamp_token, num_updates, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2
])
result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,
hessians_1)
result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,
hessians_2)
self.assertEqual(num_updates, 1)
self.assertEqual(num_updates_2, 1)
self.assertEqual(len(result_1), 2)
self.assertAllClose(result_1[(1, 2)], [0.1, 0.2])
self.assertAllClose(result_1[(2, 3)], [0.3, 0.4])
self.assertAllEqual(result_1, result_2)
self.assertEqual(0, stamp_token)
def testDeserialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
with ops.control_dependencies([accumulator._create_op]):
# These will be deleted due to deserialize call.
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
gradients=[0.1, 0.3],
hessians=[0.2, 0.4])
with ops.control_dependencies([op1]):
deserialize = (accumulator.deserialize(
stamp_token=2,
num_updates=3,
partition_ids=[3, 4],
feature_ids=[5, 6],
gradients=[0.4, 0.5],
hessians=[0.6, 0.7]))
with ops.control_dependencies([deserialize]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=2, next_stamp_token=3)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads,
hessians)
self.assertEqual(num_updates, 3)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(3, 5)], [0.4, 0.6])
self.assertAllClose(result[(4, 6)], [0.5, 0.7])
def testMakeSummary(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.scalar(),
hessian_shape=tensor_shape.scalar())
partition, feature, grads, hessians = accumulator._make_summary(
partition_ids=[1, 2, 1],
feature_ids=[2, 3, 2],
gradients=[0.1, 0.3, 0.1],
hessians=[0.2, 0.4, 0.2])
partition, feature, grads, hessians = sess.run(
[partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)], [0.2, 0.4])
self.assertAllClose(result[(2, 3)], [0.3, 0.4])
class StatsAccumulatorTensorTest(test_util.TensorFlowTestCase):
"""Tests for tensor gradients and hessians accumulator."""
def testSimpleAcculumator(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]],
[[0.05, 0.06], [0.07, 0.08]]])
op2 = accumulator.add(
stamp_token=0,
partition_ids=[1],
feature_ids=[2],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 2)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2)][1], [[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3)][1], [[0.05, 0.06], [0.07, 0.08]])
def testDropStaleUpdate(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]],
[[0.05, 0.06], [0.07, 0.08]]])
op2 = accumulator.add(
stamp_token=-1,
partition_ids=[1],
feature_ids=[2],
gradients=[[0.10, 0.11]],
hessians=[[[0.011, 0.022], [0.033, 0.044]]])
with ops.control_dependencies([op1, op2]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=0, next_stamp_token=1)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(num_updates, 1)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)][0], [0.1, 0.1])
self.assertAllClose(result[(1, 2)][1], [[0.01, 0.02], [0.03, 0.04]])
self.assertAllClose(result[(2, 3)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3)][1], [[0.05, 0.06], [0.07, 0.08]])
def testSerialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]],
[[0.05, 0.06], [0.07, 0.08]]])
with ops.control_dependencies([op1]):
(stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1) = accumulator.serialize()
# Make sure that the accumulator hasn't changed during serialization.
with ops.control_dependencies([stamp_token]):
num_updates_2, partition_2, feature_2, grads_2, hessians_2 = (
accumulator.flush(stamp_token=0, next_stamp_token=1))
(stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2) = sess.run([
stamp_token, num_updates_1, partition_1, feature_1, grads_1,
hessians_1, num_updates_2, partition_2, feature_2, grads_2,
hessians_2
])
result_1 = _AccumulatorResultToDict(partition_1, feature_1, grads_1,
hessians_1)
result_2 = _AccumulatorResultToDict(partition_2, feature_2, grads_2,
hessians_2)
self.assertEqual(num_updates_1, 1)
self.assertEqual(num_updates_2, 1)
self.assertEqual(len(result_1), 2)
self.assertAllClose(result_1[(1, 2)][0], [0.1, 0.1])
self.assertAllClose(result_1[(1, 2)][1], [[0.01, 0.02], [0.03, 0.04]])
self.assertAllClose(result_1[(2, 3)][0], [0.2, 0.2])
self.assertAllClose(result_1[(2, 3)][1], [[0.05, 0.06], [0.07, 0.08]])
self.assertAllEqual(result_1[1, 2][0], result_2[1, 2][0])
self.assertAllEqual(result_1[1, 2][1], result_2[1, 2][1])
self.assertAllEqual(result_1[2, 3][0], result_2[2, 3][0])
self.assertAllEqual(result_1[2, 3][1], result_2[2, 3][1])
def testDeserialize(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
with ops.control_dependencies([accumulator._create_op]):
# These will be deleted due to deserialize call.
op1 = accumulator.add(
stamp_token=0,
partition_ids=[1, 2],
feature_ids=[2, 3],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]],
[[0.05, 0.06], [0.07, 0.08]]])
with ops.control_dependencies([op1]):
deserialize = accumulator.deserialize(
stamp_token=2,
num_updates=3,
partition_ids=[3, 4],
feature_ids=[4, 5],
# Two values for gradients,
gradients=[[0.3, 0.3], [0.5, 0.5]],
# A 2x2 matrix for each hessian.
hessians=[[[0.03, 0.04], [0.05, 0.06]], [[0.07, 0.08], [0.09,
0.10]]])
with ops.control_dependencies([deserialize]):
num_updates, partition, feature, grads, hessians = accumulator.flush(
stamp_token=2, next_stamp_token=3)
num_updates, partition, feature, grads, hessians = sess.run(
[num_updates, partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads,
hessians)
self.assertEqual(num_updates, 3)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(3, 4)][0], [0.3, 0.3])
self.assertAllClose(result[(3, 4)][1], [[0.03, 0.04], [0.05, 0.06]])
self.assertAllClose(result[(4, 5)][0], [0.5, 0.5])
self.assertAllClose(result[(4, 5)][1], [[0.07, 0.08], [0.09, 0.10]])
def testMakeSummary(self):
with self.test_session() as sess:
accumulator = stats_accumulator_ops.StatsAccumulator(
stamp_token=0,
gradient_shape=tensor_shape.TensorShape([2]),
hessian_shape=tensor_shape.TensorShape([2, 2]))
partition, feature, grads, hessians = accumulator._make_summary(
partition_ids=[1, 2, 1],
feature_ids=[2, 3, 2],
# Two values for gradients,
gradients=[[0.1, 0.1], [0.2, 0.2], [0.10, 0.11]],
# A 2x2 matrix for each hessian.
hessians=[[[0.01, 0.02], [0.03, 0.04]], [[0.05, 0.06], [0.07, 0.08]],
[[0.011, 0.022], [0.033, 0.044]]])
partition, feature, grads, hessians = sess.run(
[partition, feature, grads, hessians])
result = _AccumulatorResultToDict(partition, feature, grads, hessians)
self.assertEqual(len(result), 2)
self.assertAllClose(result[(1, 2)][0], [0.20, 0.21])
self.assertAllClose(result[(1, 2)][1], [[0.021, 0.042], [0.063, 0.084]])
self.assertAllClose(result[(2, 3)][0], [0.2, 0.2])
self.assertAllClose(result[(2, 3)][1], [[0.05, 0.06], [0.07, 0.08]])
def _AccumulatorResultToDict(partition, feature, grads, hessians):
"""Converts the inputs to a dictionary since the ordering changes."""
return {(partition[i], feature[i]): (grads[i], hessians[i])
for i in range(len(partition))}
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
marissazhou/django | tests/signed_cookies_tests/tests.py | 288 | 2768 | from __future__ import unicode_literals
from django.core import signing
from django.http import HttpRequest, HttpResponse
from django.test import SimpleTestCase, override_settings
from django.test.utils import freeze_time
class SignedCookieTest(SimpleTestCase):
def test_can_set_and_read_signed_cookies(self):
response = HttpResponse()
response.set_signed_cookie('c', 'hello')
self.assertIn('c', response.cookies)
self.assertTrue(response.cookies['c'].value.startswith('hello:'))
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value
value = request.get_signed_cookie('c')
self.assertEqual(value, 'hello')
def test_can_use_salt(self):
response = HttpResponse()
response.set_signed_cookie('a', 'hello', salt='one')
request = HttpRequest()
request.COOKIES['a'] = response.cookies['a'].value
value = request.get_signed_cookie('a', salt='one')
self.assertEqual(value, 'hello')
self.assertRaises(signing.BadSignature,
request.get_signed_cookie, 'a', salt='two')
def test_detects_tampering(self):
response = HttpResponse()
response.set_signed_cookie('c', 'hello')
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value[:-2] + '$$'
self.assertRaises(signing.BadSignature,
request.get_signed_cookie, 'c')
def test_default_argument_suppresses_exceptions(self):
response = HttpResponse()
response.set_signed_cookie('c', 'hello')
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value[:-2] + '$$'
self.assertEqual(request.get_signed_cookie('c', default=None), None)
def test_max_age_argument(self):
value = 'hello'
with freeze_time(123456789):
response = HttpResponse()
response.set_signed_cookie('c', value)
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value
self.assertEqual(request.get_signed_cookie('c'), value)
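        # 11 seconds after signing: a max_age of 11 or more still validates,
        # while max_age=10 has already expired.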
with freeze_time(123456800):
self.assertEqual(request.get_signed_cookie('c', max_age=12), value)
self.assertEqual(request.get_signed_cookie('c', max_age=11), value)
self.assertRaises(signing.SignatureExpired,
request.get_signed_cookie, 'c', max_age=10)
@override_settings(SECRET_KEY=b'\xe7')
def test_signed_cookies_with_binary_key(self):
response = HttpResponse()
response.set_signed_cookie('c', 'hello')
request = HttpRequest()
request.COOKIES['c'] = response.cookies['c'].value
self.assertEqual(request.get_signed_cookie('c'), 'hello')
| bsd-3-clause |
68foxboris/enigma2-openpli-vuplus | lib/python/Screens/Rc.py | 12 | 2569 | from Components.Pixmap import MovingPixmap, MultiPixmap
from Tools.Directories import resolveFilename, SCOPE_SKIN
from xml.etree.ElementTree import ElementTree
from Components.config import config, ConfigInteger
from Components.RcModel import rc_model
config.misc.rcused = ConfigInteger(default = 1)
class Rc:
def __init__(self):
self["rc"] = MultiPixmap()
self["arrowdown"] = MovingPixmap()
self["arrowdown2"] = MovingPixmap()
self["arrowup"] = MovingPixmap()
self["arrowup2"] = MovingPixmap()
config.misc.rcused = ConfigInteger(default = 1)
self.isDefaultRc = rc_model.rcIsDefault()
self.rcheight = 500
self.rcheighthalf = 250
self.selectpics = []
self.selectpics.append((self.rcheighthalf, ["arrowdown", "arrowdown2"], (-18,-70)))
self.selectpics.append((self.rcheight, ["arrowup", "arrowup2"], (-18,0)))
self.readPositions()
self.clearSelectedKeys()
self.onShown.append(self.initRc)
def initRc(self):
if self.isDefaultRc:
self["rc"].setPixmapNum(config.misc.rcused.value)
else:
self["rc"].setPixmapNum(0)
def readPositions(self):
if self.isDefaultRc:
target = resolveFilename(SCOPE_SKIN, "rcpositions.xml")
else:
target = rc_model.getRcPositions()
tree = ElementTree(file = target)
rcs = tree.getroot()
self.rcs = {}
for rc in rcs:
id = int(rc.attrib["id"])
self.rcs[id] = {}
for key in rc:
name = key.attrib["name"]
pos = key.attrib["pos"].split(",")
self.rcs[id][name] = (int(pos[0]), int(pos[1]))
def getSelectPic(self, pos):
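		# Choose the arrow pixmaps for a key position: keys in the upper half of
		# the remote picture use the "down" arrows, keys further down use the
		# "up" arrows (see the selectpics list built in __init__).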
for selectPic in self.selectpics:
if pos[1] <= selectPic[0]:
return (selectPic[1], selectPic[2])
return None
def hideRc(self):
self["rc"].hide()
self.hideSelectPics()
def showRc(self):
self["rc"].show()
def selectKey(self, key):
if self.isDefaultRc:
rc = self.rcs[config.misc.rcused.value]
else:
rc = self.rcs[2]
if key in rc:
rcpos = self["rc"].getPosition()
pos = rc[key]
selectPics = self.getSelectPic(pos)
selectPic = None
for x in selectPics[0]:
if x not in self.selectedKeys:
selectPic = x
break
if selectPic is not None:
print "selectPic:", selectPic
self[selectPic].moveTo(rcpos[0] + pos[0] + selectPics[1][0], rcpos[1] + pos[1] + selectPics[1][1], 1)
self[selectPic].startMoving()
self[selectPic].show()
self.selectedKeys.append(selectPic)
def clearSelectedKeys(self):
self.showRc()
self.selectedKeys = []
self.hideSelectPics()
def hideSelectPics(self):
for selectPic in self.selectpics:
for pic in selectPic[1]:
self[pic].hide()
| gpl-2.0 |
ashishtanwer/DFS | conf/trafficmatrixReader_backup3.py | 1 | 2728 | #!/usr/bin/python
import random
fi = open("traffic_matrix_001", "r")
fo = open("traffic_matrix_001.dot", "wb")
fo.write("graph test {\n")
print "graph test {\n"
fo.write("\nflowexport=text\n")
print "\nflowexport=text\n"
RouterDict = dict()
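# Each input line is expected to be "time src dst size"; the source router gets
# a harpoon traffic generator (flowstart derived from time/1000, flowsize from
# size) and the destination router gets an ipdests prefix.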
for line in fi:
if line!= "":
counter=0
LinkList =line.split(" ")
if LinkList[1] in RouterDict:
RouterDict[LinkList[1]]['s'].append(["ipdst=10.%d.0.0/16"%(random.randint(1, 255))])
RouterDict[LinkList[1]]['s'].append(["flowsize=%d"%float(LinkList[3])])
RouterDict[LinkList[1]]['s'].append(["flowstart=exponential(%d)"%(float(LinkList[0])/1000)])
else:
counter += 1
RouterDict[LinkList[1]]={'autoack': ['False'], 'ipdests':[] , 'traffic':[['m']], 'm': [], 's':[]}
RouterDict[LinkList[1]]['m']=[["modulator start=0.0 generator=s profile=((3600,),(1,)) "]]
RouterDict[LinkList[1]]['s'].append(["harpoon "])
RouterDict[LinkList[1]]['s'].append(["ipsrc=10.%d.0.0/16"%(counter)])
RouterDict[LinkList[1]]['s'].append(["ipdst=10.%d.0.0/16"%(random.randint(1, 255))])
RouterDict[LinkList[1]]['s'].append(["flowsize=%d"%float(LinkList[3])])
RouterDict[LinkList[1]]['s'].append(["flowstart=exponential(%d)"%(float(LinkList[0])/1000)])
RouterDict[LinkList[1]]['s'].append(["ipproto=randomchoice(6) "])
RouterDict[LinkList[1]]['s'].append(["sport = randomchoice(22, 80, 443) "])
RouterDict[LinkList[1]]['s'].append(["dport = randomunifint(1025, 65535) "])
RouterDict[LinkList[1]]['s'].append(["lossrate = randomchoice(0.001) "])
if LinkList[2] in RouterDict:
RouterDict[LinkList[2]]['traffic']=[["m2"]]
RouterDict[LinkList[2]]['ipdests'].append(["10.%d.0.0/16"%(random.randint(1, 255))])
else:
RouterDict[LinkList[2]]={'autoack': [['False']], 'ipdests':[] , 'traffic':[['m']], 'm': [], 's':[]}
RouterDict[LinkList[2]]['m']=[["modulator start=0.0 generator=s profile=((3600,),(1,)) "]]
RouterDict[LinkList[2]]['ipdests'].append(["10.%d.0.0/16"%(random.randint(1, 255))])
for k,v in RouterDict.iteritems(): ##Outer Dictionary
print k,
RouterProp=v
print "["
for k1,v1 in RouterProp.iteritems(): ##Inner Dictionary
#print k1
PropertyList=v1 ##List of Router Properties
PropertyStr=""
for item in PropertyList: ## item is list list
PropertyStr+= ''.join(item)
PropertyStr+= ' '
print "\t"+k1+"="+"\""+str(PropertyStr)+"\""
print"];"
fo.write("}\n")
print "}\n"
| gpl-2.0 |
abhishekarora12/ansible | lib/ansible/parsing/__init__.py | 18 | 11050 | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import copy
import json
import os
import stat
import subprocess
from yaml import load, YAMLError
from six import text_type
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.errors.yaml_strings import YAML_SYNTAX_ERROR
from ansible.parsing.vault import VaultLib
from ansible.parsing.splitter import unquote
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleUnicode
from ansible.utils.path import unfrackpath
from ansible.utils.unicode import to_unicode
class DataLoader():
'''
The DataLoader class is used to load and parse YAML or JSON content,
either from a given file name or from a string that was previously
read in through other means. A Vault password can be specified, and
any vault-encrypted files will be decrypted.
Data read from files will also be cached, so the file will never be
read from disk more than once.
Usage:
dl = DataLoader()
(or)
dl = DataLoader(vault_password='foo')
ds = dl.load('...')
ds = dl.load_from_file('/path/to/file')
'''
def __init__(self):
self._basedir = '.'
self._FILE_CACHE = dict()
# initialize the vault stuff with an empty password
self.set_vault_password(None)
def set_vault_password(self, vault_password):
self._vault_password = vault_password
self._vault = VaultLib(password=vault_password)
def load(self, data, file_name='<string>', show_content=True):
'''
Creates a python datastructure from the given data, which can be either
a JSON or YAML string.
'''
try:
# we first try to load this data as JSON
return json.loads(data)
except:
# if loading JSON failed for any reason, we go ahead
# and try to parse it as YAML instead
if isinstance(data, AnsibleUnicode):
# The PyYAML's libyaml bindings use PyUnicode_CheckExact so
# they are unable to cope with our subclass.
# Unwrap and re-wrap the unicode so we can keep track of line
# numbers
new_data = text_type(data)
else:
new_data = data
try:
new_data = self._safe_load(new_data, file_name=file_name)
except YAMLError as yaml_exc:
self._handle_error(yaml_exc, file_name, show_content)
if isinstance(data, AnsibleUnicode):
new_data = AnsibleUnicode(new_data)
new_data.ansible_pos = data.ansible_pos
return new_data
def load_from_file(self, file_name):
''' Loads data from a file, which can contain either JSON or YAML. '''
file_name = self.path_dwim(file_name)
# if the file has already been read in and cached, we'll
# return those results to avoid more file/vault operations
if file_name in self._FILE_CACHE:
parsed_data = self._FILE_CACHE[file_name]
else:
# read the file contents and load the data structure from them
(file_data, show_content) = self._get_file_contents(file_name)
parsed_data = self.load(data=file_data, file_name=file_name, show_content=show_content)
# cache the file contents for next time
self._FILE_CACHE[file_name] = parsed_data
# return a deep copy here, so the cache is not affected
return copy.deepcopy(parsed_data)
def path_exists(self, path):
path = self.path_dwim(path)
return os.path.exists(path)
def is_file(self, path):
path = self.path_dwim(path)
return os.path.isfile(path)
def is_directory(self, path):
path = self.path_dwim(path)
return os.path.isdir(path)
def list_directory(self, path):
path = self.path_dwim(path)
return os.listdir(path)
def is_executable(self, path):
'''is the given path executable?'''
path = self.path_dwim(path)
return (stat.S_IXUSR & os.stat(path)[stat.ST_MODE] or stat.S_IXGRP & os.stat(path)[stat.ST_MODE] or stat.S_IXOTH & os.stat(path)[stat.ST_MODE])
def _safe_load(self, stream, file_name=None):
''' Implements yaml.safe_load(), except using our custom loader class. '''
loader = AnsibleLoader(stream, file_name)
try:
return loader.get_single_data()
finally:
loader.dispose()
def _get_file_contents(self, file_name):
'''
Reads the file contents from the given file name, and will decrypt them
if they are found to be vault-encrypted.
'''
if not file_name or not isinstance(file_name, basestring):
raise AnsibleParserError("Invalid filename: '%s'" % str(file_name))
if not self.path_exists(file_name) or not self.is_file(file_name):
raise AnsibleParserError("the file_name '%s' does not exist, or is not readable" % file_name)
show_content = True
try:
with open(file_name, 'rb') as f:
data = f.read()
if self._vault.is_encrypted(data):
data = self._vault.decrypt(data)
show_content = False
data = to_unicode(data, errors='strict')
return (data, show_content)
except (IOError, OSError) as e:
raise AnsibleParserError("an error occurred while trying to read the file '%s': %s" % (file_name, str(e)))
def _handle_error(self, yaml_exc, file_name, show_content):
'''
Optionally constructs an object (AnsibleBaseYAMLObject) to encapsulate the
file name/position where a YAML exception occurred, and raises an AnsibleParserError
to display the syntax exception information.
'''
# if the YAML exception contains a problem mark, use it to construct
# an object the error class can use to display the faulty line
err_obj = None
if hasattr(yaml_exc, 'problem_mark'):
err_obj = AnsibleBaseYAMLObject()
err_obj.ansible_pos = (file_name, yaml_exc.problem_mark.line + 1, yaml_exc.problem_mark.column + 1)
raise AnsibleParserError(YAML_SYNTAX_ERROR, obj=err_obj, show_content=show_content)
def get_basedir(self):
''' returns the current basedir '''
return self._basedir
def set_basedir(self, basedir):
''' sets the base directory, used to find files when a relative path is given '''
if basedir is not None:
self._basedir = to_unicode(basedir)
def path_dwim(self, given):
'''
make relative paths work like folks expect.
'''
given = unquote(given)
if given.startswith("/"):
return os.path.abspath(given)
elif given.startswith("~"):
return os.path.abspath(os.path.expanduser(given))
else:
return os.path.abspath(os.path.join(self._basedir, given))
def path_dwim_relative(self, path, dirname, source):
''' find one file in a role/playbook dirs with/without dirname subdir '''
search = []
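        # Candidate paths are collected in priority order; the first one that
        # exists wins (see the loop at the end of this method).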
isrole = False
# I have full path, nothing else needs to be looked at
if source.startswith('~') or source.startswith('/'):
search.append(self.path_dwim(source))
else:
# base role/play path + templates/files/vars + relative filename
search.append(os.path.join(path, dirname, source))
basedir = unfrackpath(path)
# is it a role and if so make sure you get correct base path
if path.endswith('tasks') and os.path.exists(os.path.join(path,'main.yml')) \
or os.path.exists(os.path.join(path,'tasks/main.yml')):
isrole = True
if path.endswith('tasks'):
basedir = unfrackpath(os.path.dirname(path))
cur_basedir = self._basedir
self.set_basedir(basedir)
# resolved base role/play path + templates/files/vars + relative filename
search.append(self.path_dwim(os.path.join(basedir, dirname, source)))
self.set_basedir(cur_basedir)
if isrole and not source.endswith(dirname):
# look in role's tasks dir w/o dirname
search.append(self.path_dwim(os.path.join(basedir, 'tasks', source)))
# try to create absolute path for loader basedir + templates/files/vars + filename
search.append(self.path_dwim(os.path.join(dirname,source)))
search.append(self.path_dwim(os.path.join(basedir, source)))
# try to create absolute path for loader basedir + filename
search.append(self.path_dwim(source))
for candidate in search:
if os.path.exists(candidate):
break
return candidate
def read_vault_password_file(self, vault_password_file):
"""
Read a vault password from a file or if executable, execute the script and
retrieve password from STDOUT
"""
this_path = os.path.realpath(os.path.expanduser(vault_password_file))
if not os.path.exists(this_path):
raise AnsibleError("The vault password file %s was not found" % this_path)
if self.is_executable(this_path):
try:
# STDERR not captured to make it easier for users to prompt for input in their scripts
p = subprocess.Popen(this_path, stdout=subprocess.PIPE)
except OSError as e:
raise AnsibleError("Problem running vault password script %s (%s). If this is not a script, remove the executable bit from the file." % (' '.join(this_path), e))
stdout, stderr = p.communicate()
self.set_vault_password(stdout.strip('\r\n'))
else:
try:
f = open(this_path, "rb")
self.set_vault_password(f.read().strip())
f.close()
except (OSError, IOError) as e:
raise AnsibleError("Could not read vault password file %s: %s" % (this_path, e))
| gpl-3.0 |
drpngx/tensorflow | tensorflow/contrib/sparsemax/python/ops/sparsemax.py | 19 | 2598 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sparsemax op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
__all__ = ["sparsemax"]
def sparsemax(logits, name=None):
"""Computes sparsemax activations [1].
For each batch `i` and class `j` we have
$$sparsemax[i, j] = max(logits[i, j] - tau(logits[i, :]), 0)$$
[1]: https://arxiv.org/abs/1602.02068
Args:
logits: A `Tensor`. Must be one of the following types: `half`, `float32`,
`float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as `logits`.
"""
with ops.name_scope(name, "sparsemax", [logits]) as name:
logits = ops.convert_to_tensor(logits, name="logits")
obs = array_ops.shape(logits)[0]
dims = array_ops.shape(logits)[1]
z = logits - math_ops.reduce_mean(logits, axis=1)[:, array_ops.newaxis]
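    # Subtracting the per-row mean does not change the result (sparsemax is
    # invariant to adding a constant to every logit); it only improves
    # numerical stability.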
# sort z
z_sorted, _ = nn.top_k(z, k=dims)
# calculate k(z)
z_cumsum = math_ops.cumsum(z_sorted, axis=1)
k = math_ops.range(
1, math_ops.cast(dims, logits.dtype) + 1, dtype=logits.dtype)
z_check = 1 + k * z_sorted > z_cumsum
# because the z_check vector is always [1,1,...1,0,0,...0] finding the
# (index + 1) of the last `1` is the same as just summing the number of 1.
k_z = math_ops.reduce_sum(math_ops.cast(z_check, dtypes.int32), axis=1)
# calculate tau(z)
indices = array_ops.stack([math_ops.range(0, obs), k_z - 1], axis=1)
tau_sum = array_ops.gather_nd(z_cumsum, indices)
tau_z = (tau_sum - 1) / math_ops.cast(k_z, logits.dtype)
# calculate p
return math_ops.maximum(
math_ops.cast(0, logits.dtype), z - tau_z[:, array_ops.newaxis])
| apache-2.0 |
viaict/viaduct | app/models/base_model.py | 1 | 5425 | """
Extra functionality that is used by all models.
It extends db.Model with extra functions.
"""
import dateutil.parser
from datetime import datetime
from sqlalchemy.exc import SQLAlchemyError
from typing import Any, Tuple
from app import db
from app.utils.serialize_sqla import serialize_sqla
class BaseEntity(object):
__table_args__: Any = {'sqlite_autoincrement': True}
# Columns (in order) to be printed when an instance of the object is
# printed
prints: Tuple[str, ...] = ('id',)
# Columns to be shown when the to_dict function is used. This should only
# be changed when certain values should not be shown in the dictionary.
# Relationships should be added when a relationship is supposed to be in
# the dictionary as well.
json_excludes: Tuple[str, ...] = tuple()
jsons = None
json_relationships = None
json_relationship_ids: Tuple[str, ...] = tuple()
# Columns that every model needs
id = db.Column(db.Integer, primary_key=True)
created = db.Column(db.DateTime, default=datetime.now)
modified = db.Column(db.DateTime, default=datetime.now,
onupdate=datetime.now)
# Get all entries.
@classmethod
def get_all(cls):
return cls.query.all()
# Get entry by id.
@classmethod
def by_id(cls, _id):
return cls.query.filter_by(id=_id).first()
# Remove entry by id.
@classmethod
def remove_by_id(cls, _id):
entry = cls.by_id(_id)
if entry is None:
return
db.session.delete(entry)
db.session.commit()
# Get entries by id list.
@classmethod
def by_ids(cls, ids):
try:
return db.session.query(cls).filter(cls.id.in_(ids)).all()
except SQLAlchemyError:
return []
# Function used by print to print a model at server side.
# It uses the prints attribute from the object to determine what values to
# print. This attribute is the id of the object by default.
def __repr__(self):
first = True
string = '<%s(' % (type(self).__name__)
for attr in self.prints:
if not first:
string += ', '
string += '"%s"' % (getattr(self, attr))
first = False
string += ')>'
return string
# Functionality after this point is a bit hard to understand. Just read the
# function comments and that should be enough.
# Function to convert a sqlalchemy object instance to a dictionary. This is
# needed for json serialization of an object. The jsons attribute is used
# to determine what values to serialize (password hashes and such should
    # not be in there)
def to_dict(self, exclude=True, **kwargs):
attrs = {}
if not self.jsons or not exclude:
if exclude:
jsons = (column.name for column in self.__table__.columns if
column.name not in self.json_excludes)
else:
jsons = (column.name for column in self.__table__.columns)
else:
jsons = self.jsons
for column in jsons:
value = serialize_sqla(getattr(self, column), **kwargs)
attrs[column] = value
if self.json_relationships:
for rel in self.json_relationships:
attrs[rel] = serialize_sqla(getattr(self, rel).all(), **kwargs)
for rel in self.json_relationship_ids:
attrs[rel] = tuple(a[0] for a in getattr(self, rel).values('id'))
return attrs
# Function that automatically parses a dictionary to the model. It will
    # update the entry that it finds by that id. All other key/value pairs
# will be parsed to column value pairs. The entry will also be saved.
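    # For example (hypothetical model and relationship names):
    #     User.merge_dict({'id': 3, 'name': 'new name', 'groups': [1, 2]},
    #                     relationships={'groups': Group})
    # would update user 3 and replace its groups relationship with the groups
    # whose ids are 1 and 2, looked up via Group.by_ids().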
@classmethod
def merge_dict(cls, obj, relationships={}):
# Get the correct entry from the database
if 'id' in obj and obj['id']:
entry = cls.by_id(obj['id'])
if not entry:
return None
# If the dict doesn't contain id it means the entry does not exist yet
else:
entry = cls()
# Remove id, created and modified, since those are things you want to
        # automatically update
obj.pop('id', None)
obj.pop('created', None)
obj.pop('modified', None)
column_names = tuple(column.name for column in cls.__table__.columns)
# Update all values from the dict that exist as a column or a
# relationship
for key, value in list(obj.items()):
if key in column_names:
columntype = str(cls.__table__.columns[key].type)
if columntype == 'DATE' and value is not None:
if isinstance(value, str):
value = dateutil.parser.parse(value)
elif columntype == 'TIME' and value is not None:
if isinstance(value, str):
value = dateutil.parser.parse(value).time()
setattr(entry, key, value)
elif key in relationships:
setattr(entry, key, relationships[key].by_ids(value))
db.session.add(entry)
db.session.commit()
return entry
# For future proofing use new_dict when creating new entries, so it could
# become a separate function if needed
new_dict = merge_dict
| mit |
dataxu/ansible | test/units/modules/network/nxos/test_nxos_switchport.py | 51 | 3526 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import _nxos_switchport
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosSwitchportModule(TestNxosModule):
module = _nxos_switchport
def setUp(self):
super(TestNxosSwitchportModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.nxos._nxos_switchport.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.nxos._nxos_switchport.load_config')
self.load_config = self.mock_load_config.start()
self.mock_get_capabilities = patch('ansible.modules.network.nxos._nxos_switchport.get_capabilities')
self.get_capabilities = self.mock_get_capabilities.start()
self.get_capabilities.return_value = {'network_api': 'cliconf'}
def tearDown(self):
super(TestNxosSwitchportModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
self.mock_get_capabilities.stop()
def load_fixtures(self, commands=None, device=''):
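        # Each command run by the module is mapped to a fixture file: the text
        # before any " | " pipe is taken, spaces become underscores, and the
        # "2/1" port suffix is stripped from the name.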
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for command in commands:
filename = str(command).split(' | ')[0].replace(' ', '_')
filename = filename.replace('2/1', '')
output.append(load_fixture('_nxos_switchport', filename))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = None
def test_nxos_switchport_present(self):
set_module_args(dict(interface='Ethernet2/1', mode='access', access_vlan=1, state='present'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['interface ethernet2/1', 'switchport access vlan 1'])
def test_nxos_switchport_unconfigured(self):
set_module_args(dict(interface='Ethernet2/1', state='unconfigured'))
result = self.execute_module(changed=True)
self.assertEqual(result['commands'], ['interface ethernet2/1',
'switchport mode access',
'switch access vlan 1',
'switchport trunk native vlan 1',
'switchport trunk allowed vlan all'])
def test_nxos_switchport_absent(self):
set_module_args(dict(interface='Ethernet2/1', mode='access', access_vlan=3, state='absent'))
result = self.execute_module(changed=False)
self.assertEqual(result['commands'], [])
| gpl-3.0 |
joeythesaint/yocto-autobuilder | lib/python2.7/site-packages/Twisted-12.2.0-py2.7-linux-x86_64.egg/twisted/internet/_pollingfile.py | 18 | 8883 | # -*- test-case-name: twisted.internet.test.test_pollingfile -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implements a simple polling interface for file descriptors that don't work with
select() - this is pretty much only useful on Windows.
"""
from zope.interface import implements
from twisted.internet.interfaces import IConsumer, IPushProducer
MIN_TIMEOUT = 0.000000001
MAX_TIMEOUT = 0.1
class _PollableResource:
active = True
def activate(self):
self.active = True
def deactivate(self):
self.active = False
class _PollingTimer:
# Everything is private here because it is really an implementation detail.
def __init__(self, reactor):
self.reactor = reactor
self._resources = []
self._pollTimer = None
self._currentTimeout = MAX_TIMEOUT
self._paused = False
def _addPollableResource(self, res):
self._resources.append(res)
self._checkPollingState()
def _checkPollingState(self):
for resource in self._resources:
if resource.active:
self._startPolling()
break
else:
self._stopPolling()
def _startPolling(self):
if self._pollTimer is None:
self._pollTimer = self._reschedule()
def _stopPolling(self):
if self._pollTimer is not None:
self._pollTimer.cancel()
self._pollTimer = None
def _pause(self):
self._paused = True
def _unpause(self):
self._paused = False
self._checkPollingState()
def _reschedule(self):
if not self._paused:
return self.reactor.callLater(self._currentTimeout, self._pollEvent)
def _pollEvent(self):
workUnits = 0.
anyActive = []
for resource in self._resources:
if resource.active:
workUnits += resource.checkWork()
# Check AFTER work has been done
if resource.active:
anyActive.append(resource)
newTimeout = self._currentTimeout
if workUnits:
newTimeout = self._currentTimeout / (workUnits + 1.)
if newTimeout < MIN_TIMEOUT:
newTimeout = MIN_TIMEOUT
else:
newTimeout = self._currentTimeout * 2.
if newTimeout > MAX_TIMEOUT:
newTimeout = MAX_TIMEOUT
self._currentTimeout = newTimeout
if anyActive:
self._pollTimer = self._reschedule()
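# Added illustration (not in the original module): with MAX_TIMEOUT = 0.1, a poll
# that reports workUnits == 3 shrinks the next delay to 0.1 / (3 + 1) = 0.025 s,
# and each idle poll afterwards doubles it again (0.025 -> 0.05 -> 0.1), always
# clamped to [MIN_TIMEOUT, MAX_TIMEOUT], so busy pipes are polled more frequently.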
# If we ever (let's hope not) need the above functionality on UNIX, this could
# be factored into a different module.
import win32pipe
import win32file
import win32api
import pywintypes
class _PollableReadPipe(_PollableResource):
implements(IPushProducer)
def __init__(self, pipe, receivedCallback, lostCallback):
# security attributes for pipes
self.pipe = pipe
self.receivedCallback = receivedCallback
self.lostCallback = lostCallback
def checkWork(self):
finished = 0
fullDataRead = []
while 1:
try:
buffer, bytesToRead, result = win32pipe.PeekNamedPipe(self.pipe, 1)
# finished = (result == -1)
if not bytesToRead:
break
hr, data = win32file.ReadFile(self.pipe, bytesToRead, None)
fullDataRead.append(data)
except win32api.error:
finished = 1
break
dataBuf = ''.join(fullDataRead)
if dataBuf:
self.receivedCallback(dataBuf)
if finished:
self.cleanup()
return len(dataBuf)
def cleanup(self):
self.deactivate()
self.lostCallback()
def close(self):
try:
win32api.CloseHandle(self.pipe)
except pywintypes.error:
# You can't close std handles...?
pass
def stopProducing(self):
self.close()
def pauseProducing(self):
self.deactivate()
def resumeProducing(self):
self.activate()
FULL_BUFFER_SIZE = 64 * 1024
class _PollableWritePipe(_PollableResource):
implements(IConsumer)
def __init__(self, writePipe, lostCallback):
self.disconnecting = False
self.producer = None
self.producerPaused = 0
self.streamingProducer = 0
self.outQueue = []
self.writePipe = writePipe
self.lostCallback = lostCallback
try:
win32pipe.SetNamedPipeHandleState(writePipe,
win32pipe.PIPE_NOWAIT,
None,
None)
except pywintypes.error:
# Maybe it's an invalid handle. Who knows.
pass
def close(self):
self.disconnecting = True
def bufferFull(self):
if self.producer is not None:
self.producerPaused = 1
self.producer.pauseProducing()
def bufferEmpty(self):
if self.producer is not None and ((not self.streamingProducer) or
self.producerPaused):
            self.producerPaused = 0
self.producer.resumeProducing()
return True
return False
# almost-but-not-quite-exact copy-paste from abstract.FileDescriptor... ugh
def registerProducer(self, producer, streaming):
"""Register to receive data from a producer.
This sets this selectable to be a consumer for a producer. When this
selectable runs out of data on a write() call, it will ask the producer
to resumeProducing(). A producer should implement the IProducer
interface.
FileDescriptor provides some infrastructure for producer methods.
"""
if self.producer is not None:
raise RuntimeError(
"Cannot register producer %s, because producer %s was never "
"unregistered." % (producer, self.producer))
if not self.active:
producer.stopProducing()
else:
self.producer = producer
self.streamingProducer = streaming
if not streaming:
producer.resumeProducing()
def unregisterProducer(self):
"""Stop consuming data from a producer, without disconnecting.
"""
self.producer = None
def writeConnectionLost(self):
self.deactivate()
try:
win32api.CloseHandle(self.writePipe)
except pywintypes.error:
# OMG what
pass
self.lostCallback()
def writeSequence(self, seq):
"""
Append a C{list} or C{tuple} of bytes to the output buffer.
@param seq: C{list} or C{tuple} of C{str} instances to be appended to
the output buffer.
@raise TypeError: If C{seq} contains C{unicode}.
"""
if unicode in map(type, seq):
raise TypeError("Unicode not allowed in output buffer.")
self.outQueue.extend(seq)
def write(self, data):
"""
Append some bytes to the output buffer.
@param data: C{str} to be appended to the output buffer.
@type data: C{str}.
@raise TypeError: If C{data} is C{unicode} instead of C{str}.
"""
if isinstance(data, unicode):
raise TypeError("Unicode not allowed in output buffer.")
if self.disconnecting:
return
self.outQueue.append(data)
if sum(map(len, self.outQueue)) > FULL_BUFFER_SIZE:
self.bufferFull()
def checkWork(self):
numBytesWritten = 0
if not self.outQueue:
if self.disconnecting:
self.writeConnectionLost()
return 0
try:
win32file.WriteFile(self.writePipe, '', None)
except pywintypes.error:
self.writeConnectionLost()
return numBytesWritten
while self.outQueue:
data = self.outQueue.pop(0)
errCode = 0
try:
errCode, nBytesWritten = win32file.WriteFile(self.writePipe,
data, None)
except win32api.error:
self.writeConnectionLost()
break
else:
# assert not errCode, "wtf an error code???"
numBytesWritten += nBytesWritten
if len(data) > nBytesWritten:
self.outQueue.insert(0, data[nBytesWritten:])
break
else:
resumed = self.bufferEmpty()
if not resumed and self.disconnecting:
self.writeConnectionLost()
return numBytesWritten
| gpl-2.0 |
okami-1/python-dnssec | dnssec/defs.py | 1 | 4584 | # Copyright (C) 2015 Okami, [email protected]
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
LOG_DEFAULT = '/var/log/dnssec-tools/pyrollerd.log'
ME = 'pyrollerd'
DEFAULT_NAP = 60
# Method selected for calculating rollover times.
RM_ENDROLL = 1 # Calculate from end of last roll.
RM_KEYGEN = 2 # Calculate from last key generation.
RM_STARTROLL = 3 # Calculate from start of last roll. (NYI)
DT_LOADZONE = 'roll_loadzone'
DT_LOGFILE = 'roll_logfile'
DT_LOGLEVEL = 'roll_loglevel'
DT_LOGTZ = 'log_tz'
DT_RNDCOPTS = 'rndc-opts'
DT_SLEEP = 'roll_sleeptime'
DT_USERNAME = 'roll_username'
DT_AUTOSIGN = 'roll_autosign'
OPT_ALWAYSSIGN = 'alwayssign'
OPT_AUTOSIGN = 'autosign'
OPT_DIR = 'directory'
OPT_DISPLAY = 'display'
OPT_DTCONF = 'dtconfig'
OPT_FOREGROUND = 'foreground'
OPT_HELP = 'help'
OPT_LOGFILE = 'logfile'
OPT_LOGLEVEL = 'loglevel'
OPT_LOGTZ = 'logtz'
OPT_NORELOAD = 'noreload'
OPT_PARAMS = 'parameters'
OPT_PIDFILE = 'pidfile'
OPT_REALM = 'realm'
OPT_RRFILE = 'rrfile'
OPT_SINGLERUN = 'singlerun'
OPT_SLEEP = 'sleep'
OPT_USERNAME = 'username'
OPT_VERBOSE = 'verbose'
OPT_VERSION = 'Version'
OPT_ZONESIGNER = 'zonesigner'
OPT_ZSARGS = 'zsargs'
MIN_SLEEP = 10 # Minimum time rollerd will sleep.
EVT_FULLLIST = 1 # Full list is run every N seconds.
EVT_QUEUE_SOON = 2 # Queues, with "soon" events.
QUEUE_ERRTIME = 60 # Time to sleep on rollrec error.
# QUEUE_SOONLIMIT defines the length of "soon". When building the soon
# queue, any zone with an event between now and (now + QUEUE_SOONLIMIT)
# will be added to the soon queue. This is a seconds count.
#
# This value will depend on the number of managed zones and their lifespans.
# The default value is for a day, which means the soon queue will hold all
# events that will occur within the next 24 hours.
QUEUE_SOONLIMIT = 86400
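# Added example: with the default of 86400 seconds, a zone whose next event is
# three hours away (now + 10800) is placed on the soon queue, while one two days
# away is left to the periodic full-list scan instead.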
QUEUE_RUNSCAN = '<<< run full scan >>>' # Fake rollrec name to trigger a full scan.
# If we find the rollrec file is empty, we'll give an error message
# only on an occasional pass through the zone list.
MAXRRFERRS = 5 # Number of list passes to stay quiet.
# The remaining ROLLCMD_ entities are the rollmgr_sendcmd() commands
# recognized by rollerd. %roll_commands is a hash table of valid commands.
ROLLCMD_DISPLAY = 'rollcmd_display'
ROLLCMD_DSPUB = 'rollcmd_dspub'
ROLLCMD_DSPUBALL = 'rollcmd_dspuball'
ROLLCMD_GETSTATUS = 'rollcmd_getstatus'
ROLLCMD_LOGFILE = 'rollcmd_logfile'
ROLLCMD_LOGLEVEL = 'rollcmd_loglevel'
ROLLCMD_LOGMSG = 'rollcmd_logmsg'
ROLLCMD_LOGTZ = 'rollcmd_logtz'
ROLLCMD_MERGERRFS = 'rollcmd_mergerrfs'
ROLLCMD_PHASEMSG = 'rollcmd_phasemsg'
ROLLCMD_ROLLALL = 'rollcmd_rollall'
ROLLCMD_ROLLALLKSKS = 'rollcmd_rollallksks'
ROLLCMD_ROLLALLZSKS = 'rollcmd_rollallzsks'
ROLLCMD_ROLLKSK = 'rollcmd_rollksk'
ROLLCMD_ROLLREC = 'rollcmd_rollrec'
ROLLCMD_ROLLZONE = 'rollcmd_rollzone'
ROLLCMD_ROLLZSK = 'rollcmd_rollzsk'
ROLLCMD_RUNQUEUE = 'rollcmd_runqueue'
ROLLCMD_QUEUELIST = 'rollcmd_queuelist'
ROLLCMD_QUEUESTATUS = 'rollcmd_queuestatus'
ROLLCMD_SHUTDOWN = 'rollcmd_shutdown'
ROLLCMD_SIGNZONE = 'rollcmd_signzone'
ROLLCMD_SIGNZONES = 'rollcmd_signzones'
ROLLCMD_SKIPALL = 'rollcmd_skipall'
ROLLCMD_SKIPZONE = 'rollcmd_skipzone'
ROLLCMD_SLEEPTIME = 'rollcmd_sleeptime'
ROLLCMD_SPLITRRF = 'rollcmd_splitrrf'
ROLLCMD_STATUS = 'rollcmd_status'
ROLLCMD_ZONEGROUP = 'rollcmd_zonegroup'
ROLLCMD_ZONELOG = 'rollcmd_zonelog'
ROLLCMD_ZONESTATUS = 'rollcmd_zonestatus'
ROLLCMD_ZSARGS = 'rollcmd_zsargs'
# The ROLLCMD_RC_ entities are return codes sent from rollerd and received
# by client programs from rollmgr_getresp().
ROLLCMD_RC_OKAY = 0
ROLLCMD_RC_BADLEVEL = 1
ROLLCMD_RC_BADFILE = 2
ROLLCMD_RC_BADSLEEP = 3
ROLLCMD_RC_BADROLLREC = 4
ROLLCMD_RC_BADTZ = 5
ROLLCMD_RC_RRFOPEN = 6
ROLLCMD_RC_NOZONES = 7
ROLLCMD_RC_BADZONE = 8
ROLLCMD_RC_BADZONEDATA = 9
ROLLCMD_RC_DISPLAY = 10
ROLLCMD_RC_KSKROLL = 11
ROLLCMD_RC_ZSKROLL = 12
ROLLCMD_RC_NOARGS = 13
ROLLCMD_RC_BADEVENT = 14
ROLLCMD_RC_BADZONEGROUP = 15
| gpl-3.0 |
pyparallel/numpy | numpy/lib/tests/test_ufunclike.py | 188 | 2024 | from __future__ import division, absolute_import, print_function
import numpy.core as nx
import numpy.lib.ufunclike as ufl
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_array_equal
)
class TestUfunclike(TestCase):
def test_isposinf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
tgt = nx.array([True, False, False, False, False, False])
res = ufl.isposinf(a)
assert_equal(res, tgt)
res = ufl.isposinf(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_isneginf(self):
a = nx.array([nx.inf, -nx.inf, nx.nan, 0.0, 3.0, -3.0])
out = nx.zeros(a.shape, bool)
tgt = nx.array([False, True, False, False, False, False])
res = ufl.isneginf(a)
assert_equal(res, tgt)
res = ufl.isneginf(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
def test_fix(self):
a = nx.array([[1.0, 1.1, 1.5, 1.8], [-1.0, -1.1, -1.5, -1.8]])
out = nx.zeros(a.shape, float)
tgt = nx.array([[1., 1., 1., 1.], [-1., -1., -1., -1.]])
res = ufl.fix(a)
assert_equal(res, tgt)
res = ufl.fix(a, out)
assert_equal(res, tgt)
assert_equal(out, tgt)
assert_equal(ufl.fix(3.14), 3)
def test_fix_with_subclass(self):
class MyArray(nx.ndarray):
def __new__(cls, data, metadata=None):
res = nx.array(data, copy=True).view(cls)
res.metadata = metadata
return res
def __array_wrap__(self, obj, context=None):
obj.metadata = self.metadata
return obj
a = nx.array([1.1, -1.1])
m = MyArray(a, metadata='foo')
f = ufl.fix(m)
assert_array_equal(f, nx.array([1, -1]))
assert_(isinstance(f, MyArray))
assert_equal(f.metadata, 'foo')
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
EricSB/nupic | examples/opf/clients/hotgym/anomaly/model_params.py | 3 | 8788 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
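# Hedged usage sketch (added for clarity; the import path and API calls below are
# assumptions based on the standard OPF hotgym examples, not part of this file):
#
#   from nupic.frameworks.opf.modelfactory import ModelFactory
#   model = ModelFactory.create(MODEL_PARAMS)
#   model.enableInference({'predictedField': 'consumption'})
#   result = model.run({'timestamp': some_datetime, 'consumption': 5.3})
#
# The 'consumption' and 'timestamp' field names match the encoders defined below.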
MODEL_PARAMS = {
# Type of model that the rest of these parameters apply to.
'model': "CLA",
# Version that specifies the format of the config.
'version': 1,
# Intermediate variables used to compute fields in modelParams and also
# referenced from the control section.
'aggregationInfo': { 'days': 0,
'fields': [(u'c1', 'sum'), (u'c0', 'first')],
'hours': 1,
'microseconds': 0,
'milliseconds': 0,
'minutes': 0,
'months': 0,
'seconds': 0,
'weeks': 0,
'years': 0},
'predictAheadTime': None,
# Model parameter dictionary.
'modelParams': {
# The type of inference that this model will perform
'inferenceType': 'TemporalAnomaly',
'sensorParams': {
# Sensor diagnostic output verbosity control;
# if > 0: sensor region will print out on screen what it's sensing
      # at each step. 0: silent; >=1: some info; >=2: more info;
# >=3: even more info (see compute() in py/regions/RecordSensor.py)
'verbosity' : 0,
# Example:
# dsEncoderSchema = [
# DeferredDictLookup('__field_name_encoder'),
# ],
#
# (value generated from DS_ENCODER_SCHEMA)
'encoders': {
u'timestamp_timeOfDay': {
'fieldname': u'timestamp',
'name': u'timestamp_timeOfDay',
'timeOfDay': (21, 9.5),
'type': 'DateEncoder'
},
u'timestamp_dayOfWeek': None,
u'timestamp_weekend': None,
u'consumption': {
'clipInput': True,
'fieldname': u'consumption',
'maxval': 100.0,
'minval': 0.0,
'n': 50,
'name': u'consumption',
'type': 'ScalarEncoder',
'w': 21
},
},
# A dictionary specifying the period for automatically-generated
# resets from a RecordSensor;
#
# None = disable automatically-generated resets (also disabled if
# all of the specified values evaluate to 0).
# Valid keys is the desired combination of the following:
# days, hours, minutes, seconds, milliseconds, microseconds, weeks
#
# Example for 1.5 days: sensorAutoReset = dict(days=1,hours=12),
'sensorAutoReset' : None,
},
'spEnable': True,
'spParams': {
# SP diagnostic output verbosity control;
# 0: silent; >=1: some info; >=2: more info;
'spVerbosity' : 0,
# Spatial Pooler implementation selector.
# Options: 'py', 'cpp' (speed optimized, new)
'spatialImp' : 'cpp',
'globalInhibition': 1,
# Number of columns in the SP (must be same as in TP)
'columnCount': 2048,
'inputWidth': 0,
# SP inhibition control (absolute value);
# Maximum number of active columns in the SP region's output (when
# there are more, the weaker ones are suppressed)
'numActiveColumnsPerInhArea': 40,
'seed': 1956,
# potentialPct
# What percent of the columns's receptive field is available
# for potential synapses.
'potentialPct': 0.8,
# The default connected threshold. Any synapse whose
# permanence value is above the connected threshold is
# a "connected synapse", meaning it can contribute to the
# cell's firing. Typical value is 0.10.
'synPermConnected': 0.1,
'synPermActiveInc': 0.0001,
'synPermInactiveDec': 0.0005,
# boostStrength controls the strength of boosting. It should be a
# a number greater or equal than 0.0. No boosting is applied if
# boostStrength=0.0. Boosting encourages efficient usage of columns.
'boostStrength': 0.0,
},
# Controls whether TP is enabled or disabled;
# TP is necessary for making temporal predictions, such as predicting
# the next inputs. Without TP, the model is only capable of
# reconstructing missing sensor inputs (via SP).
'tpEnable' : True,
'tpParams': {
# TP diagnostic output verbosity control;
# 0: silent; [1..6]: increasing levels of verbosity
# (see verbosity in nupic/trunk/py/nupic/research/TP.py and TP10X*.py)
'verbosity': 0,
# Number of cell columns in the cortical region (same number for
# SP and TP)
# (see also tpNCellsPerCol)
'columnCount': 2048,
# The number of cells (i.e., states), allocated per column.
'cellsPerColumn': 32,
'inputWidth': 2048,
'seed': 1960,
# Temporal Pooler implementation selector (see _getTPClass in
# CLARegion.py).
'temporalImp': 'cpp',
# New Synapse formation count
# NOTE: If None, use spNumActivePerInhArea
#
# TODO: need better explanation
'newSynapseCount': 20,
# Maximum number of synapses per segment
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSynapsesPerSegment': 32,
# Maximum number of segments per cell
# > 0 for fixed-size CLA
# -1 for non-fixed-size CLA
#
# TODO: for Ron: once the appropriate value is placed in TP
# constructor, see if we should eliminate this parameter from
# description.py.
'maxSegmentsPerCell': 128,
# Initial Permanence
# TODO: need better explanation
'initialPerm': 0.21,
# Permanence Increment
'permanenceInc': 0.1,
# Permanence Decrement
# If set to None, will automatically default to tpPermanenceInc
# value.
'permanenceDec' : 0.1,
'globalDecay': 0.0,
'maxAge': 0,
# Minimum number of active synapses for a segment to be considered
# during search for the best-matching segments.
# None=use default
# Replaces: tpMinThreshold
'minThreshold': 9,
# Segment activation threshold.
# A segment is active if it has >= tpSegmentActivationThreshold
# connected synapses that are active due to infActiveState
# None=use default
# Replaces: tpActivationThreshold
'activationThreshold': 12,
'outputType': 'normal',
# "Pay Attention Mode" length. This tells the TP how many new
# elements to append to the end of a learned sequence at a time.
# Smaller values are better for datasets with short sequences,
# higher values are better for datasets with long sequences.
'pamLength': 3,
},
# Don't create the classifier since we don't need predictions.
'clEnable': False,
'clParams': None,
'anomalyParams': { u'anomalyCacheRecords': None,
u'autoDetectThreshold': None,
u'autoDetectWaitRecords': 2184},
'trainSPNetOnlyIfRequested': False,
},
}
| agpl-3.0 |
ptemplier/ansible | lib/ansible/modules/web_infrastructure/apache2_module.py | 15 | 7074 | #!/usr/bin/python
# coding: utf-8 -*-
# (c) 2013-2014, Christian Berendt <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: apache2_module
version_added: 1.6
author:
- Christian Berendt (@berendt)
- Ralf Hertel (@n0trax)
- Robin Roth (@robinro)
short_description: enables/disables a module of the Apache2 webserver
description:
- Enables or disables a specified module of the Apache2 webserver.
options:
name:
description:
- name of the module to enable/disable
required: true
force:
description:
- force disabling of default modules and override Debian warnings
required: false
choices: ['True', 'False']
default: False
version_added: "2.1"
state:
description:
- indicate the desired state of the resource
choices: ['present', 'absent']
default: present
ignore_configcheck:
description:
- Ignore configuration checks about inconsistent module configuration. Especially for mpm_* modules.
choices: ['True', 'False']
default: False
version_added: "2.3"
requirements: ["a2enmod","a2dismod"]
'''
EXAMPLES = '''
# enables the Apache2 module "wsgi"
- apache2_module:
state: present
name: wsgi
# disables the Apache2 module "wsgi"
- apache2_module:
state: absent
name: wsgi
# disable default modules for Debian
- apache2_module:
state: absent
name: autoindex
force: True
# disable mpm_worker and ignore warnings about missing mpm module
- apache2_module:
state: absent
name: mpm_worker
ignore_configcheck: True
'''
RETURN = '''
result:
description: message about action taken
returned: always
type: string
warnings:
description: list of warning messages
returned: when needed
type: list
rc:
description: return code of underlying command
returned: failed
type: int
stdout:
description: stdout of underlying command
returned: failed
type: string
stderr:
description: stderr of underlying command
returned: failed
type: string
'''
import re
def _run_threaded(module):
control_binary = _get_ctl_binary(module)
result, stdout, stderr = module.run_command("%s -V" % control_binary)
return bool(re.search(r'threaded:[ ]*yes', stdout))
def _get_ctl_binary(module):
for command in ['apache2ctl', 'apachectl']:
ctl_binary = module.get_bin_path(command)
if ctl_binary is not None:
return ctl_binary
module.fail_json(
msg="Neither of apache2ctl nor apachctl found."
" At least one apache control binary is necessary."
)
def _module_is_enabled(module):
control_binary = _get_ctl_binary(module)
name = module.params['name']
ignore_configcheck = module.params['ignore_configcheck']
result, stdout, stderr = module.run_command("%s -M" % control_binary)
if result != 0:
error_msg = "Error executing %s: %s" % (control_binary, stderr)
if ignore_configcheck:
if 'AH00534' in stderr and 'mpm_' in name:
module.warnings.append(
"No MPM module loaded! apache2 reload AND other module actions"
" will fail if no MPM module is loaded immediately."
)
else:
module.warnings.append(error_msg)
return False
else:
module.fail_json(msg=error_msg)
searchstring = ' ' + create_apache_identifier(name)
return searchstring in stdout
def create_apache_identifier(name):
"""
By convention if a module is loaded via name, it appears in apache2ctl -M as
name_module.
Some modules don't follow this convention and we use replacements for those."""
# a2enmod name replacement to apache2ctl -M names
text_workarounds = [
('shib2', 'mod_shib'),
('evasive', 'evasive20_module'),
]
# re expressions to extract subparts of names
re_workarounds = [
('php', r'^(php\d)\.'),
]
for a2enmod_spelling, module_name in text_workarounds:
if a2enmod_spelling in name:
return module_name
for search, reexpr in re_workarounds:
if search in name:
rematch = re.search(reexpr, name)
return rematch.group(1) + '_module'
return name + '_module'
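# Illustrative mappings implied by the workaround tables above (added comment,
# not part of the original module):
#   create_apache_identifier('ssl') -> 'ssl_module' (default naming convention)
#   create_apache_identifier('shib2') -> 'mod_shib' (text workaround)
#   create_apache_identifier('php7.0') -> 'php7_module' (regex workaround)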
def _set_state(module, state):
name = module.params['name']
force = module.params['force']
want_enabled = state == 'present'
state_string = {'present': 'enabled', 'absent': 'disabled'}[state]
a2mod_binary = {'present': 'a2enmod', 'absent': 'a2dismod'}[state]
success_msg = "Module %s %s" % (name, state_string)
if _module_is_enabled(module) != want_enabled:
if module.check_mode:
module.exit_json(changed=True,
result=success_msg,
warnings=module.warnings)
        a2mod_binary_name = a2mod_binary
        a2mod_binary = module.get_bin_path(a2mod_binary_name)
        if a2mod_binary is None:
            module.fail_json(msg="%s not found. Perhaps this system does not use %s to manage apache" % (a2mod_binary_name, a2mod_binary_name))
if not want_enabled and force:
# force exists only for a2dismod on debian
a2mod_binary += ' -f'
result, stdout, stderr = module.run_command("%s %s" % (a2mod_binary, name))
if _module_is_enabled(module) == want_enabled:
module.exit_json(changed=True,
result=success_msg,
warnings=module.warnings)
else:
module.fail_json(msg="Failed to set module %s to %s: %s" % (name, state_string, stdout),
rc=result,
stdout=stdout,
stderr=stderr)
else:
module.exit_json(changed=False,
result=success_msg,
warnings=module.warnings)
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
force=dict(required=False, type='bool', default=False),
state=dict(default='present', choices=['absent', 'present']),
ignore_configcheck=dict(required=False, type='bool', default=False),
),
supports_check_mode=True,
)
module.warnings = []
name = module.params['name']
if name == 'cgi' and _run_threaded(module):
module.fail_json(msg="Your MPM seems to be threaded. No automatic actions on module %s possible." % name)
if module.params['state'] in ['present', 'absent']:
_set_state(module, module.params['state'])
# import module snippets
from ansible.module_utils.basic import AnsibleModule
if __name__ == '__main__':
main()
| gpl-3.0 |
slevenhagen/odoo | addons/knowledge/__openerp__.py | 261 | 1738 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Knowledge Management System',
'version' : '1.0',
'depends' : ['base','base_setup'],
'author' : 'OpenERP SA',
'category': 'Hidden/Dependency',
'description': """
Installer for knowledge-based hidden modules.
=============================================
Makes the Knowledge Application Configuration available, from which you can install
document- and Wiki-based hidden modules.
""",
'website': 'https://www.odoo.com',
'data': [
'security/knowledge_security.xml',
'security/ir.model.access.csv',
'knowledge_view.xml',
'res_config_view.xml',
],
'demo': ['knowledge_demo.xml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yaph/github-data-challenge | language_correlation/d3.py | 1 | 1144 | # -*- coding: utf-8 -*-
import csv, json
# build langs dict
langs = {}
flangs = open('top_langs.json')
top_langs = json.load(flangs)
idx = 0 # zero-based index
for tl in top_langs:
if int(tl['pushes']) >= 100000:
langs[tl['repository_language']] = {'id': idx, 'size': tl['pushes'], 'links': []}
idx += 1
flangs.close()
# add correlation data to langs dict
fcsv = open('language_correlation.csv', 'rb')
reader = csv.reader(fcsv)
headers = reader.next()
for record in reader:
correlation, from_lang, to_lang, created_at = record
correlation = float(correlation)
if from_lang not in langs or to_lang not in langs: continue
langs[from_lang]['links'].append({'target': to_lang, 'value': correlation})
fcsv.close()
nodes = []
links = []
for lang in langs:
if len(langs[lang]['links']) == 0: continue
nodes.append({'name': lang, 'size': langs[lang]['size']})
for link in langs[lang]['links']:
links.append({'source': langs[lang]['id'],
'target': langs[link['target']]['id'],
'value': link['value']})
print json.dumps({'nodes': nodes, 'links': links})
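# Shape of the JSON written to stdout (illustrative values only, added for clarity):
# {"nodes": [{"name": "JavaScript", "size": 1000000}, ...],
#  "links": [{"source": 0, "target": 5, "value": 0.42}, ...]}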
| mit |
yamahata/neutron | neutron/agent/linux/utils.py | 9 | 4436 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Locaweb.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Juliano Martinez, Locaweb.
import fcntl
import os
import shlex
import socket
import struct
import tempfile
from eventlet.green import subprocess
from eventlet import greenthread
from neutron.common import utils
from neutron.openstack.common import excutils
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def create_process(cmd, root_helper=None, addl_env=None):
"""Create a process object for the given command.
The return value will be a tuple of the process object and the
list of command arguments used to create it.
"""
if root_helper:
cmd = shlex.split(root_helper) + cmd
cmd = map(str, cmd)
LOG.debug(_("Running command: %s"), cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)
obj = utils.subprocess_popen(cmd, shell=False,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
return obj, cmd
def execute(cmd, root_helper=None, process_input=None, addl_env=None,
check_exit_code=True, return_stderr=False):
try:
obj, cmd = create_process(cmd, root_helper=root_helper,
addl_env=addl_env)
_stdout, _stderr = (process_input and
obj.communicate(process_input) or
obj.communicate())
obj.stdin.close()
m = _("\nCommand: %(cmd)s\nExit code: %(code)s\nStdout: %(stdout)r\n"
"Stderr: %(stderr)r") % {'cmd': cmd, 'code': obj.returncode,
'stdout': _stdout, 'stderr': _stderr}
LOG.debug(m)
if obj.returncode and check_exit_code:
raise RuntimeError(m)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
return return_stderr and (_stdout, _stderr) or _stdout
def get_interface_mac(interface):
DEVICE_NAME_LEN = 15
MAC_START = 18
MAC_END = 24
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927,
struct.pack('256s', interface[:DEVICE_NAME_LEN]))
return ''.join(['%02x:' % ord(char)
for char in info[MAC_START:MAC_END]])[:-1]
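# Added note (not part of the original module): 0x8927 is the Linux SIOCGIFHWADDR
# ioctl, and bytes 18-24 of the packed ifreq buffer hold the hardware address, so
# e.g. get_interface_mac('eth0') returns a string such as 'fa:16:3e:00:00:01'
# (interface name and address are illustrative).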
def replace_file(file_name, data):
"""Replaces the contents of file_name with data in a safe manner.
First write to a temp file and then rename. Since POSIX renames are
atomic, the file is unlikely to be corrupted by competing writes.
We create the tempfile on the same device to ensure that it can be renamed.
"""
base_dir = os.path.dirname(os.path.abspath(file_name))
tmp_file = tempfile.NamedTemporaryFile('w+', dir=base_dir, delete=False)
tmp_file.write(data)
tmp_file.close()
os.chmod(tmp_file.name, 0o644)
os.rename(tmp_file.name, file_name)
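# Hedged usage sketch (added; the path and payload are made up for illustration):
#
#   replace_file('/var/lib/neutron/dhcp/opts', 'option:dns-server,8.8.8.8\n')
#
# writes the data to a NamedTemporaryFile created in the target's directory, then
# renames it over the target, so readers never see a partially written file.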
def find_child_pids(pid):
"""Retrieve a list of the pids of child processes of the given pid."""
try:
raw_pids = execute(['ps', '--ppid', pid, '-o', 'pid='])
except RuntimeError as e:
# Unexpected errors are the responsibility of the caller
with excutils.save_and_reraise_exception() as ctxt:
# Exception has already been logged by execute
no_children_found = 'Exit code: 1' in str(e)
if no_children_found:
ctxt.reraise = False
return []
return [x.strip() for x in raw_pids.split('\n') if x.strip()]
| apache-2.0 |
gabrielelanaro/pyquante | PyQuante/Basis/sto3g.py | 3 | 62358 | """\
basis_sto3g.dat basis set for use with PyQuante
This program is part of the PyQuante quantum chemistry program suite.
Copyright (c) 2004, Richard P. Muller. All Rights Reserved.
PyQuante version 1.2 and later is covered by the modified BSD
license. Please see the file LICENSE that is part of this
distribution.
"""
basis_data = \
{1: [('S',
[(3.4252509099999999, 0.15432897000000001),
(0.62391373000000006, 0.53532813999999995),
(0.16885539999999999, 0.44463454000000002)])],
2: [('S',
[(6.3624213899999997, 0.15432897000000001),
(1.1589229999999999, 0.53532813999999995),
(0.31364978999999998, 0.44463454000000002)])],
3: [('S',
[(16.119575000000001, 0.15432897000000001),
(2.9362007000000001, 0.53532813999999995),
(0.79465050000000004, 0.44463454000000002)]),
('S',
[(0.63628969999999996, -0.099967230000000004),
(0.14786009999999999, 0.39951282999999999),
(0.048088699999999998, 0.70011546999999996)]),
('P',
[(0.63628969999999996, 0.15591627),
(0.14786009999999999, 0.60768372000000004),
(0.048088699999999998, 0.39195739000000002)])],
4: [('S',
[(30.167871000000002, 0.15432897000000001),
(5.4951153000000001, 0.53532813999999995),
(1.4871927, 0.44463454000000002)]),
('S',
[(1.3148331, -0.099967230000000004),
(0.3055389, 0.39951282999999999),
(0.099370700000000006, 0.70011546999999996)]),
('P',
[(1.3148331, 0.15591627),
(0.3055389, 0.60768372000000004),
(0.099370700000000006, 0.39195739000000002)])],
5: [('S',
[(48.791113000000003, 0.15432897000000001),
(8.8873622000000001, 0.53532813999999995),
(2.4052669999999998, 0.44463454000000002)]),
('S',
[(2.2369561, -0.099967230000000004),
(0.51982050000000002, 0.39951282999999999),
(0.16906180000000001, 0.70011546999999996)]),
('P',
[(2.2369561, 0.15591627),
(0.51982050000000002, 0.60768372000000004),
(0.16906180000000001, 0.39195739000000002)])],
6: [('S',
[(71.616837000000004, 0.15432897000000001),
(13.045095999999999, 0.53532813999999995),
(3.5305122, 0.44463454000000002)]),
('S',
[(2.9412493999999998, -0.099967230000000004),
(0.68348310000000001, 0.39951282999999999),
(0.22228990000000001, 0.70011546999999996)]),
('P',
[(2.9412493999999998, 0.15591627),
(0.68348310000000001, 0.60768372000000004),
(0.22228990000000001, 0.39195739000000002)])],
7: [('S',
[(99.106168999999994, 0.15432897000000001),
(18.052312000000001, 0.53532813999999995),
(4.8856602000000002, 0.44463454000000002)]),
('S',
[(3.7804559000000002, -0.099967230000000004),
(0.87849659999999996, 0.39951282999999999),
(0.28571439999999998, 0.70011546999999996)]),
('P',
[(3.7804559000000002, 0.15591627),
(0.87849659999999996, 0.60768372000000004),
(0.28571439999999998, 0.39195739000000002)])],
8: [('S',
[(130.70931999999999, 0.15432897000000001),
(23.808861, 0.53532813999999995),
(6.4436083000000002, 0.44463454000000002)]),
('S',
[(5.0331513000000001, -0.099967230000000004),
(1.1695960999999999, 0.39951282999999999),
(0.38038899999999998, 0.70011546999999996)]),
('P',
[(5.0331513000000001, 0.15591627),
(1.1695960999999999, 0.60768372000000004),
(0.38038899999999998, 0.39195739000000002)])],
9: [('S',
[(166.67912999999999, 0.15432897000000001),
(30.360811999999999, 0.53532813999999995),
(8.2168206999999995, 0.44463454000000002)]),
('S',
[(6.4648032000000004, -0.099967230000000004),
(1.5022812000000001, 0.39951282999999999),
(0.48858849999999998, 0.70011546999999996)]),
('P',
[(6.4648032000000004, 0.15591627),
(1.5022812000000001, 0.60768372000000004),
(0.48858849999999998, 0.39195739000000002)])],
10: [('S',
[(207.01561000000001, 0.15432897000000001),
(37.708151000000001, 0.53532813999999995),
(10.205297, 0.44463454000000002)]),
('S',
[(8.2463151000000003, -0.099967230000000004),
(1.9162661999999999, 0.39951282999999999),
(0.62322929999999999, 0.70011546999999996)]),
('P',
[(8.2463151000000003, 0.15591627),
(1.9162661999999999, 0.60768372000000004),
(0.62322929999999999, 0.39195739000000002)])],
11: [('S',
[(250.77243000000001, 0.15432896730000001),
(45.678511, 0.53532814230000003),
(12.362387999999999, 0.44463454219999998)]),
('S',
[(12.040193, -0.099967229190000007),
(2.7978819000000001, 0.3995128261),
(0.90995800000000004, 0.70011546889999998)]),
('P',
[(12.040193, 0.15591627499999999),
(2.7978819000000001, 0.60768371860000003),
(0.90995800000000004, 0.3919573931)]),
('S',
[(1.4787406000000001, -0.21962036900000001),
(0.41256490000000001, 0.22559543360000001),
(0.16147510000000001, 0.90039842599999997)]),
('P',
[(1.4787406000000001, 0.01058760429),
(0.41256490000000001, 0.5951670053),
(0.16147510000000001, 0.46200101199999999)])],
12: [('S',
[(299.23739999999998, 0.15432896730000001),
(54.50647, 0.53532814230000003),
(14.751580000000001, 0.44463454219999998)]),
('S',
[(15.12182, -0.099967229190000007),
(3.5139870000000002, 0.3995128261),
(1.142857, 0.70011546889999998)]),
('P',
[(15.12182, 0.15591627499999999),
(3.5139870000000002, 0.60768371860000003),
(1.142857, 0.3919573931)]),
('S',
[(1.395448, -0.21962036900000001),
(0.38932600000000001, 0.22559543360000001),
(0.15237999999999999, 0.90039842599999997)]),
('P',
[(1.395448, 0.01058760429),
(0.38932600000000001, 0.5951670053),
(0.15237999999999999, 0.46200101199999999)])],
13: [('S',
[(351.42147670000003, 0.15432896730000001),
(64.011860670000004, 0.53532814230000003),
(17.324107609999999, 0.44463454219999998)]),
('S',
[(18.899396209999999, -0.099967229190000007),
(4.3918132329999997, 0.3995128261),
(1.4283539700000001, 0.70011546889999998)]),
('P',
[(18.899396209999999, 0.15591627499999999),
(4.3918132329999997, 0.60768371860000003),
(1.4283539700000001, 0.3919573931)]),
('S',
[(1.3954482930000001, -0.21962036900000001),
(0.38932653179999999, 0.22559543360000001),
(0.15237976589999999, 0.90039842599999997)]),
('P',
[(1.3954482930000001, 0.01058760429),
(0.38932653179999999, 0.5951670053),
(0.15237976589999999, 0.46200101199999999)])],
14: [('S',
[(407.79755139999997, 0.15432896730000001),
(74.280833049999998, 0.53532814230000003),
(20.103292289999999, 0.44463454219999998)]),
('S',
[(23.193656059999999, -0.099967229190000007),
(5.3897068709999996, 0.3995128261),
(1.7528999519999999, 0.70011546889999998)]),
('P',
[(23.193656059999999, 0.15591627499999999),
(5.3897068709999996, 0.60768371860000003),
(1.7528999519999999, 0.3919573931)]),
('S',
[(1.4787406219999999, -0.21962036900000001),
(0.41256488009999998, 0.22559543360000001),
(0.1614750979, 0.90039842599999997)]),
('P',
[(1.4787406219999999, 0.01058760429),
(0.41256488009999998, 0.5951670053),
(0.1614750979, 0.46200101199999999)])],
15: [('S',
[(468.3656378, 0.15432896730000001),
(85.313385589999996, 0.53532814230000003),
(23.089131559999998, 0.44463454219999998)]),
('S',
[(28.032639580000001, -0.099967229190000007),
(6.5141825769999997, 0.3995128261),
(2.1186143519999998, 0.70011546889999998)]),
('P',
[(28.032639580000001, 0.15591627499999999),
(6.5141825769999997, 0.60768371860000003),
(2.1186143519999998, 0.3919573931)]),
('S',
[(1.7431032310000001, -0.21962036900000001),
(0.48632137710000001, 0.22559543360000001),
(0.19034289090000001, 0.90039842599999997)]),
('P',
[(1.7431032310000001, 0.01058760429),
(0.48632137710000001, 0.5951670053),
(0.19034289090000001, 0.46200101199999999)])],
16: [('S',
[(533.1257359, 0.15432896730000001),
(97.109518300000005, 0.53532814230000003),
(26.281625420000001, 0.44463454219999998)]),
('S',
[(33.329751729999998, -0.099967229190000007),
(7.7451175210000001, 0.3995128261),
(2.5189525989999999, 0.70011546889999998)]),
('P',
[(33.329751729999998, 0.15591627499999999),
(7.7451175210000001, 0.60768371860000003),
(2.5189525989999999, 0.3919573931)]),
('S',
[(2.029194274, -0.21962036900000001),
(0.56614005180000004, 0.22559543360000001),
(0.22158337920000001, 0.90039842599999997)]),
('P',
[(2.029194274, 0.01058760429),
(0.56614005180000004, 0.5951670053),
(0.22158337920000001, 0.46200101199999999)])],
17: [('S',
[(601.34561359999998, 0.15432896730000001),
(109.5358542, 0.53532814230000003),
(29.644676860000001, 0.44463454219999998)]),
('S',
[(38.96041889, -0.099967229190000007),
(9.0535634770000009, 0.3995128261),
(2.9444998340000001, 0.70011546889999998)]),
('P',
[(38.96041889, 0.15591627499999999),
(9.0535634770000009, 0.60768371860000003),
(2.9444998340000001, 0.3919573931)]),
('S',
[(2.1293864949999999, -0.21962036900000001),
(0.59409342740000004, 0.22559543360000001),
(0.23252414099999999, 0.90039842599999997)]),
('P',
[(2.1293864949999999, 0.01058760429),
(0.59409342740000004, 0.5951670053),
(0.23252414099999999, 0.46200101199999999)])],
18: [('S',
[(674.44651839999995, 0.15432896730000001),
(122.8512753, 0.53532814230000003),
(33.248349449999999, 0.44463454219999998)]),
('S',
[(45.164243919999997, -0.099967229190000007),
(10.495199, 0.3995128261),
(3.4133644479999998, 0.70011546889999998)]),
('P',
[(45.164243919999997, 0.15591627499999999),
(10.495199, 0.60768371860000003),
(3.4133644479999998, 0.3919573931)]),
('S',
[(2.6213665179999999, -0.21962036900000001),
(0.73135460500000005, 0.22559543360000001),
(0.28624723559999998, 0.90039842599999997)]),
('P',
[(2.6213665179999999, 0.01058760429),
(0.73135460500000005, 0.5951670053),
(0.28624723559999998, 0.46200101199999999)])],
19: [('S',
[(771.51036810000005, 0.15432896730000001),
(140.53157659999999, 0.53532814230000003),
(38.033328990000001, 0.44463454219999998)]),
('S',
[(52.402039790000003, -0.099967229199999993),
(12.177107100000001, 0.3995128261),
(3.960373165, 0.70011546889999998)]),
('P',
[(52.402039790000003, 0.15591627499999999),
(12.177107100000001, 0.60768371860000003),
(3.960373165, 0.3919573931)]),
('S',
[(3.6515839849999998, -0.21962036900000001),
(1.0187826630000001, 0.22559543360000001),
(0.3987446295, 0.90039842599999997)]),
('P',
[(3.6515839849999998, 0.010587604299999999),
(1.0187826630000001, 0.5951670053),
(0.3987446295, 0.46200101199999999)]),
('S',
[(0.50398225050000001, -0.30884412150000001),
(0.18600114649999999, 0.0196064117),
(0.082140067430000005, 1.131034442)]),
('P',
[(0.50398225050000001, -0.12154686000000001),
(0.18600114649999999, 0.57152276040000005),
(0.082140067430000005, 0.54989494709999998)])],
20: [('S',
[(854.03249510000001, 0.15432896730000001),
(155.5630851, 0.53532814230000003),
(42.101441790000003, 0.44463454219999998)]),
('S',
[(59.560299440000001, -0.099967229199999993),
(13.840532700000001, 0.3995128261),
(4.5013707969999999, 0.70011546889999998)]),
('P',
[(59.560299440000001, 0.15591627499999999),
(13.840532700000001, 0.60768371860000003),
(4.5013707969999999, 0.3919573931)]),
('S',
[(4.3747062559999996, -0.21962036900000001),
(1.220531941, 0.22559543360000001),
(0.47770793, 0.90039842599999997)]),
('P',
[(4.3747062559999996, 0.010587604299999999),
(1.220531941, 0.5951670053),
(0.47770793, 0.46200101199999999)]),
('S',
[(0.45584897569999999, -0.30884412150000001),
(0.168236941, 0.0196064117),
(0.074295207000000002, 1.131034442)]),
('P',
[(0.45584897569999999, -0.12154686000000001),
(0.168236941, 0.57152276040000005),
(0.074295207000000002, 0.54989494709999998)])],
21: [('S',
[(941.66242499999998, 0.15432896730000001),
(171.5249862, 0.53532814230000003),
(46.421355159999997, 0.44463454219999998)]),
('S',
[(67.176687709999996, -0.099967229199999993),
(15.61041754, 0.3995128261),
(5.0769922779999996, 0.70011546889999998)]),
('P',
[(67.176687709999996, 0.15591627499999999),
(15.61041754, 0.60768371860000003),
(5.0769922779999996, 0.3919573931)]),
('S',
[(4.698159231, -0.2277635023),
(1.4330883130000001, 0.21754360440000001),
(0.55293002400000002, 0.91667696109999997)]),
('P',
[(4.698159231, 0.0049515111999999997),
(1.4330883130000001, 0.57776646909999996),
(0.55293002400000002, 0.4846460366)]),
('D',
[(0.55170006790000004, 0.2197679508),
(0.16828610550000001, 0.65554736270000002),
(0.064930011199999998, 0.28657325900000002)]),
('S',
[(0.63093283840000003, -0.30884412150000001),
(0.2328538976, 0.0196064117),
(0.1028307363, 1.131034442)]),
('P',
[(0.63093283840000003, -0.12154686000000001),
(0.2328538976, 0.57152276040000005),
(0.1028307363, 0.54989494709999998)])],
22: [('S',
[(1033.5712450000001, 0.15432896730000001),
(188.26629260000001, 0.53532814230000003),
(50.952206009999998, 0.44463454219999998)]),
('S',
[(75.251204599999994, -0.099967229199999993),
(17.486761619999999, 0.3995128261),
(5.6872376060000001, 0.70011546889999998)]),
('P',
[(75.251204599999994, 0.15591627499999999),
(17.486761619999999, 0.60768371860000003),
(5.6872376060000001, 0.3919573931)]),
('S',
[(5.3955354739999999, -0.2277635023),
(1.6458102960000001, 0.21754360440000001),
(0.63500477700000002, 0.91667696109999997)]),
('P',
[(5.3955354739999999, 0.0049515111999999997),
(1.6458102960000001, 0.57776646909999996),
(0.63500477700000002, 0.4846460366)]),
('D',
[(1.645981194, 0.2197679508),
(0.50207672800000003, 0.65554736270000002),
(0.19371680999999999, 0.28657325900000002)]),
('S',
[(0.71226402460000005, -0.30884412150000001),
(0.26287022030000001, 0.0196064117),
(0.1160862609, 1.131034442)]),
('P',
[(0.71226402460000005, -0.12154686000000001),
(0.26287022030000001, 0.57152276040000005),
(0.1160862609, 0.54989494709999998)])],
23: [('S',
[(1130.7625169999999, 0.15432896730000001),
(205.9698041, 0.53532814230000003),
(55.743467109999997, 0.44463454219999998)]),
('S',
[(83.783850110000003, -0.099967229199999993),
(19.469564930000001, 0.3995128261),
(6.3321067839999996, 0.70011546889999998)]),
('P',
[(83.783850110000003, 0.15591627499999999),
(19.469564930000001, 0.60768371860000003),
(6.3321067839999996, 0.3919573931)]),
('S',
[(6.1411512760000004, -0.2277635023),
(1.873246881, 0.21754360440000001),
(0.72275688250000003, 0.91667696109999997)]),
('P',
[(6.1411512760000004, 0.0049515111999999997),
(1.873246881, 0.57776646909999996),
(0.72275688250000003, 0.4846460366)]),
('D',
[(2.9648179269999999, 0.2197679508),
(0.90436396760000004, 0.65554736270000002),
(0.34893173370000002, 0.28657325900000002)]),
('S',
[(0.71226402460000005, -0.30884412150000001),
(0.26287022030000001, 0.0196064117),
(0.1160862609, 1.131034442)]),
('P',
[(0.71226402460000005, -0.12154686000000001),
(0.26287022030000001, 0.57152276040000005),
(0.1160862609, 0.54989494709999998)])],
24: [('S',
[(1232.3204499999999, 0.15432896730000001),
(224.46870820000001, 0.53532814230000003),
(60.749992509999998, 0.44463454219999998)]),
('S',
[(92.774624230000001, -0.099967229199999993),
(21.558827489999999, 0.3995128261),
(7.0115998099999999, 0.70011546889999998)]),
('P',
[(92.774624230000001, 0.15591627499999999),
(21.558827489999999, 0.60768371860000003),
(7.0115998099999999, 0.3919573931)]),
('S',
[(6.8994880959999998, -0.2277635023),
(2.104563782, 0.21754360440000001),
(0.81200613430000002, 0.91667696109999997)]),
('P',
[(6.8994880959999998, 0.0049515111999999997),
(2.104563782, 0.57776646909999996),
(0.81200613430000002, 0.4846460366)]),
('D',
[(4.2414792410000004, 0.2197679508),
(1.2937863599999999, 0.65554736270000002),
(0.49918299929999999, 0.28657325900000002)]),
('S',
[(0.75477805369999995, -0.30884412150000001),
(0.27856057080000002, 0.0196064117),
(0.1230152851, 1.131034442)]),
('P',
[(0.75477805369999995, -0.12154686000000001),
(0.27856057080000002, 0.57152276040000005),
(0.1230152851, 0.54989494709999998)])],
25: [('S',
[(1337.153266, 0.15432896730000001),
(243.56413649999999, 0.53532814230000003),
(65.917960620000002, 0.44463454219999998)]),
('S',
[(102.02200209999999, -0.099967229199999993),
(23.707719229999999, 0.3995128261),
(7.7104860979999996, 0.70011546889999998)]),
('P',
[(102.02200209999999, 0.15591627499999999),
(23.707719229999999, 0.60768371860000003),
(7.7104860979999996, 0.3919573931)]),
('S',
[(7.7019609219999996, -0.2277635023),
(2.349343572, 0.21754360440000001),
(0.90644978700000001, 0.91667696109999997)]),
('P',
[(7.7019609219999996, 0.0049515111999999997),
(2.349343572, 0.57776646909999996),
(0.90644978700000001, 0.4846460366)]),
('D',
[(5.4269504609999997, 0.2197679508),
(1.6553928680000001, 0.65554736270000002),
(0.63870203199999998, 0.28657325900000002)]),
('S',
[(0.67098228599999998, -0.30884412150000001),
(0.247634663, 0.0196064117),
(0.109358078, 1.131034442)]),
('P',
[(0.67098228599999998, -0.12154686000000001),
(0.247634663, 0.57152276040000005),
(0.109358078, 0.54989494709999998)])],
26: [('S',
[(1447.4004110000001, 0.15432896730000001),
(263.6457916, 0.53532814230000003),
(71.352840189999995, 0.44463454219999998)]),
('S',
[(111.91948910000001, -0.099967229199999993),
(26.00768236, 0.3995128261),
(8.4585054900000003, 0.70011546889999998)]),
('P',
[(111.91948910000001, 0.15591627499999999),
(26.00768236, 0.60768371860000003),
(8.4585054900000003, 0.3919573931)]),
('S',
[(8.5485697540000007, -0.2277635023),
(2.6075862500000002, 0.21754360440000001),
(1.00608784, 0.91667696109999997)]),
('P',
[(8.5485697540000007, 0.0049515111999999997),
(2.6075862500000002, 0.57776646909999996),
(1.00608784, 0.4846460366)]),
('D',
[(6.4118034750000001, 0.2197679508),
(1.955804428, 0.65554736270000002),
(0.75461015099999995, 0.28657325900000002)]),
('S',
[(0.59211568140000004, -0.30884412150000001),
(0.21852792539999999, 0.0196064117),
(0.096504235899999999, 1.131034442)]),
('P',
[(0.59211568140000004, -0.12154686000000001),
(0.21852792539999999, 0.57152276040000005),
(0.096504235899999999, 0.54989494709999998)])],
27: [('S',
[(1557.298704, 0.15432896730000001),
(283.66390289999998, 0.53532814230000003),
(76.770522339999999, 0.44463454219999998)]),
('S',
[(121.83447409999999, -0.099967229199999993),
(28.311711639999999, 0.3995128261),
(9.2078473209999991, 0.70011546889999998)]),
('P',
[(121.83447409999999, 0.15591627499999999),
(28.311711639999999, 0.60768371860000003),
(9.2078473209999991, 0.3919573931)]),
('S',
[(9.4808516780000005, -0.2277635023),
(2.8919619519999999, 0.21754360440000001),
(1.1158088269999999, 0.91667696109999997)]),
('P',
[(9.4808516780000005, 0.0049515111999999997),
(2.8919619519999999, 0.57776646909999996),
(1.1158088269999999, 0.4846460366)]),
('D',
[(7.6645273889999999, 0.2197679508),
(2.3379251509999999, 0.65554736270000002),
(0.90204420500000004, 0.28657325900000002)]),
('S',
[(0.59211568140000004, -0.30884412150000001),
(0.21852792539999999, 0.0196064117),
(0.096504235899999999, 1.131034442)]),
('P',
[(0.59211568140000004, -0.12154686000000001),
(0.21852792539999999, 0.57152276040000005),
(0.096504235899999999, 0.54989494709999998)])],
28: [('S',
[(1679.7710279999999, 0.15432896730000001),
(305.97238959999999, 0.53532814230000003),
(82.808069430000003, 0.44463454219999998)]),
('S',
[(132.85888990000001, -0.099967229199999993),
(30.87354878, 0.3995128261),
(10.041036269999999, 0.70011546889999998)]),
('P',
[(132.85888990000001, 0.15591627499999999),
(30.87354878, 0.60768371860000003),
(10.041036269999999, 0.3919573931)]),
('S',
[(10.330743350000001, -0.2277635023),
(3.151206003, 0.21754360440000001),
(1.2158332409999999, 0.91667696109999997)]),
('P',
[(10.330743350000001, 0.0049515111999999997),
(3.151206003, 0.57776646909999996),
(1.2158332409999999, 0.4846460366)]),
('D',
[(8.6277227550000006, 0.2197679508),
(2.6317304379999999, 0.65554736270000002),
(1.0154034190000001, 0.28657325900000002)]),
('S',
[(0.63093283840000003, -0.30884412150000001),
(0.2328538976, 0.0196064117),
(0.1028307363, 1.131034442)]),
('P',
[(0.63093283840000003, -0.12154686000000001),
(0.2328538976, 0.57152276040000005),
(0.1028307363, 0.54989494709999998)])],
29: [('S',
[(1801.80673, 0.15432896730000001),
(328.201345, 0.53532814230000003),
(88.824092280000002, 0.44463454219999998)]),
('S',
[(144.1212184, -0.099967229199999993),
(33.490671730000003, 0.3995128261),
(10.892205880000001, 0.70011546889999998)]),
('P',
[(144.1212184, 0.15591627499999999),
(33.490671730000003, 0.60768371860000003),
(10.892205880000001, 0.3919573931)]),
('S',
[(11.307754020000001, -0.2277635023),
(3.4492253970000002, 0.21754360440000001),
(1.330818388, 0.91667696109999997)]),
('P',
[(11.307754020000001, 0.0049515111999999997),
(3.4492253970000002, 0.57776646909999996),
(1.330818388, 0.4846460366)]),
('D',
[(9.6479119299999994, 0.2197679508),
(2.9429206539999999, 0.65554736270000002),
(1.1354702780000001, 0.28657325900000002)]),
('S',
[(0.63093283840000003, -0.30884412150000001),
(0.2328538976, 0.0196064117),
(0.1028307363, 1.131034442)]),
('P',
[(0.63093283840000003, -0.12154686000000001),
(0.2328538976, 0.57152276040000005),
(0.1028307363, 0.54989494709999998)])],
30: [('S',
[(1929.4323010000001, 0.15432896730000001),
(351.44850209999998, 0.53532814230000003),
(95.115680209999994, 0.44463454219999998)]),
('S',
[(155.84167550000001, -0.099967229199999993),
(36.214253909999996, 0.3995128261),
(11.777999339999999, 0.70011546889999998)]),
('P',
[(155.84167550000001, 0.15591627499999999),
(36.214253909999996, 0.60768371860000003),
(11.777999339999999, 0.3919573931)]),
('S',
[(12.28152744, -0.2277635023),
(3.7462573269999999, 0.21754360440000001),
(1.4454225409999999, 0.91667696109999997)]),
('P',
[(12.28152744, 0.0049515111999999997),
(3.7462573269999999, 0.57776646909999996),
(1.4454225409999999, 0.4846460366)]),
('D',
[(10.947370769999999, 0.2197679508),
(3.3392970179999999, 0.65554736270000002),
(1.288404602, 0.28657325900000002)]),
('S',
[(0.88971388539999996, -0.30884412150000001),
(0.32836037899999998, 0.0196064117),
(0.14500740549999999, 1.131034442)]),
('P',
[(0.88971388539999996, -0.12154686000000001),
(0.32836037899999998, 0.57152276040000005),
(0.14500740549999999, 0.54989494709999998)])],
31: [('S',
[(2061.424532, 0.15432896730000001),
(375.49105170000001, 0.53532814230000003),
(101.6225324, 0.44463454219999998)]),
('S',
[(167.76186799999999, -0.099967229199999993),
(38.984250279999998, 0.3995128261),
(12.678888130000001, 0.70011546889999998)]),
('P',
[(167.76186799999999, 0.15591627499999999),
(38.984250279999998, 0.60768371860000003),
(12.678888130000001, 0.3919573931)]),
('S',
[(12.6150552, -0.2277635023),
(3.8479939270000001, 0.21754360440000001),
(1.4846756839999999, 0.91667696109999997)]),
('P',
[(12.6150552, 0.0049515111999999997),
(3.8479939270000001, 0.57776646909999996),
(1.4846756839999999, 0.4846460366)]),
('D',
[(12.6150552, 0.2197679508),
(3.8479939270000001, 0.65554736270000002),
(1.4846756839999999, 0.28657325900000002)]),
('S',
[(0.79852437359999995, -0.30884412150000001),
(0.29470571410000002, 0.0196064117),
(0.13014515060000001, 1.131034442)]),
('P',
[(0.79852437359999995, -0.12154686000000001),
(0.29470571410000002, 0.57152276040000005),
(0.13014515060000001, 0.54989494709999998)])],
32: [('S',
[(2196.3842289999998, 0.15432896730000001),
(400.07412920000002, 0.53532814230000003),
(108.27567259999999, 0.44463454219999998)]),
('S',
[(180.389038, -0.099967229199999993),
(41.91853304, 0.3995128261),
(13.633207949999999, 0.70011546889999998)]),
('P',
[(180.389038, 0.15591627499999999),
(41.91853304, 0.60768371860000003),
(13.633207949999999, 0.3919573931)]),
('S',
[(14.196656190000001, -0.2277635023),
(4.3304326399999997, 0.21754360440000001),
(1.670815538, 0.91667696109999997)]),
('P',
[(14.196656190000001, 0.0049515111999999997),
(4.3304326399999997, 0.57776646909999996),
(1.670815538, 0.4846460366)]),
('D',
[(14.196656190000001, 0.2197679508),
(4.3304326399999997, 0.65554736270000002),
(1.670815538, 0.28657325900000002)]),
('S',
[(0.98583255999999997, -0.30884412150000001),
(0.36383421500000002, 0.0196064117),
(0.1606730254, 1.131034442)]),
('P',
[(0.98583255999999997, -0.12154686000000001),
(0.36383421500000002, 0.57152276040000005),
(0.1606730254, 0.54989494709999998)])],
33: [('S',
[(2337.0656730000001, 0.15432896730000001),
(425.69942980000002, 0.53532814230000003),
(115.21087900000001, 0.44463454219999998)]),
('S',
[(193.19705350000001, -0.099967229199999993),
(44.8948404, 0.3995128261),
(14.601195479999999, 0.70011546889999998)]),
('P',
[(193.19705350000001, 0.15591627499999999),
(44.8948404, 0.60768371860000003),
(14.601195479999999, 0.3919573931)]),
('S',
[(15.87163584, -0.2277635023),
(4.8413548190000002, 0.21754360440000001),
(1.8679451979999999, 0.91667696109999997)]),
('P',
[(15.87163584, 0.0049515111999999997),
(4.8413548190000002, 0.57776646909999996),
(1.8679451979999999, 0.4846460366)]),
('D',
[(15.87163584, 0.2197679508),
(4.8413548190000002, 0.65554736270000002),
(1.8679451979999999, 0.28657325900000002)]),
('S',
[(1.1076814639999999, -0.30884412150000001),
(0.40880412389999998, 0.0196064117),
(0.1805322114, 1.131034442)]),
('P',
[(1.1076814639999999, -0.12154686000000001),
(0.40880412389999998, 0.57152276040000005),
(0.1805322114, 0.54989494709999998)])],
34: [('S',
[(2480.6268140000002, 0.15432896730000001),
(451.8492708, 0.53532814230000003),
(122.2880464, 0.44463454219999998)]),
('S',
[(206.15787800000001, -0.099967229199999993),
(47.906657269999997, 0.3995128261),
(15.580731800000001, 0.70011546889999998)]),
('P',
[(206.15787800000001, 0.15591627499999999),
(47.906657269999997, 0.60768371860000003),
(15.580731800000001, 0.3919573931)]),
('S',
[(17.639994139999999, -0.2277635023),
(5.3807604649999998, 0.21754360440000001),
(2.0760646660000002, 0.91667696109999997)]),
('P',
[(17.639994139999999, 0.0049515111999999997),
(5.3807604649999998, 0.57776646909999996),
(2.0760646660000002, 0.4846460366)]),
('D',
[(17.639994139999999, 0.2197679508),
(5.3807604649999998, 0.65554736270000002),
(2.0760646660000002, 0.28657325900000002)]),
('S',
[(1.214644297, -0.30884412150000001),
(0.44828013630000002, 0.0196064117),
(0.19796523460000001, 1.131034442)]),
('P',
[(1.214644297, -0.12154686000000001),
(0.44828013630000002, 0.57152276040000005),
(0.19796523460000001, 0.54989494709999998)])],
35: [('S',
[(2629.9974710000001, 0.15432896730000001),
(479.05732239999998, 0.53532814230000003),
(129.65160700000001, 0.44463454219999998)]),
('S',
[(219.8350255, -0.099967229199999993),
(51.084932219999999, 0.3995128261),
(16.61440546, 0.70011546889999998)]),
('P',
[(219.8350255, 0.15591627499999999),
(51.084932219999999, 0.60768371860000003),
(16.61440546, 0.3919573931)]),
('S',
[(19.50173109, -0.2277635023),
(5.9486495770000003, 0.21754360440000001),
(2.2951739400000002, 0.91667696109999997)]),
('P',
[(19.50173109, 0.0049515111999999997),
(5.9486495770000003, 0.57776646909999996),
(2.2951739400000002, 0.4846460366)]),
('D',
[(19.50173109, 0.2197679508),
(5.9486495770000003, 0.65554736270000002),
(2.2951739400000002, 0.28657325900000002)]),
('S',
[(1.3960374879999999, -0.30884412150000001),
(0.51522563180000003, 0.0196064117),
(0.2275290713, 1.131034442)]),
('P',
[(1.3960374879999999, -0.12154686000000001),
(0.51522563180000003, 0.57152276040000005),
(0.2275290713, 0.54989494709999998)])],
36: [('S',
[(2782.1600549999998, 0.15432896730000001),
(506.77392700000001, 0.53532814230000003),
(137.15280189999999, 0.44463454219999998)]),
('S',
[(233.95141179999999, -0.099967229199999993),
(54.365276809999997, 0.3995128261),
(17.681275329999998, 0.70011546889999998)]),
('P',
[(233.95141179999999, 0.15591627499999999),
(54.365276809999997, 0.60768371860000003),
(17.681275329999998, 0.3919573931)]),
('S',
[(21.456846710000001, -0.2277635023),
(6.5450221559999999, 0.21754360440000001),
(2.5252730209999998, 0.91667696109999997)]),
('P',
[(21.456846710000001, 0.0049515111999999997),
(6.5450221559999999, 0.57776646909999996),
(2.5252730209999998, 0.4846460366)]),
('D',
[(21.456846710000001, 0.2197679508),
(6.5450221559999999, 0.65554736270000002),
(2.5252730209999998, 0.28657325900000002)]),
('S',
[(1.5900493360000001, -0.30884412150000001),
(0.58682820530000002, 0.0196064117),
(0.25914952270000002, 1.131034442)]),
('P',
[(1.5900493360000001, -0.12154686000000001),
(0.58682820530000002, 0.57152276040000005),
(0.25914952270000002, 0.54989494709999998)])],
37: [('S',
[(2938.601529, 0.15432896730000001),
(535.26993700000003, 0.53532814230000003),
(144.86493400000001, 0.44463454219999998)]),
('S',
[(248.507037, -0.099967229199999993),
(57.747691000000003, 0.3995128261),
(18.781341000000001, 0.70011546889999998)]),
('P',
[(248.507037, 0.15591627499999999),
(57.747691000000003, 0.60768371860000003),
(18.781341000000001, 0.3919573931)]),
('S',
[(23.505340969999999, -0.2277635023),
(7.1698782010000004, 0.21754360440000001),
(2.766361909, 0.91667696109999997)]),
('P',
[(23.505340969999999, 0.0049515111000000001),
(7.1698782010000004, 0.57776646909999996),
(2.766361909, 0.4846460366)]),
('D',
[(23.505340969999999, 0.2197679508),
(7.1698782010000004, 0.65554736270000002),
(2.766361909, 0.28657325900000002)]),
('S',
[(2.24779682, -0.30884412150000001),
(0.82957839300000003, 0.0196064117),
(0.36635056500000002, 1.131034442)]),
('P',
[(2.24779682, -0.12154686000000001),
(0.82957839300000003, 0.57152276040000005),
(0.36635056500000002, 0.54989494709999998)]),
('S',
[(0.48699399189999998, -0.38426426070000003),
(0.26221615650000002, -0.1972567438),
(0.1158254875, 1.3754955120000001)]),
('P',
[(0.48699399189999998, -0.34816915259999998),
(0.26221615650000002, 0.62903236900000004),
(0.1158254875, 0.66628327430000001)])],
38: [('S',
[(3100.9839510000002, 0.15432896730000001),
(564.84809780000001, 0.53532814230000003),
(152.86993889999999, 0.44463454219999998)]),
('S',
[(263.50190070000002, -0.099967229199999993),
(61.232174929999999, 0.3995128261),
(19.914603719999999, 0.70011546889999998)]),
('P',
[(263.50190070000002, 0.15591627499999999),
(61.232174929999999, 0.60768371860000003),
(19.914603719999999, 0.3919573931)]),
('S',
[(25.578866919999999, -0.2277635023),
(7.8023697070000004, 0.21754360440000001),
(3.010396794, 0.91667696109999997)]),
('P',
[(25.578866919999999, 0.0049515111000000001),
(7.8023697070000004, 0.57776646909999996),
(3.010396794, 0.4846460366)]),
('D',
[(25.578866919999999, 0.2197679508),
(7.8023697070000004, 0.65554736270000002),
(3.010396794, 0.28657325900000002)]),
('S',
[(2.4610324029999999, -0.30884412150000001),
(0.90827573399999995, 0.0196064117),
(0.40110414, 1.131034442)]),
('P',
[(2.4610324029999999, -0.12154686000000001),
(0.90827573399999995, 0.57152276040000005),
(0.40110414, 0.54989494709999998)]),
('S',
[(0.43708048030000002, -0.38426426070000003),
(0.23534081640000001, -0.1972567438),
(0.1039541771, 1.3754955120000001)]),
('P',
[(0.43708048030000002, -0.34816915259999998),
(0.23534081640000001, 0.62903236900000004),
(0.1039541771, 0.66628327430000001)])],
39: [('S',
[(3266.0268689999998, 0.15432896730000001),
(594.91087100000004, 0.53532814230000003),
(161.00609900000001, 0.44463454219999998)]),
('S',
[(277.937724, -0.099967229199999993),
(64.586749999999995, 0.3995128261),
(21.005616, 0.70011546889999998)]),
('P',
[(277.937724, 0.15591627499999999),
(64.586749999999995, 0.60768371860000003),
(21.005616, 0.3919573931)]),
('S',
[(30.671326000000001, -0.2277635023),
(8.5572219999999994, 0.21754360440000001),
(3.3492389999999999, 0.91667696109999997)]),
('P',
[(30.671326000000001, 0.0049515111000000001),
(8.5572219999999994, 0.57776646909999996),
(3.3492389999999999, 0.4846460366)]),
('D',
[(5.6600429999999999, 0.2197679508),
(1.774715, 0.65554736270000002),
(0.69129499999999999, 0.28657325900000002)]),
('S',
[(2.6676880000000001, -0.33061006259999998),
(0.98454399999999997, 0.057610953299999997),
(0.43478499999999998, 1.15578745)]),
('P',
[(2.6676880000000001, -0.1283927634),
(0.98454399999999997, 0.5852047641),
(0.43478499999999998, 0.54394420399999999)]),
('D',
[(2.128212, 0.12506621379999999),
(0.96259399999999995, 0.66867855769999995),
(0.47286099999999998, 0.30524682450000001)]),
('S',
[(0.207424, -0.38426426070000003),
(0.11168500000000001, -0.1972567438),
(0.049333000000000002, 1.3754955120000001)]),
('P',
[(0.207424, -0.34816915259999998),
(0.11168500000000001, 0.62903236900000004),
(0.049333000000000002, 0.66628327430000001)])],
40: [('S',
[(3435.348677, 0.15432896730000001),
(625.75304979999999, 0.53532814230000003),
(169.35319580000001, 0.44463454219999998)]),
('S',
[(293.78302919999999, -0.099967229199999993),
(68.268857969999999, 0.3995128261),
(22.203151439999999, 0.70011546889999998)]),
('P',
[(293.78302919999999, 0.15591627499999999),
(68.268857969999999, 0.60768371860000003),
(22.203151439999999, 0.3919573931)]),
('S',
[(30.73293103, -0.2277635023),
(9.374523538, 0.21754360440000001),
(3.6169826180000002, 0.91667696109999997)]),
('P',
[(30.73293103, 0.0049515111000000001),
(9.374523538, 0.57776646909999996),
(3.6169826180000002, 0.4846460366)]),
('D',
[(30.73293103, 0.2197679508),
(9.374523538, 0.65554736270000002),
(3.6169826180000002, 0.28657325900000002)]),
('S',
[(2.8276078149999999, -0.33061006259999998),
(1.1010558269999999, 0.057610953299999997),
(0.48468748560000002, 1.15578745)]),
('P',
[(2.8276078149999999, -0.1283927634),
(1.1010558269999999, 0.5852047641),
(0.48468748560000002, 0.54394420399999999)]),
('D',
[(0.48699399189999998, 0.12506621379999999),
(0.26221615650000002, 0.66867855769999995),
(0.1158254875, 0.30524682450000001)]),
('S',
[(0.88783018869999997, -0.38426426070000003),
(0.34571647360000002, -0.1972567438),
(0.15218524280000001, 1.3754955120000001)]),
('P',
[(0.88783018869999997, -0.34816915259999998),
(0.34571647360000002, 0.62903236900000004),
(0.15218524280000001, 0.66628327430000001)])],
41: [('S',
[(3610.7428639999998, 0.15432896730000001),
(657.70132009999998, 0.53532814230000003),
(177.99964449999999, 0.44463454219999998)]),
('S',
[(310.06757279999999, -0.099967229199999993),
(72.053035690000002, 0.3995128261),
(23.433883479999999, 0.70011546889999998)]),
('P',
[(310.06757279999999, 0.15591627499999999),
(72.053035690000002, 0.60768371860000003),
(23.433883479999999, 0.3919573931)]),
('S',
[(33.01997858, -0.2277635023),
(10.07214594, 0.21754360440000001),
(3.8861470279999999, 0.91667696109999997)]),
('P',
[(33.01997858, 0.0049515111000000001),
(10.07214594, 0.57776646909999996),
(3.8861470279999999, 0.4846460366)]),
('D',
[(33.01997858, 0.2197679508),
(10.07214594, 0.65554736270000002),
(3.8861470279999999, 0.28657325900000002)]),
('S',
[(3.1447984299999998, -0.33061006259999998),
(1.224568208, 0.057610953299999997),
(0.53905793989999995, 1.15578745)]),
('P',
[(3.1447984299999998, -0.1283927634),
(1.224568208, 0.5852047641),
(0.53905793989999995, 0.54394420399999999)]),
('D',
[(1.344878866, 0.12506621379999999),
(0.52368885939999998, 0.66867855769999995),
(0.23052912510000001, 0.30524682450000001)]),
('S',
[(0.48699399189999998, -0.38426426070000003),
(0.26221615650000002, -0.1972567438),
(0.1158254875, 1.3754955120000001)]),
('P',
[(0.48699399189999998, -0.34816915259999998),
(0.26221615650000002, 0.62903236900000004),
(0.1158254875, 0.66628327430000001)])],
42: [('S',
[(3788.666115, 0.15432896730000001),
(690.11026230000004, 0.53532814230000003),
(186.7707691, 0.44463454219999998)]),
('S',
[(326.43095670000002, -0.099967229199999993),
(75.855534199999994, 0.3995128261),
(24.670574009999999, 0.70011546889999998)]),
('P',
[(326.43095670000002, 0.15591627499999999),
(75.855534199999994, 0.60768371860000003),
(24.670574009999999, 0.3919573931)]),
('S',
[(35.469481289999997, -0.2277635023),
(10.819322339999999, 0.21754360440000001),
(4.174430912, 0.91667696109999997)]),
('P',
[(35.469481289999997, 0.0049515111000000001),
(10.819322339999999, 0.57776646909999996),
(4.174430912, 0.4846460366)]),
('D',
[(35.469481289999997, 0.2197679508),
(10.819322339999999, 0.65554736270000002),
(4.174430912, 0.28657325900000002)]),
('S',
[(3.4968951879999999, -0.33061006259999998),
(1.361672861, 0.057610953299999997),
(0.59941174559999999, 1.15578745)]),
('P',
[(3.4968951879999999, -0.1283927634),
(1.361672861, 0.5852047641),
(0.59941174559999999, 0.54394420399999999)]),
('D',
[(1.7021123149999999, 0.12506621379999999),
(0.66279371269999998, 0.66867855769999995),
(0.29176342399999999, 0.30524682450000001)]),
('S',
[(0.51296250809999999, -0.38426426070000003),
(0.27619859699999999, -0.1972567438),
(0.1220017773, 1.3754955120000001)]),
('P',
[(0.51296250809999999, -0.34816915259999998),
(0.27619859699999999, 0.62903236900000004),
(0.1220017773, 0.66628327430000001)])],
43: [('S',
[(3970.8682570000001, 0.15432896730000001),
(723.29860980000001, 0.53532814230000003),
(195.75283110000001, 0.44463454219999998)]),
('S',
[(343.58463230000001, -0.099967229199999993),
(79.84167952, 0.3995128261),
(25.966992189999999, 0.70011546889999998)]),
('P',
[(343.58463230000001, 0.15591627499999999),
(79.84167952, 0.60768371860000003),
(25.966992189999999, 0.3919573931)]),
('S',
[(38.089919829999999, -0.2277635023),
(11.61863962, 0.21754360440000001),
(4.4828323670000003, 0.91667696109999997)]),
('P',
[(38.089919829999999, 0.0049515111000000001),
(11.61863962, 0.57776646909999996),
(4.4828323670000003, 0.4846460366)]),
('D',
[(38.089919829999999, 0.2197679508),
(11.61863962, 0.65554736270000002),
(4.4828323670000003, 0.28657325900000002)]),
('S',
[(3.829752708, -0.33061006259999998),
(1.491285854, 0.057610953299999997),
(0.65646770399999999, 1.15578745)]),
('P',
[(3.829752708, -0.1283927634),
(1.491285854, 0.5852047641),
(0.65646770399999999, 0.54394420399999999)]),
('D',
[(2.1013732279999999, 0.12506621379999999),
(0.81826384279999997, 0.66867855769999995),
(0.36020175799999998, 0.30524682450000001)]),
('S',
[(0.4616999826, -0.38426426070000003),
(0.24859689630000001, -0.1972567438),
(0.1098096207, 1.3754955120000001)]),
('P',
[(0.4616999826, -0.34816915259999998),
(0.24859689630000001, 0.62903236900000004),
(0.1098096207, 0.66628327430000001)])],
44: [('S',
[(4159.2742099999996, 0.15432896730000001),
(757.61698939999997, 0.53532814230000003),
(205.04072389999999, 0.44463454219999998)]),
('S',
[(360.79865610000002, -0.099967229199999993),
(83.841848429999999, 0.3995128261),
(27.26797127, 0.70011546889999998)]),
('P',
[(360.79865610000002, 0.15591627499999999),
(83.841848429999999, 0.60768371860000003),
(27.26797127, 0.3919573931)]),
('S',
[(40.717516779999997, -0.2277635023),
(12.420140440000001, 0.21754360440000001),
(4.7920763019999999, 0.91667696109999997)]),
('P',
[(40.717516779999997, 0.0049515111000000001),
(12.420140440000001, 0.57776646909999996),
(4.7920763019999999, 0.4846460366)]),
('D',
[(40.717516779999997, 0.2197679508),
(12.420140440000001, 0.65554736270000002),
(4.7920763019999999, 0.28657325900000002)]),
('S',
[(4.1975163709999999, -0.33061006259999998),
(1.6344911179999999, 0.057610953299999997),
(0.71950701390000005, 1.15578745)]),
('P',
[(4.1975163709999999, -0.1283927634),
(1.6344911179999999, 0.5852047641),
(0.71950701390000005, 0.54394420399999999)]),
('D',
[(2.3908957609999999, 0.12506621379999999),
(0.93100241669999995, 0.66867855769999995),
(0.40982955580000002, 0.30524682450000001)]),
('S',
[(0.41313548480000001, -0.38426426070000003),
(0.2224479167, -0.1972567438),
(0.098259156599999994, 1.3754955120000001)]),
('P',
[(0.41313548480000001, -0.34816915259999998),
(0.2224479167, 0.62903236900000004),
(0.098259156599999994, 0.66628327430000001)])],
45: [('S',
[(4350.0777939999998, 0.15432896730000001),
(792.37210049999999, 0.53532814230000003),
(214.4468133, 0.44463454219999998)]),
('S',
[(378.43342639999997, -0.099967229199999993),
(87.939789809999994, 0.3995128261),
(28.60074899, 0.70011546889999998)]),
('P',
[(378.43342639999997, 0.15591627499999999),
(87.939789809999994, 0.60768371860000003),
(28.60074899, 0.3919573931)]),
('S',
[(43.521794550000003, -0.2277635023),
(13.275534540000001, 0.21754360440000001),
(5.1221139390000001, 0.91667696109999997)]),
('P',
[(43.521794550000003, 0.0049515111000000001),
(13.275534540000001, 0.57776646909999996),
(5.1221139390000001, 0.4846460366)]),
('D',
[(43.521794550000003, 0.2197679508),
(13.275534540000001, 0.65554736270000002),
(5.1221139390000001, 0.28657325900000002)]),
('S',
[(4.5408574079999999, -0.33061006259999998),
(1.768186338, 0.057610953299999997),
(0.77835997889999997, 1.15578745)]),
('P',
[(4.5408574079999999, -0.1283927634),
(1.768186338, 0.5852047641),
(0.77835997889999997, 0.54394420399999999)]),
('D',
[(2.779066094, 0.12506621379999999),
(1.082153932, 0.66867855769999995),
(0.47636682499999999, 0.30524682450000001)]),
('S',
[(0.41313548480000001, -0.38426426070000003),
(0.2224479167, -0.1972567438),
(0.098259156599999994, 1.3754955120000001)]),
('P',
[(0.41313548480000001, -0.34816915259999998),
(0.2224479167, 0.62903236900000004),
(0.098259156599999994, 0.66628327430000001)])],
46: [('S',
[(4545.160269, 0.15432896730000001),
(827.90661680000005, 0.53532814230000003),
(224.06384019999999, 0.44463454219999998)]),
('S',
[(396.48894330000002, -0.099967229199999993),
(92.135503650000004, 0.3995128261),
(29.965325350000001, 0.70011546889999998)]),
('P',
[(396.48894330000002, 0.15591627499999999),
(92.135503650000004, 0.60768371860000003),
(29.965325350000001, 0.3919573931)]),
('S',
[(46.41945097, -0.2277635023),
(14.15941211, 0.21754360440000001),
(5.463141383, 0.91667696109999997)]),
('P',
[(46.41945097, 0.0049515111000000001),
(14.15941211, 0.57776646909999996),
(5.463141383, 0.4846460366)]),
('D',
[(46.41945097, 0.2197679508),
(14.15941211, 0.65554736270000002),
(5.463141383, 0.28657325900000002)]),
('S',
[(4.9191045889999998, -0.33061006259999998),
(1.91547383, 0.057610953299999997),
(0.84319629539999996, 1.15578745)]),
('P',
[(4.9191045889999998, -0.1283927634),
(1.91547383, 0.5852047641),
(0.84319629539999996, 0.54394420399999999)]),
('D',
[(3.0259774479999999, 0.12506621379999999),
(1.178299934, 0.66867855769999995),
(0.5186905316, 0.30524682450000001)]),
('S',
[(0.43708048030000002, -0.38426426070000003),
(0.23534081640000001, -0.1972567438),
(0.1039541771, 1.3754955120000001)]),
('P',
[(0.43708048030000002, -0.34816915259999998),
(0.23534081640000001, 0.62903236900000004),
(0.1039541771, 0.66628327430000001)])],
47: [('S',
[(4744.5216339999997, 0.15432896730000001),
(864.22053830000004, 0.53532814230000003),
(233.89180450000001, 0.44463454219999998)]),
('S',
[(414.9652069, -0.099967229199999993),
(96.428989950000002, 0.3995128261),
(31.36170035, 0.70011546889999998)]),
('P',
[(414.9652069, 0.15591627499999999),
(96.428989950000002, 0.60768371860000003),
(31.36170035, 0.3919573931)]),
('S',
[(49.410486050000003, -0.2277635023),
(15.071773139999999, 0.21754360440000001),
(5.8151586340000003, 0.91667696109999997)]),
('P',
[(49.410486050000003, 0.0049515111000000001),
(15.071773139999999, 0.57776646909999996),
(5.8151586340000003, 0.4846460366)]),
('D',
[(49.410486050000003, 0.2197679508),
(15.071773139999999, 0.65554736270000002),
(5.8151586340000003, 0.28657325900000002)]),
('S',
[(5.2902304500000001, -0.33061006259999998),
(2.0599883160000001, 0.057610953299999997),
(0.9068119281, 1.15578745)]),
('P',
[(5.2902304500000001, -0.1283927634),
(2.0599883160000001, 0.5852047641),
(0.9068119281, 0.54394420399999999)]),
('D',
[(3.2833956679999998, 0.12506621379999999),
(1.278537254, 0.66867855769999995),
(0.56281524689999995, 0.30524682450000001)]),
('S',
[(0.43708048030000002, -0.38426426070000003),
(0.23534081640000001, -0.1972567438),
(0.1039541771, 1.3754955120000001)]),
('P',
[(0.43708048030000002, -0.34816915259999998),
(0.23534081640000001, 0.62903236900000004),
(0.1039541771, 0.66628327430000001)])],
48: [('S',
[(4950.2619050000003, 0.15432896730000001),
(901.69638559999999, 0.53532814230000003),
(244.03423129999999, 0.44463454219999998)]),
('S',
[(433.44693849999999, -0.099967229199999993),
(100.72374689999999, 0.3995128261),
(32.758488610000001, 0.70011546889999998)]),
('P',
[(433.44693849999999, 0.15591627499999999),
(100.72374689999999, 0.60768371860000003),
(32.758488610000001, 0.3919573931)]),
('S',
[(52.592792350000003, -0.2277635023),
(16.042477999999999, 0.21754360440000001),
(6.1896867440000003, 0.91667696109999997)]),
('P',
[(52.592792350000003, 0.0049515111000000001),
(16.042477999999999, 0.57776646909999996),
(6.1896867440000003, 0.4846460366)]),
('D',
[(52.592792350000003, 0.2197679508),
(16.042477999999999, 0.65554736270000002),
(6.1896867440000003, 0.28657325900000002)]),
('S',
[(5.6748517959999996, -0.33061006259999998),
(2.2097578750000002, 0.057610953299999997),
(0.97274085659999998, 1.15578745)]),
('P',
[(5.6748517959999996, -0.1283927634),
(2.2097578750000002, 0.5852047641),
(0.97274085659999998, 0.54394420399999999)]),
('D',
[(3.6429639759999999, 0.12506621379999999),
(1.4185512899999999, 0.66867855769999995),
(0.62444977000000002, 0.30524682450000001)]),
('S',
[(0.59491509809999998, -0.38426426070000003),
(0.32032500000000003, -0.1972567438),
(0.14149318550000001, 1.3754955120000001)]),
('P',
[(0.59491509809999998, -0.34816915259999998),
(0.32032500000000003, 0.62903236900000004),
(0.14149318550000001, 0.66628327430000001)])],
49: [('S',
[(5158.2247139999999, 0.15432896730000001),
(939.57707070000004, 0.53532814230000003),
(254.2862231, 0.44463454219999998)]),
('S',
[(452.33132230000001, -0.099967229199999993),
(105.11207159999999, 0.3995128261),
(34.185707989999997, 0.70011546889999998)]),
('P',
[(452.33132230000001, 0.15591627499999999),
(105.11207159999999, 0.60768371860000003),
(34.185707989999997, 0.3919573931)]),
('S',
[(55.975397690000001, -0.2277635023),
(17.074280439999999, 0.21754360440000001),
(6.5877882039999998, 0.91667696109999997)]),
('P',
[(55.975397690000001, 0.0049515111999999997),
(17.074280439999999, 0.57776646909999996),
(6.5877882039999998, 0.4846460366)]),
('D',
[(55.975397690000001, 0.2197679508),
(17.074280439999999, 0.65554736270000002),
(6.5877882039999998, 0.28657325900000002)]),
('S',
[(5.0485491800000002, -0.33061006259999998),
(1.9658788819999999, 0.057610953399999998),
(0.86538472369999997, 1.1155787450000001)]),
('P',
[(5.0485491800000002, -0.1283927634),
(1.9658788819999999, 0.5852047641),
(0.86538472369999997, 0.54394420399999999)]),
('D',
[(5.0485491800000002, 0.12506621379999999),
(1.9658788819999999, 0.66867855769999995),
(0.86538472369999997, 0.30524682450000001)]),
('S',
[(0.56692306120000002, -0.38426426070000003),
(0.30525301869999999, -0.1972567438),
(0.13483562639999999, 1.3754955120000001)]),
('P',
[(0.56692306120000002, -0.34816915259999998),
(0.30525301869999999, 0.62903236900000004),
(0.13483562639999999, 0.66628327430000001)])],
50: [('S',
[(5370.4664130000001, 0.15432896730000001),
(978.23716109999998, 0.53532814230000003),
(264.74915220000003, 0.44463454219999998)]),
('S',
[(472.0515322, -0.099967229199999993),
(109.6946243, 0.3995128261),
(35.676096360000003, 0.70011546889999998)]),
('P',
[(472.0515322, 0.15591627499999999),
(109.6946243, 0.60768371860000003),
(35.676096360000003, 0.3919573931)]),
('S',
[(59.151411879999998, -0.2277635023),
(18.043066, 0.21754360440000001),
(6.9615757900000004, 0.91667696109999997)]),
('P',
[(59.151411879999998, 0.0049515111999999997),
(18.043066, 0.57776646909999996),
(6.9615757900000004, 0.4846460366)]),
('D',
[(59.151411879999998, 0.2197679508),
(18.043066, 0.65554736270000002),
(6.9615757900000004, 0.28657325900000002)]),
('S',
[(5.5831385290000002, -0.33061006259999998),
(2.174045204, 0.057610953399999998),
(0.95702005089999997, 1.1155787450000001)]),
('P',
[(5.5831385290000002, -0.1283927634),
(2.174045204, 0.5852047641),
(0.95702005089999997, 0.54394420399999999)]),
('D',
[(5.5831385290000002, 0.12506621379999999),
(2.174045204, 0.66867855769999995),
(0.95702005089999997, 0.30524682450000001)]),
('S',
[(0.62358164199999999, -0.38426426070000003),
(0.3357601616, -0.1972567438),
(0.1483111678, 1.3754955120000001)]),
('P',
[(0.62358164199999999, -0.34816915259999998),
(0.3357601616, 0.62903236900000004),
(0.1483111678, 0.66628327430000001)])],
51: [('S',
[(5586.9870019999998, 0.15432896730000001),
(1017.676657, 0.53532814230000003),
(275.42301889999999, 0.44463454219999998)]),
('S',
[(492.19248879999998, -0.099967229199999993),
(114.37494940000001, 0.3995128261),
(37.198283359999998, 0.70011546889999998)]),
('P',
[(492.19248879999998, 0.15591627499999999),
(114.37494940000001, 0.60768371860000003),
(37.198283359999998, 0.3919573931)]),
('S',
[(62.521797749999998, -0.2277635023),
(19.07114112, 0.21754360440000001),
(7.3582391310000004, 0.91667696109999997)]),
('P',
[(62.521797749999998, 0.0049515111999999997),
(19.07114112, 0.57776646909999996),
(7.3582391310000004, 0.4846460366)]),
('D',
[(62.521797749999998, 0.2197679508),
(19.07114112, 0.65554736270000002),
(7.3582391310000004, 0.28657325900000002)]),
('S',
[(6.1206931490000001, -0.33061006259999998),
(2.383366187, 0.057610953399999998),
(1.0491636630000001, 1.1155787450000001)]),
('P',
[(6.1206931490000001, -0.1283927634),
(2.383366187, 0.5852047641),
(1.0491636630000001, 0.54394420399999999)]),
('D',
[(6.1206931490000001, 0.12506621379999999),
(2.383366187, 0.66867855769999995),
(1.0491636630000001, 0.30524682450000001)]),
('S',
[(0.65292269280000004, -0.38426426070000003),
(0.3515585034, -0.1972567438),
(0.1552895732, 1.3754955120000001)]),
('P',
[(0.65292269280000004, -0.34816915259999998),
(0.3515585034, 0.62903236900000004),
(0.1552895732, 0.66628327430000001)])],
52: [('S',
[(5810.0615909999997, 0.15432896730000001),
(1058.309972, 0.53532814230000003),
(286.4199797, 0.44463454219999998)]),
('S',
[(512.75419199999999, -0.099967229199999993),
(119.15304709999999, 0.3995128261),
(38.752268999999998, 0.70011546889999998)]),
('P',
[(512.75419199999999, 0.15591627499999999),
(119.15304709999999, 0.60768371860000003),
(38.752268999999998, 0.3919573931)]),
('S',
[(65.985562270000003, -0.2277635023),
(20.127699700000001, 0.21754360440000001),
(7.765892279, 0.91667696109999997)]),
('P',
[(65.985562270000003, 0.0049515111999999997),
(20.127699700000001, 0.57776646909999996),
(7.765892279, 0.4846460366)]),
('D',
[(65.985562270000003, 0.2197679508),
(20.127699700000001, 0.65554736270000002),
(7.765892279, 0.28657325900000002)]),
('S',
[(6.7079569210000001, -0.33061006259999998),
(2.6120436549999999, 0.057610953399999998),
(1.149828048, 1.1155787450000001)]),
('P',
[(6.7079569210000001, -0.1283927634),
(2.6120436549999999, 0.5852047641),
(1.149828048, 0.54394420399999999)]),
('D',
[(6.7079569210000001, 0.12506621379999999),
(2.6120436549999999, 0.66867855769999995),
(1.149828048, 0.30524682450000001)]),
('S',
[(0.70127134830000004, -0.38426426070000003),
(0.37759126529999998, -0.1972567438),
(0.16678870200000001, 1.3754955120000001)]),
('P',
[(0.70127134830000004, -0.34816915259999998),
(0.37759126529999998, 0.62903236900000004),
(0.16678870200000001, 0.66628327430000001)])],
53: [('S',
[(6035.1836229999999, 0.15432896730000001),
(1099.316231, 0.53532814230000003),
(297.5178737, 0.44463454219999998)]),
('S',
[(533.73664180000003, -0.099967229199999993),
(124.0289171, 0.3995128261),
(40.338053279999997, 0.70011546889999998)]),
('P',
[(533.73664180000003, 0.15591627499999999),
(124.0289171, 0.60768371860000003),
(40.338053279999997, 0.3919573931)]),
('S',
[(69.54270545, -0.2277635023),
(21.212741749999999, 0.21754360440000001),
(8.1845352340000002, 0.91667696109999997)]),
('P',
[(69.54270545, 0.0049515111999999997),
(21.212741749999999, 0.57776646909999996),
(8.1845352340000002, 0.4846460366)]),
('D',
[(69.54270545, 0.2197679508),
(21.212741749999999, 0.65554736270000002),
(8.1845352340000002, 0.28657325900000002)]),
('S',
[(7.2959911960000001, -0.33061006259999998),
(2.8410211539999999, 0.057610953399999998),
(1.2506245060000001, 1.1155787450000001)]),
('P',
[(7.2959911960000001, -0.1283927634),
(2.8410211539999999, 0.5852047641),
(1.2506245060000001, 0.54394420399999999)]),
('D',
[(7.2959911960000001, 0.12506621379999999),
(2.8410211539999999, 0.66867855769999995),
(1.2506245060000001, 0.30524682450000001)]),
('S',
[(0.79003645820000001, -0.38426426070000003),
(0.42538578919999998, -0.1972567438),
(0.1879003836, 1.3754955120000001)]),
('P',
[(0.79003645820000001, -0.34816915259999998),
(0.42538578919999998, 0.62903236900000004),
(0.1879003836, 0.66628327430000001)])]}
| bsd-3-clause |
medspx/QGIS | python/user.py | 6 | 4679 | # -*- coding: utf-8 -*-
"""
***************************************************************************
user.py
---------------------
Date : January 2015
Copyright : (C) 2015 by Nathan Woodrow
Email : woodrow dot nathan at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Nathan Woodrow'
__date__ = 'January 2015'
__copyright__ = '(C) 2015, Nathan Woodrow'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
import sys
import glob
import traceback
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import QgsApplication, QgsMessageLog
def load_user_expressions(path):
"""
Load all user expressions from the given paths
"""
    # Loop over all .py files and import them
modules = glob.glob(path + "/*.py")
names = [os.path.basename(f)[:-3] for f in modules]
for name in names:
if name == "__init__":
continue
# As user expression functions should be registered with qgsfunction
# just importing the file is enough to get it to load the functions into QGIS
try:
__import__("expressions.{0}".format(name), locals(), globals())
except:
error = traceback.format_exc()
msgtitle = QCoreApplication.translate("UserExpressions", "User expressions")
msg = QCoreApplication.translate("UserExpressions", "The user expression {0} is not valid").format(name)
QgsMessageLog.logMessage(msg + "\n" + error, msgtitle, QgsMessageLog.WARNING)
userpythonhome = os.path.join(QgsApplication.qgisSettingsDirPath(), "python")
expressionspath = os.path.join(userpythonhome, "expressions")
sys.path.append(userpythonhome)
if not os.path.exists(expressionspath):
os.makedirs(expressionspath)
initfile = os.path.join(expressionspath, "__init__.py")
if not os.path.exists(initfile):
open(initfile, "w").close()
template = """\"\"\"
Define a new function using the @qgsfunction decorator.
The function accept the following parameters
:param [any]: Define any parameters you want to pass to your function before
the following arguments.
:param feature: The current feature
:param parent: The QgsExpression object
:param context: If there is an argument called ``context`` found at the last
position, this variable will contain a ``QgsExpressionContext``
object, that gives access to various additional information like
expression variables. E.g. ``context.variable('layer_id')``
:returns: The result of the expression.
The @qgsfunction decorator accepts the following arguments:
:param args: Defines the number of arguments. With ``args='auto'`` the number
arguments will automatically be extracted from the signature.
:param group: The name of the group under which this expression function will
be listed.
:param usesgeometry: Set this to False if your function does not access
feature.geometry(). Defaults to True.
:param referenced_columns: An array of attribute names that are required to run
this function. Defaults to
[QgsFeatureRequest.ALL_ATTRIBUTES].
\"\"\"
from qgis.core import *
from qgis.gui import *
@qgsfunction(args='auto', group='Custom')
def my_sum(value1, value2, feature, parent):
\"\"\"
Calculates the sum of the two parameters value1 and value2.
<h2>Example usage:</h2>
<ul>
<li>my_sum(5, 8) -> 13</li>
<li>my_sum(\"fiel1\", \"field2\") -> 42</li>
</ul>
\"\"\"
return value1 + value2
"""
try:
import expressions
expressions.load = load_user_expressions
expressions.load(expressionspath)
expressions.template = template
except ImportError:
    # We get an import error and crash for some reason even if we make the expressions package
# TODO Fix the crash on first load with no expressions folder
# But for now it's not the end of the world if it doesn't load the first time
pass
| gpl-2.0 |
geobricks/geobricks_trmm | geobricks_trmm/resources/trmm_schema.py | 1 | 6243 | schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"title": "TRMM",
"description": "Tropical Rainfall Measuring Mission.",
"definitions": {
"href": {
"title": "Endpoint",
"description": "Relative path to the service.",
"type": "string",
"default": "/"
},
"method": {
"title": "HTTP Method",
"description": "Method type to invoke the service.",
"type": "string",
"enum": [
"GET",
"POST",
"PUT",
"DELETE"
]
},
"code": {
"title": "Code",
"description": "Encoded value.",
"type": "string"
},
"label": {
"title": "Label",
"description": "Human-readable label.",
"type": "string"
},
"code_label": {
"type": "object",
"properties": {
"code": {
"$ref": "#/definitions/code"
},
"label": {
"$ref": "#/definitions/label"
}
}
},
"layer": {
"type": "object",
"properties": {
"code": {
"$ref": "#/definitions/code"
},
"label": {
"$ref": "#/definitions/label"
},
"extensions": {
"type": "array",
"title": "Extensions",
"description": "Extensions available for the same code.",
"items": {
"type": "string",
"title": "Extension",
"enum": [
".tif",
".tfw"
]
}
}
}
}
},
"properties": {
"service_type": {
"type": "string",
"title": "Type",
"description": "REST service type.",
"enum": "DATASOURCE",
"default": "DATASOURCE"
},
"list_years": {
"type": "object",
"title": "List years",
"description": "List all the available years.",
"properties": {
"schema": {
"type": "object",
"properties": {
"href": {
"$ref": "#/definitions/href",
"propertyOrder": 0
}
}
},
"target": {
"items": {
"title": "Year",
"$ref": "#/definitions/code_label"
},
"type": "array"
}
}
},
"list_months": {
"type": "object",
"title": "List months",
"description": "List all the available months for a given year.",
"properties": {
"schema": {
"type": "object",
"properties": {
"href": {
"$ref": "#/definitions/href",
"propertyOrder": 0
},
"year": {
"type": "integer",
"propertyOrder": 1
}
}
},
"target": {
"items": {
"title": "Month",
"$ref": "#/definitions/code_label"
},
"type": "array"
}
}
},
"list_days": {
"type": "object",
"title": "List days",
"description": "List all the available days for a given year and month.",
"properties": {
"schema": {
"type": "object",
"properties": {
"href": {
"$ref": "#/definitions/href",
"propertyOrder": 0
},
"year": {
"type": "integer",
"propertyOrder": 1
},
"month": {
"type": "integer",
"propertyOrder": 2
}
}
},
"target": {
"items": {
"title": "Day",
"$ref": "#/definitions/code_label"
},
"type": "array"
}
}
},
"list_layers": {
"type": "object",
"title": "List layers",
"description": "List all the available layers for a given year, month and day.",
"properties": {
"schema": {
"type": "object",
"properties": {
"href": {
"$ref": "#/definitions/href",
"propertyOrder": 0
},
"year": {
"type": "integer",
"propertyOrder": 1
},
"month": {
"type": "integer",
"propertyOrder": 2
},
"day": {
"type": "integer",
"propertyOrder": 3
}
}
},
"target": {
"items": {
"title": "Layer",
"$ref": "#/definitions/layer"
},
"type": "array"
}
}
}
}
}
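# A hedged usage sketch (not part of the original module): the dictionary above
# is an ordinary JSON Schema document, so a datasource description could be
# validated against it with the third-party ``jsonschema`` package. The instance
# below is an assumption made purely for illustration, so the snippet is left
# commented out.
#
# from jsonschema import validate
#
# validate({"service_type": "DATASOURCE"}, schema)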
| gpl-2.0 |
penzance/quis | quis/settings/base.py | 1 | 7227 | """
Django settings for quis project.
Generated by 'django-admin startproject' using Django 1.9.5 of TLT template.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
import logging
from .secure import SECURE_SETTINGS
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# NOTE: Since we have a settings module, we have to go one more directory up to get to
# the project root
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Application definition
INSTALLED_APPS = [
# 'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'quis',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Authentication
# Django defaults are below, but will need to be customized if using something
# other than the built-in Django auth, such as PIN, LTI, etc.
# AUTHENTICATION_BACKENDS = ('django.contrib.auth.backends.ModelBackend',)
# LOGIN_URL = '/accounts/login'
ROOT_URLCONF = 'quis.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'quis.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': SECURE_SETTINGS.get('db_default_name', 'quis'),
'USER': SECURE_SETTINGS.get('db_default_user', 'postgres'),
'PASSWORD': SECURE_SETTINGS.get('db_default_password'),
'HOST': SECURE_SETTINGS.get('db_default_host', '127.0.0.1'),
'PORT': SECURE_SETTINGS.get('db_default_port', 5432), # Default postgres port
},
}
# Sessions
# https://docs.djangoproject.com/en/1.9/topics/http/sessions/#module-django.contrib.sessions
# Store sessions in default cache defined below
SESSION_ENGINE = 'django.contrib.sessions.backends.cache'
# NOTE: This setting only affects the session cookie, not the expiration of the session
# being stored in the cache. The session keys will expire according to the value of
# SESSION_COOKIE_AGE (https://docs.djangoproject.com/en/1.9/ref/settings/#session-cookie-age),
# which defaults to 2 weeks.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
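# Hedged, illustrative addition (not in the original project): the cookie
# lifetime itself can be made deploy-configurable in the same SECURE_SETTINGS
# style used elsewhere in this module. The key name 'session_cookie_age_secs'
# is an assumption; 1209600 seconds (2 weeks) mirrors Django's documented
# default, so behaviour is unchanged unless the secure setting is provided.
SESSION_COOKIE_AGE = SECURE_SETTINGS.get('session_cookie_age_secs', 1209600)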
# Cache
# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-CACHES
REDIS_HOST = SECURE_SETTINGS.get('redis_host', '127.0.0.1')
REDIS_PORT = SECURE_SETTINGS.get('redis_port', 6379)
CACHES = {
'default': {
'BACKEND': 'redis_cache.RedisCache',
'LOCATION': "redis://%s:%s/0" % (REDIS_HOST, REDIS_PORT),
'OPTIONS': {
'PARSER_CLASS': 'redis.connection.HiredisParser'
},
'KEY_PREFIX': 'quis', # Provide a unique value for shared cache
# See following for default timeout (5 minutes as of 1.7):
# https://docs.djangoproject.com/en/1.9/ref/settings/#std:setting-CACHES-TIMEOUT
'TIMEOUT': SECURE_SETTINGS.get('default_cache_timeout_secs', 300),
},
}
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
# A boolean that specifies whether Django's translation system should be enabled. This provides
# an easy way to turn it off, for performance. If this is set to False, Django will make some
# optimizations so as not to load the translation machinery.
USE_I18N = False
# A boolean that specifies if localized formatting of data will be enabled by default or not.
# If this is set to True, e.g. Django will display numbers and dates using the format of the
# current locale. NOTE: this would only really come into play if your locale was outside of the
# US
USE_L10N = False
# A boolean that specifies if datetimes will be timezone-aware by default or not. If this is set to
# True, Django will use timezone-aware datetimes internally. Otherwise, Django will use naive
# datetimes in local time.
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
# This directory is being ignored by git
STATIC_ROOT = os.path.normpath(os.path.join(BASE_DIR, 'http_static'))
STATIC_URL = '/static/'
# Logging
# https://docs.djangoproject.com/en/1.9/topics/logging/#configuring-logging
# Turn off default Django logging
# https://docs.djangoproject.com/en/1.9/topics/logging/#disabling-logging-configuration
LOGGING_CONFIG = None
_DEFAULT_LOG_LEVEL = SECURE_SETTINGS.get('log_level', logging.DEBUG)
_LOG_ROOT = SECURE_SETTINGS.get('log_root', '')
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'verbose': {
'format': '%(levelname)s\t%(asctime)s.%(msecs)03dZ\t%(name)s:%(lineno)s\t%(message)s',
'datefmt': '%Y-%m-%dT%H:%M:%S'
},
'simple': {
'format': '%(levelname)s\t%(name)s:%(lineno)s\t%(message)s',
},
},
'handlers': {
# By default, log to a file
'default': {
'class': 'logging.handlers.WatchedFileHandler',
'level': _DEFAULT_LOG_LEVEL,
'formatter': 'verbose',
'filename': os.path.join(_LOG_ROOT, 'django-quis.log'),
},
},
# This is the default logger for any apps or libraries that use the logger
# package, but are not represented in the `loggers` dict below. A level
# must be set and handlers defined. Setting this logger is equivalent to
    # setting an empty string logger in the loggers dict below, but the separation
# here is a bit more explicit. See link for more details:
# https://docs.python.org/2.7/library/logging.config.html#dictionary-schema-details
'root': {
'level': logging.WARNING,
'handlers': ['default'],
},
'loggers': {
# Add app specific loggers here, should look something like this:
# '': {
# 'level': _DEFAULT_LOG_LEVEL,
# 'handlers': ['default'],
# 'propagate': False,
# },
# Make sure that propagate is False so that the root logger doesn't get involved
# after an app logger handles a log message.
},
}
# Other project specific settings
WATCHMEN = SECURE_SETTINGS['watchmen']
| mit |
fabian4/trove | trove/db/sqlalchemy/migrate_repo/schema.py | 4 | 2608 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Various conveniences used for migration scripts."""
from oslo_log import log as logging
import sqlalchemy.types
logger = logging.getLogger('trove.db.sqlalchemy.migrate_repo.schema')
class String(sqlalchemy.types.String):
def __init__(self, length, *args, **kwargs):
super(String, self).__init__(*args, length=length, **kwargs)
class Text(sqlalchemy.types.Text):
def __init__(self, length=None, *args, **kwargs):
super(Text, self).__init__(*args, length=length, **kwargs)
class Boolean(sqlalchemy.types.Boolean):
def __init__(self, create_constraint=True, name=None, *args, **kwargs):
super(Boolean, self).__init__(*args,
create_constraint=create_constraint,
name=name,
**kwargs)
class DateTime(sqlalchemy.types.DateTime):
def __init__(self, timezone=False, *args, **kwargs):
super(DateTime, self).__init__(*args,
timezone=timezone,
**kwargs)
class Integer(sqlalchemy.types.Integer):
def __init__(self, *args, **kwargs):
super(Integer, self).__init__(*args, **kwargs)
class BigInteger(sqlalchemy.types.BigInteger):
def __init__(self, *args, **kwargs):
super(BigInteger, self).__init__(*args, **kwargs)
class Float(sqlalchemy.types.Float):
def __init__(self, *args, **kwargs):
super(Float, self).__init__(*args, **kwargs)
def create_tables(tables):
for table in tables:
logger.info("creating table %(table)s" % {'table': table})
table.create()
def drop_tables(tables):
for table in tables:
logger.info("dropping table %(table)s" % {'table': table})
table.drop()
def Table(name, metadata, *args, **kwargs):
return sqlalchemy.schema.Table(name, metadata, *args,
mysql_engine='INNODB', **kwargs)
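# A hedged usage sketch (not part of the original module): a typical
# migrate_repo version script builds its tables with the wrappers above. The
# table and column names are assumptions made for demonstration, so the example
# is left commented out.
#
# from sqlalchemy.schema import Column, MetaData
#
# meta = MetaData()
# example = Table('example', meta,
#                 Column('id', Integer(), primary_key=True, nullable=False),
#                 Column('name', String(255)))
#
# def upgrade(migrate_engine):
#     meta.bind = migrate_engine
#     create_tables([example])
#
# def downgrade(migrate_engine):
#     meta.bind = migrate_engine
#     drop_tables([example])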
| apache-2.0 |
bop/rango | lib/python2.7/site-packages/django/contrib/gis/gdal/prototypes/srs.py | 219 | 3498 | from ctypes import c_char_p, c_int, c_void_p, POINTER
from django.contrib.gis.gdal.libgdal import lgdal, std_call
from django.contrib.gis.gdal.prototypes.generation import (const_string_output,
double_output, int_output, srs_output, string_output, void_output)
## Shortcut generation for routines with known parameters.
def srs_double(f):
"""
Creates a function prototype for the OSR routines that take
    the OSRSpatialReference object and return a double value.
"""
return double_output(f, [c_void_p, POINTER(c_int)], errcheck=True)
def units_func(f):
"""
Creates a ctypes function prototype for OSR units functions, e.g.,
OSRGetAngularUnits, OSRGetLinearUnits.
"""
return double_output(f, [c_void_p, POINTER(c_char_p)], strarg=True)
# Creation & destruction.
clone_srs = srs_output(std_call('OSRClone'), [c_void_p])
new_srs = srs_output(std_call('OSRNewSpatialReference'), [c_char_p])
release_srs = void_output(lgdal.OSRRelease, [c_void_p], errcheck=False)
destroy_srs = void_output(std_call('OSRDestroySpatialReference'), [c_void_p], errcheck=False)
srs_validate = void_output(lgdal.OSRValidate, [c_void_p])
# Getting the semi_major, semi_minor, and flattening functions.
semi_major = srs_double(lgdal.OSRGetSemiMajor)
semi_minor = srs_double(lgdal.OSRGetSemiMinor)
invflattening = srs_double(lgdal.OSRGetInvFlattening)
# WKT, PROJ, EPSG, XML importation routines.
from_wkt = void_output(lgdal.OSRImportFromWkt, [c_void_p, POINTER(c_char_p)])
from_proj = void_output(lgdal.OSRImportFromProj4, [c_void_p, c_char_p])
from_epsg = void_output(std_call('OSRImportFromEPSG'), [c_void_p, c_int])
from_xml = void_output(lgdal.OSRImportFromXML, [c_void_p, c_char_p])
from_user_input = void_output(std_call('OSRSetFromUserInput'), [c_void_p, c_char_p])
# Morphing to/from ESRI WKT.
morph_to_esri = void_output(lgdal.OSRMorphToESRI, [c_void_p])
morph_from_esri = void_output(lgdal.OSRMorphFromESRI, [c_void_p])
# Identifying the EPSG
identify_epsg = void_output(lgdal.OSRAutoIdentifyEPSG, [c_void_p])
# Getting the angular_units, linear_units functions
linear_units = units_func(lgdal.OSRGetLinearUnits)
angular_units = units_func(lgdal.OSRGetAngularUnits)
# For exporting to WKT, PROJ.4, "Pretty" WKT, and XML.
to_wkt = string_output(std_call('OSRExportToWkt'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_proj = string_output(std_call('OSRExportToProj4'), [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_pretty_wkt = string_output(std_call('OSRExportToPrettyWkt'), [c_void_p, POINTER(c_char_p), c_int], offset=-2, decoding='ascii')
# Memory leak fixed in GDAL 1.5; still exists in 1.4.
to_xml = string_output(lgdal.OSRExportToXML, [c_void_p, POINTER(c_char_p), c_char_p], offset=-2, decoding='ascii')
# String attribute retrieval routines.
get_attr_value = const_string_output(std_call('OSRGetAttrValue'), [c_void_p, c_char_p, c_int], decoding='ascii')
get_auth_name = const_string_output(lgdal.OSRGetAuthorityName, [c_void_p, c_char_p], decoding='ascii')
get_auth_code = const_string_output(lgdal.OSRGetAuthorityCode, [c_void_p, c_char_p], decoding='ascii')
# SRS Properties
isgeographic = int_output(lgdal.OSRIsGeographic, [c_void_p])
islocal = int_output(lgdal.OSRIsLocal, [c_void_p])
isprojected = int_output(lgdal.OSRIsProjected, [c_void_p])
# Coordinate transformation
new_ct = srs_output(std_call('OCTNewCoordinateTransformation'), [c_void_p, c_void_p])
destroy_ct = void_output(std_call('OCTDestroyCoordinateTransformation'), [c_void_p], errcheck=False)
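# A hedged illustration (not part of the original module): the prototypes above
# are thin ctypes callables over the OSR C API and are normally consumed by
# django.contrib.gis.gdal.srs. The handle and WKT values below are assumptions,
# so the call is shown commented out.
#
# from ctypes import byref
# wkt_buf = c_char_p(b'GEOGCS["WGS 84", ...]')
# from_wkt(srs_ptr, byref(wkt_buf))  # srs_ptr: an OSR spatial reference handle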
| gpl-2.0 |
spcs/synaps | synaps/auth/ldapdriver.py | 1 | 28673 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Auth driver for ldap. Includes FakeLdapDriver.
It should be easy to create a replacement for this driver supporting
other backends by creating another class that exposes the same
public methods.
"""
import functools
import sys
from synaps import exception
from synaps import flags
from synaps import log as logging
from synaps.openstack.common import cfg
ldap_opts = [
cfg.IntOpt('ldap_schema_version',
default=2,
help='Current version of the LDAP schema'),
cfg.StrOpt('ldap_url',
default='ldap://localhost',
help='Point this at your ldap server'),
cfg.StrOpt('ldap_password',
default='changeme',
help='LDAP password'),
cfg.StrOpt('ldap_user_dn',
default='cn=Manager,dc=example,dc=com',
help='DN of admin user'),
cfg.StrOpt('ldap_user_id_attribute',
default='uid',
help='Attribute to use as id'),
cfg.StrOpt('ldap_user_name_attribute',
default='cn',
help='Attribute to use as name'),
cfg.StrOpt('ldap_user_unit',
default='Users',
help='OID for Users'),
cfg.StrOpt('ldap_user_subtree',
default='ou=Users,dc=example,dc=com',
help='OU for Users'),
cfg.BoolOpt('ldap_user_modify_only',
default=False,
help='Modify user attributes instead of creating/deleting'),
cfg.StrOpt('ldap_project_subtree',
default='ou=Groups,dc=example,dc=com',
help='OU for Projects'),
cfg.StrOpt('role_project_subtree',
default='ou=Groups,dc=example,dc=com',
help='OU for Roles'),
# NOTE(vish): mapping with these flags is necessary because we're going
# to tie in to an existing ldap schema
cfg.StrOpt('ldap_cloudadmin',
default='cn=cloudadmins,ou=Groups,dc=example,dc=com',
help='cn for Cloud Admins'),
cfg.StrOpt('ldap_itsec',
default='cn=itsec,ou=Groups,dc=example,dc=com',
help='cn for ItSec'),
cfg.StrOpt('ldap_sysadmin',
default='cn=sysadmins,ou=Groups,dc=example,dc=com',
help='cn for Sysadmins'),
cfg.StrOpt('ldap_netadmin',
default='cn=netadmins,ou=Groups,dc=example,dc=com',
help='cn for NetAdmins'),
cfg.StrOpt('ldap_developer',
default='cn=developers,ou=Groups,dc=example,dc=com',
help='cn for Developers'),
]
FLAGS = flags.FLAGS
FLAGS.register_opts(ldap_opts)
LOG = logging.getLogger(__name__)
if FLAGS.memcached_servers:
import memcache
else:
from synaps.common import memorycache as memcache
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
# creating this now because I'm expecting an auth refactor
# in which we may want to change the interface a bit more.
def _clean(attr):
"""Clean attr for insertion into ldap"""
if attr is None:
return None
if isinstance(attr, unicode):
return str(attr)
return attr
def sanitize(fn):
"""Decorator to sanitize all args"""
@functools.wraps(fn)
def _wrapped(self, *args, **kwargs):
args = [_clean(x) for x in args]
kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
return fn(self, *args, **kwargs)
_wrapped.func_name = fn.func_name
return _wrapped
class LDAPWrapper(object):
def __init__(self, ldap, url, user, password):
self.ldap = ldap
self.url = url
self.user = user
self.password = password
self.conn = None
def __wrap_reconnect(f):
def inner(self, *args, **kwargs):
if self.conn is None:
self.connect()
return f(self.conn)(*args, **kwargs)
else:
try:
return f(self.conn)(*args, **kwargs)
except self.ldap.SERVER_DOWN:
self.connect()
return f(self.conn)(*args, **kwargs)
return inner
def connect(self):
try:
self.conn = self.ldap.initialize(self.url)
self.conn.simple_bind_s(self.user, self.password)
except self.ldap.SERVER_DOWN:
self.conn = None
raise
search_s = __wrap_reconnect(lambda conn: conn.search_s)
add_s = __wrap_reconnect(lambda conn: conn.add_s)
delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
class LdapDriver(object):
"""Ldap Auth driver
Defines enter and exit and therefore supports the with/as syntax.
"""
project_pattern = '(owner=*)'
isadmin_attribute = 'isSynapsAdmin'
project_attribute = 'owner'
project_objectclass = 'groupOfNames'
conn = None
mc = None
def __init__(self):
"""Imports the LDAP module"""
self.ldap = __import__('ldap')
if FLAGS.ldap_schema_version == 1:
LdapDriver.project_pattern = '(objectclass=synapsProject)'
LdapDriver.isadmin_attribute = 'isAdmin'
LdapDriver.project_attribute = 'projectManager'
LdapDriver.project_objectclass = 'synapsProject'
self.__cache = None
if LdapDriver.conn is None:
LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
FLAGS.ldap_user_dn,
FLAGS.ldap_password)
if LdapDriver.mc is None:
LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
def __enter__(self):
# TODO(yorik-sar): Should be per-request cache, not per-driver-request
self.__cache = {}
return self
def __exit__(self, exc_type, exc_value, traceback):
self.__cache = None
return False
def __local_cache(key_fmt): # pylint: disable=E0213
"""Wrap function to cache it's result in self.__cache.
Works only with functions with one fixed argument.
"""
def do_wrap(fn):
@functools.wraps(fn)
def inner(self, arg, **kwargs):
cache_key = key_fmt % (arg,)
try:
res = self.__cache[cache_key]
LOG.debug('Local cache hit for %s by key %s' %
(fn.__name__, cache_key))
return res
except KeyError:
res = fn(self, arg, **kwargs)
self.__cache[cache_key] = res
return res
return inner
return do_wrap
@sanitize
@__local_cache('uid_user-%s')
def get_user(self, uid):
"""Retrieve user by id"""
attr = self.__get_ldap_user(uid)
if attr is None:
raise exception.LDAPUserNotFound(user_id=uid)
return self.__to_user(attr)
@sanitize
def get_user_from_access_key(self, access):
"""Retrieve user by access key"""
cache_key = 'uak_dn_%s' % (access,)
user_dn = self.mc.get(cache_key)
if user_dn:
user = self.__to_user(
self.__find_object(user_dn, scope=self.ldap.SCOPE_BASE))
if user:
if user['access'] == access:
return user
else:
self.mc.set(cache_key, None)
query = '(accessKey=%s)' % access
dn = FLAGS.ldap_user_subtree
user_obj = self.__find_object(dn, query)
user = self.__to_user(user_obj)
if user:
self.mc.set(cache_key, user_obj['dn'][0])
return user
@sanitize
@__local_cache('pid_project-%s')
def get_project(self, pid):
"""Retrieve project by id"""
dn = self.__project_to_dn(pid, search=False)
attr = self.__find_object(dn, LdapDriver.project_pattern,
scope=self.ldap.SCOPE_BASE)
return self.__to_project(attr)
@sanitize
def get_users(self):
"""Retrieve list of users"""
attrs = self.__find_objects(FLAGS.ldap_user_subtree,
'(objectclass=synapsUser)')
users = []
for attr in attrs:
user = self.__to_user(attr)
if user is not None:
users.append(user)
return users
@sanitize
def get_projects(self, uid=None):
"""Retrieve list of projects"""
pattern = LdapDriver.project_pattern
if uid:
pattern = "(&%s(member=%s))" % (pattern, self.__uid_to_dn(uid))
attrs = self.__find_objects(FLAGS.ldap_project_subtree,
pattern)
return [self.__to_project(attr) for attr in attrs]
@sanitize
def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user"""
if self.__user_exists(name):
raise exception.LDAPUserExists(user=name)
if FLAGS.ldap_user_modify_only:
if self.__ldap_user_exists(name):
# Retrieve user by name
user = self.__get_ldap_user(name)
# Entry could be malformed, test for missing attrs.
# Malformed entries are useless, replace attributes found.
attr = []
if 'secretKey' in user.keys():
attr.append((self.ldap.MOD_REPLACE, 'secretKey',
[secret_key]))
else:
attr.append((self.ldap.MOD_ADD, 'secretKey',
[secret_key]))
if 'accessKey' in user.keys():
attr.append((self.ldap.MOD_REPLACE, 'accessKey',
[access_key]))
else:
attr.append((self.ldap.MOD_ADD, 'accessKey',
[access_key]))
if LdapDriver.isadmin_attribute in user.keys():
attr.append((self.ldap.MOD_REPLACE,
LdapDriver.isadmin_attribute,
[str(is_admin).upper()]))
else:
attr.append((self.ldap.MOD_ADD,
LdapDriver.isadmin_attribute,
[str(is_admin).upper()]))
self.conn.modify_s(self.__uid_to_dn(name), attr)
return self.get_user(name)
else:
raise exception.LDAPUserNotFound(user_id=name)
else:
attr = [
('objectclass', ['person',
'organizationalPerson',
'inetOrgPerson',
'synapsUser']),
('ou', [FLAGS.ldap_user_unit]),
(FLAGS.ldap_user_id_attribute, [name]),
('sn', [name]),
(FLAGS.ldap_user_name_attribute, [name]),
('secretKey', [secret_key]),
('accessKey', [access_key]),
(LdapDriver.isadmin_attribute, [str(is_admin).upper()]),
]
self.conn.add_s(self.__uid_to_dn(name), attr)
return self.__to_user(dict(attr))
@sanitize
def create_project(self, name, manager_uid,
description=None, member_uids=None):
"""Create a project"""
if self.__project_exists(name):
raise exception.ProjectExists(project=name)
if not self.__user_exists(manager_uid):
raise exception.LDAPUserNotFound(user_id=manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
description = name
members = []
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.LDAPUserNotFound(user_id=member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
members.append(manager_dn)
attr = [
('objectclass', [LdapDriver.project_objectclass]),
('cn', [name]),
('description', [description]),
(LdapDriver.project_attribute, [manager_dn]),
('member', members)]
dn = self.__project_to_dn(name, search=False)
self.conn.add_s(dn, attr)
return self.__to_project(dict(attr))
@sanitize
def modify_project(self, project_id, manager_uid=None, description=None):
"""Modify an existing project"""
if not manager_uid and not description:
return
attr = []
if manager_uid:
if not self.__user_exists(manager_uid):
raise exception.LDAPUserNotFound(user_id=manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
manager_dn))
if description:
attr.append((self.ldap.MOD_REPLACE, 'description', description))
dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr)
if not self.is_in_project(manager_uid, project_id):
self.add_to_project(manager_uid, project_id)
@sanitize
def add_to_project(self, uid, project_id):
"""Add user to project"""
dn = self.__project_to_dn(project_id)
return self.__add_to_group(uid, dn)
@sanitize
def remove_from_project(self, uid, project_id):
"""Remove user from project"""
dn = self.__project_to_dn(project_id)
return self.__remove_from_group(uid, dn)
@sanitize
def is_in_project(self, uid, project_id):
"""Check if user is in project"""
dn = self.__project_to_dn(project_id)
return self.__is_in_group(uid, dn)
@sanitize
def has_role(self, uid, role, project_id=None):
"""Check if user has role
If a project is specified, check for the local role; otherwise check
for the global role
"""
role_dn = self.__role_to_dn(role, project_id)
return self.__is_in_group(uid, role_dn)
@sanitize
def add_role(self, uid, role, project_id=None):
"""Add role for user (or user and project)"""
role_dn = self.__role_to_dn(role, project_id)
if not self.__group_exists(role_dn):
# create the role if it doesn't exist
description = '%s role for %s' % (role, project_id)
self.__create_group(role_dn, role, uid, description)
else:
return self.__add_to_group(uid, role_dn)
@sanitize
def remove_role(self, uid, role, project_id=None):
"""Remove role for user (or user and project)"""
role_dn = self.__role_to_dn(role, project_id)
return self.__remove_from_group(uid, role_dn)
@sanitize
def get_user_roles(self, uid, project_id=None):
"""Retrieve list of roles for user (or user and project)"""
if project_id is None:
# NOTE(vish): This is unnecessarily slow, but since we can't
#             guarantee that the global roles are located
#             together in the ldap tree, we do it this way.
roles = []
for role in FLAGS.allowed_roles:
role_dn = self.__role_to_dn(role)
if self.__is_in_group(uid, role_dn):
roles.append(role)
return roles
else:
project_dn = self.__project_to_dn(project_id)
query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' %
(LdapDriver.project_pattern, self.__uid_to_dn(uid)))
roles = self.__find_objects(project_dn, query)
return [role['cn'][0] for role in roles]
@sanitize
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
self.__remove_from_all(uid)
if FLAGS.ldap_user_modify_only:
# Delete attributes
attr = []
# Retrieve user by name
user = self.__get_ldap_user(uid)
if 'secretKey' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'secretKey',
user['secretKey']))
if 'accessKey' in user.keys():
attr.append((self.ldap.MOD_DELETE, 'accessKey',
user['accessKey']))
if LdapDriver.isadmin_attribute in user.keys():
attr.append((self.ldap.MOD_DELETE,
LdapDriver.isadmin_attribute,
user[LdapDriver.isadmin_attribute]))
self.conn.modify_s(self.__uid_to_dn(uid), attr)
else:
# Delete entry
self.conn.delete_s(self.__uid_to_dn(uid))
@sanitize
def delete_project(self, project_id):
"""Delete a project"""
project_dn = self.__project_to_dn(project_id)
self.__delete_roles(project_dn)
self.__delete_group(project_dn)
@sanitize
def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
"""Modify an existing user"""
if not access_key and not secret_key and admin is None:
return
attr = []
if access_key:
attr.append((self.ldap.MOD_REPLACE, 'accessKey', access_key))
if secret_key:
attr.append((self.ldap.MOD_REPLACE, 'secretKey', secret_key))
if admin is not None:
attr.append((self.ldap.MOD_REPLACE, LdapDriver.isadmin_attribute,
str(admin).upper()))
self.conn.modify_s(self.__uid_to_dn(uid), attr)
def __user_exists(self, uid):
"""Check if user exists"""
try:
return self.get_user(uid) is not None
except exception.LDAPUserNotFound:
return False
def __ldap_user_exists(self, uid):
"""Check if the user exists in ldap"""
return self.__get_ldap_user(uid) is not None
def __project_exists(self, project_id):
"""Check if project exists"""
return self.get_project(project_id) is not None
@__local_cache('uid_attrs-%s')
def __get_ldap_user(self, uid):
"""Retrieve LDAP user entry by id"""
dn = FLAGS.ldap_user_subtree
query = ('(&(%s=%s)(objectclass=synapsUser))' %
(FLAGS.ldap_user_id_attribute, uid))
return self.__find_object(dn, query)
def __find_object(self, dn, query=None, scope=None):
"""Find an object by dn and query"""
objects = self.__find_objects(dn, query, scope)
if len(objects) == 0:
return None
return objects[0]
def __find_dns(self, dn, query=None, scope=None):
"""Find dns by query"""
if scope is None:
# NOTE: one of the scope constants (SCOPE_BASE) is 0, so test against None
scope = self.ldap.SCOPE_SUBTREE
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# Just return the DNs
return [dn for dn, _attributes in res]
def __find_objects(self, dn, query=None, scope=None):
"""Find objects by query"""
if scope is None:
# NOTE: one of the scope constants (SCOPE_BASE) is 0, so test against None
scope = self.ldap.SCOPE_SUBTREE
if query is None:
query = "(objectClass=*)"
try:
res = self.conn.search_s(dn, scope, query)
except self.ldap.NO_SUCH_OBJECT:
return []
# Just return the attributes
# FIXME(yorik-sar): Whole driver should be refactored to
# prevent this hack
res1 = []
for dn, attrs in res:
attrs['dn'] = [dn]
res1.append(attrs)
return res1
def __find_role_dns(self, tree):
"""Find dns of role objects in given tree"""
query = ('(&(objectclass=groupOfNames)(!%s))' %
LdapDriver.project_pattern)
return self.__find_dns(tree, query)
def __find_group_dns_with_member(self, tree, uid):
"""Find dns of group objects in a given tree that contain member"""
query = ('(&(objectclass=groupOfNames)(member=%s))' %
self.__uid_to_dn(uid))
dns = self.__find_dns(tree, query)
return dns
def __group_exists(self, dn):
"""Check if group exists"""
query = '(objectclass=groupOfNames)'
return self.__find_object(dn, query) is not None
def __role_to_dn(self, role, project_id=None):
"""Convert role to corresponding dn"""
if project_id is None:
return FLAGS["ldap_%s" % role]
else:
project_dn = self.__project_to_dn(project_id)
return 'cn=%s,%s' % (role, project_dn)
def __create_group(self, group_dn, name, uid,
description, member_uids=None):
"""Create a group"""
if self.__group_exists(group_dn):
raise exception.LDAPGroupExists(group=name)
members = []
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
raise exception.LDAPUserNotFound(user_id=member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if dn not in members:
members.append(dn)
attr = [
('objectclass', ['groupOfNames']),
('cn', [name]),
('description', [description]),
('member', members)]
self.conn.add_s(group_dn, attr)
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
'(member=%s)' % self.__uid_to_dn(uid),
self.ldap.SCOPE_BASE)
return res is not None
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
if not self.__group_exists(group_dn):
raise exception.LDAPGroupNotFound(group_id=group_dn)
if self.__is_in_group(uid, group_dn):
raise exception.LDAPMembershipExists(uid=uid, group_dn=group_dn)
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
raise exception.LDAPGroupNotFound(group_id=group_dn)
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
if not self.__is_in_group(uid, group_dn):
raise exception.LDAPGroupMembershipNotFound(user_id=uid,
group_id=group_dn)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(group_dn, uid)
for sub_dn in sub_dns:
self.__safe_remove_from_group(uid, sub_dn)
def __safe_remove_from_group(self, uid, group_dn):
"""Remove user from group, deleting group if user is last member"""
# FIXME(vish): what if deleted user is a project manager?
attr = [(self.ldap.MOD_DELETE, 'member', self.__uid_to_dn(uid))]
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
LOG.debug(_("Attempted to remove the last member of a group. "
"Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
raise exception.LDAPUserNotFound(user_id=uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
self.__safe_remove_from_group(uid, role_dn)
project_dns = self.__find_group_dns_with_member(
FLAGS.ldap_project_subtree, uid)
for project_dn in project_dns:
self.__safe_remove_from_group(uid, project_dn)
def __delete_group(self, group_dn):
"""Delete Group"""
if not self.__group_exists(group_dn):
raise exception.LDAPGroupNotFound(group_id=group_dn)
self.conn.delete_s(group_dn)
def __delete_roles(self, project_dn):
"""Delete all roles for project"""
for role_dn in self.__find_role_dns(project_dn):
self.__delete_group(role_dn)
def __to_project(self, attr):
"""Convert ldap attributes to Project object"""
if attr is None:
return None
member_dns = attr.get('member', [])
return {
'id': attr['cn'][0],
'name': attr['cn'][0],
'project_manager_id':
self.__dn_to_uid(attr[LdapDriver.project_attribute][0]),
'description': attr.get('description', [None])[0],
'member_ids': [self.__dn_to_uid(x) for x in member_dns]}
@__local_cache('uid_dn-%s')
def __uid_to_dn(self, uid, search=True):
"""Convert uid to dn"""
# By default return a generated DN
userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s'
% (uid, FLAGS.ldap_user_subtree))
if search:
query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid))
user = self.__find_dns(FLAGS.ldap_user_subtree, query)
if len(user) > 0:
userdn = user[0]
return userdn
@__local_cache('pid_dn-%s')
def __project_to_dn(self, pid, search=True):
"""Convert pid to dn"""
# By default return a generated DN
projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree))
if search:
query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern))
project = self.__find_dns(FLAGS.ldap_project_subtree, query)
if len(project) > 0:
projectdn = project[0]
return projectdn
@staticmethod
def __to_user(attr):
"""Convert ldap attributes to User object"""
if attr is None:
return None
if ('accessKey' in attr.keys() and 'secretKey' in attr.keys() and
LdapDriver.isadmin_attribute in attr.keys()):
return {
'id': attr[FLAGS.ldap_user_id_attribute][0],
'name': attr[FLAGS.ldap_user_name_attribute][0],
'access': attr['accessKey'][0],
'secret': attr['secretKey'][0],
'admin': (attr[LdapDriver.isadmin_attribute][0] == 'TRUE')}
else:
return None
@__local_cache('dn_uid-%s')
def __dn_to_uid(self, dn):
"""Convert user dn to uid"""
query = '(objectclass=synapsUser)'
user = self.__find_object(dn, query, scope=self.ldap.SCOPE_BASE)
return user[FLAGS.ldap_user_id_attribute][0]
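# --- Illustrative usage sketch (not part of the original driver code). ---
# Typical calls against an already-connected LdapDriver instance; the user id,
# keys, project name and role name below are placeholders and assume FLAGS and
# the LDAP tree are already configured.
def _example_provision_project(driver):
    user = driver.create_user('alice', 'access-key', 'secret-key', False)
    project = driver.create_project('demo', 'alice', description='Demo project')
    driver.add_role('alice', 'sysadmin', project['id'])  # hypothetical role name
    return user, project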
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
def __init__(self):
import synaps.auth.fakeldap
sys.modules['ldap'] = synaps.auth.fakeldap
super(FakeLdapDriver, self).__init__()
| apache-2.0 |
jfarrell/thrift | lib/py/src/transport/TTwisted.py | 30 | 10904 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from io import BytesIO
import struct
from zope.interface import implementer, Interface, Attribute
from twisted.internet.protocol import ServerFactory, ClientFactory, \
connectionDone
from twisted.internet import defer
from twisted.internet.threads import deferToThread
from twisted.protocols import basic
from twisted.web import server, resource, http
from thrift.transport import TTransport
class TMessageSenderTransport(TTransport.TTransportBase):
def __init__(self):
self.__wbuf = BytesIO()
def write(self, buf):
self.__wbuf.write(buf)
def flush(self):
msg = self.__wbuf.getvalue()
self.__wbuf = BytesIO()
return self.sendMessage(msg)
def sendMessage(self, message):
raise NotImplementedError
class TCallbackTransport(TMessageSenderTransport):
def __init__(self, func):
TMessageSenderTransport.__init__(self)
self.func = func
def sendMessage(self, message):
return self.func(message)
class ThriftClientProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self._client_class = client_class
self._iprot_factory = iprot_factory
if oprot_factory is None:
self._oprot_factory = iprot_factory
else:
self._oprot_factory = oprot_factory
self.recv_map = {}
self.started = defer.Deferred()
def dispatch(self, msg):
self.sendString(msg)
def connectionMade(self):
tmo = TCallbackTransport(self.dispatch)
self.client = self._client_class(tmo, self._oprot_factory)
self.started.callback(self.client)
def connectionLost(self, reason=connectionDone):
# errbacks invoked below may add new items to our client's _reqs,
# so keep popping entries until no more requests are added during
# the errbacks
if self.client:
tex = TTransport.TTransportException(
type=TTransport.TTransportException.END_OF_FILE,
message='Connection closed (%s)' % reason)
while self.client._reqs:
_, v = self.client._reqs.popitem()
v.errback(tex)
del self.client._reqs
self.client = None
def stringReceived(self, frame):
tr = TTransport.TMemoryBuffer(frame)
iprot = self._iprot_factory.getProtocol(tr)
(fname, mtype, rseqid) = iprot.readMessageBegin()
try:
method = self.recv_map[fname]
except KeyError:
method = getattr(self.client, 'recv_' + fname)
self.recv_map[fname] = method
method(iprot, mtype, rseqid)
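# --- Illustrative client wiring (not part of the original module). ---
# A minimal sketch, assuming a Thrift-generated ``Calculator`` service built
# with the py:twisted option and TBinaryProtocol; module names, host and port
# are placeholders.
def _example_connect_client(reactor, host='127.0.0.1', port=9090):
    from twisted.internet.protocol import ClientCreator
    from thrift.protocol import TBinaryProtocol
    from tutorial import Calculator  # hypothetical generated module
    d = ClientCreator(reactor, ThriftClientProtocol, Calculator.Client,
                      TBinaryProtocol.TBinaryProtocolFactory()).connectTCP(host, port)
    # ``started`` fires with the ready-to-use client once connectionMade runs
    d.addCallback(lambda proto: proto.started)
    return d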
class ThriftSASLClientProtocol(ThriftClientProtocol):
START = 1
OK = 2
BAD = 3
ERROR = 4
COMPLETE = 5
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None,
host=None, service=None, mechanism='GSSAPI', **sasl_kwargs):
"""
host: the name of the server, from a SASL perspective
service: the name of the server's service, from a SASL perspective
mechanism: the name of the preferred mechanism to use
All other kwargs will be passed to the puresasl.client.SASLClient
constructor.
"""
from puresasl.client import SASLClient
self.SASLClient = SASLClient
ThriftClientProtocol.__init__(self, client_class, iprot_factory, oprot_factory)
self._sasl_negotiation_deferred = None
self._sasl_negotiation_status = None
self.client = None
if host is not None:
self.createSASLClient(host, service, mechanism, **sasl_kwargs)
def createSASLClient(self, host, service, mechanism, **kwargs):
self.sasl = self.SASLClient(host, service, mechanism, **kwargs)
def dispatch(self, msg):
encoded = self.sasl.wrap(msg)
len_and_encoded = b''.join((struct.pack('!i', len(encoded)), encoded))
ThriftClientProtocol.dispatch(self, len_and_encoded)
@defer.inlineCallbacks
def connectionMade(self):
self._sendSASLMessage(self.START, self.sasl.mechanism)
initial_message = yield deferToThread(self.sasl.process)
self._sendSASLMessage(self.OK, initial_message)
while True:
status, challenge = yield self._receiveSASLMessage()
if status == self.OK:
response = yield deferToThread(self.sasl.process, challenge)
self._sendSASLMessage(self.OK, response)
elif status == self.COMPLETE:
if not self.sasl.complete:
msg = "The server erroneously indicated that SASL " \
"negotiation was complete"
raise TTransport.TTransportException(msg, message=msg)
else:
break
else:
msg = "Bad SASL negotiation status: %d (%s)" % (status, challenge)
raise TTransport.TTransportException(msg, message=msg)
self._sasl_negotiation_deferred = None
ThriftClientProtocol.connectionMade(self)
def _sendSASLMessage(self, status, body):
if body is None:
body = ""
header = struct.pack(">BI", status, len(body))
self.transport.write(header + body)
def _receiveSASLMessage(self):
self._sasl_negotiation_deferred = defer.Deferred()
self._sasl_negotiation_status = None
return self._sasl_negotiation_deferred
def connectionLost(self, reason=connectionDone):
if self.client:
ThriftClientProtocol.connectionLost(self, reason)
def dataReceived(self, data):
if self._sasl_negotiation_deferred:
# we got a SASL challenge in the format (status, length, challenge);
# save the status and let IntNStringReceiver piece the challenge data together
self._sasl_negotiation_status, = struct.unpack("B", data[:1])
ThriftClientProtocol.dataReceived(self, data[1:])
else:
# normal frame, let IntNStringReceiver piece it together
ThriftClientProtocol.dataReceived(self, data)
def stringReceived(self, frame):
if self._sasl_negotiation_deferred:
# the frame is just a SASL challenge
response = (self._sasl_negotiation_status, frame)
self._sasl_negotiation_deferred.callback(response)
else:
# there's a second 4 byte length prefix inside the frame
decoded_frame = self.sasl.unwrap(frame[4:])
ThriftClientProtocol.stringReceived(self, decoded_frame)
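# --- Illustrative SASL construction (not part of the original module). ---
# A sketch only: the host, service and credential keyword arguments below are
# placeholders that are passed straight through to puresasl.client.SASLClient.
def _example_sasl_protocol(client_class, protocol_factory):
    return ThriftSASLClientProtocol(client_class, protocol_factory,
                                    host='thrift.example.com',
                                    service='example-service',
                                    mechanism='PLAIN',
                                    username='user', password='secret')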
class ThriftServerProtocol(basic.Int32StringReceiver):
MAX_LENGTH = 2 ** 31 - 1
def dispatch(self, msg):
self.sendString(msg)
def processError(self, error):
self.transport.loseConnection()
def processOk(self, _, tmo):
msg = tmo.getvalue()
if len(msg) > 0:
self.dispatch(msg)
def stringReceived(self, frame):
tmi = TTransport.TMemoryBuffer(frame)
tmo = TTransport.TMemoryBuffer()
iprot = self.factory.iprot_factory.getProtocol(tmi)
oprot = self.factory.oprot_factory.getProtocol(tmo)
d = self.factory.processor.process(iprot, oprot)
d.addCallbacks(self.processOk, self.processError,
callbackArgs=(tmo,))
class IThriftServerFactory(Interface):
processor = Attribute("Thrift processor")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
class IThriftClientFactory(Interface):
client_class = Attribute("Thrift client class")
iprot_factory = Attribute("Input protocol factory")
oprot_factory = Attribute("Output protocol factory")
@implementer(IThriftServerFactory)
class ThriftServerFactory(ServerFactory):
protocol = ThriftServerProtocol
def __init__(self, processor, iprot_factory, oprot_factory=None):
self.processor = processor
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
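# --- Illustrative server wiring (not part of the original module). ---
# A minimal sketch, assuming a Thrift-generated ``Calculator`` processor built
# with the py:twisted option and a handler implementing its interface; the
# module name and port are placeholders.
def _example_listen(reactor, handler, port=9090):
    from thrift.protocol import TBinaryProtocol
    from tutorial import Calculator  # hypothetical generated module
    processor = Calculator.Processor(handler)
    pfactory = TBinaryProtocol.TBinaryProtocolFactory()
    reactor.listenTCP(port, ThriftServerFactory(processor, pfactory))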
@implementer(IThriftClientFactory)
class ThriftClientFactory(ClientFactory):
protocol = ThriftClientProtocol
def __init__(self, client_class, iprot_factory, oprot_factory=None):
self.client_class = client_class
self.iprot_factory = iprot_factory
if oprot_factory is None:
self.oprot_factory = iprot_factory
else:
self.oprot_factory = oprot_factory
def buildProtocol(self, addr):
p = self.protocol(self.client_class, self.iprot_factory,
self.oprot_factory)
p.factory = self
return p
class ThriftResource(resource.Resource):
allowedMethods = ('POST',)
def __init__(self, processor, inputProtocolFactory,
outputProtocolFactory=None):
resource.Resource.__init__(self)
self.inputProtocolFactory = inputProtocolFactory
if outputProtocolFactory is None:
self.outputProtocolFactory = inputProtocolFactory
else:
self.outputProtocolFactory = outputProtocolFactory
self.processor = processor
def getChild(self, path, request):
return self
def _cbProcess(self, _, request, tmo):
msg = tmo.getvalue()
request.setResponseCode(http.OK)
request.setHeader("content-type", "application/x-thrift")
request.write(msg)
request.finish()
def render_POST(self, request):
request.content.seek(0, 0)
data = request.content.read()
tmi = TTransport.TMemoryBuffer(data)
tmo = TTransport.TMemoryBuffer()
iprot = self.inputProtocolFactory.getProtocol(tmi)
oprot = self.outputProtocolFactory.getProtocol(tmo)
d = self.processor.process(iprot, oprot)
d.addCallback(self._cbProcess, request, tmo)
return server.NOT_DONE_YET
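# --- Illustrative twisted.web wiring (not part of the original module). ---
# A sketch of exposing a Thrift processor over HTTP; the child path is a
# placeholder, and ``processor``/``pfactory`` come from generated service code.
def _example_http_site(processor, pfactory):
    root = resource.Resource()
    root.putChild(b'thrift', ThriftResource(processor, pfactory))
    return server.Site(root)  # e.g. reactor.listenTCP(8080, site)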
| apache-2.0 |
jainanisha90/WeVoteServer | office/views_admin.py | 1 | 57667 | # office/views_admin.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
from .controllers import add_contest_office_name_to_next_spot, fetch_duplicate_office_count, \
find_duplicate_contest_office, figure_out_office_conflict_values, merge_if_duplicate_offices, \
offices_import_from_master_server
from .models import ContestOffice, ContestOfficeListManager, ContestOfficeManager, CONTEST_OFFICE_UNIQUE_IDENTIFIERS
from admin_tools.views import redirect_to_sign_in_page
from ballot.controllers import move_ballot_items_to_another_office
from bookmark.models import BookmarkItemList
from candidate.controllers import move_candidates_to_another_office
from candidate.models import CandidateCampaign, CandidateCampaignListManager, fetch_candidate_count_for_office
from config.base import get_environment_variable
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.messages import get_messages
from django.shortcuts import render
from django.db.models import Q
from election.models import Election, ElectionManager
from exception.models import handle_record_found_more_than_one_exception,\
handle_record_not_found_exception, handle_record_not_saved_exception, print_to_log
from position.controllers import move_positions_to_another_office
from position.models import OPPOSE, PositionListManager, SUPPORT
from voter.models import voter_has_authority
import wevote_functions.admin
from wevote_functions.functions import convert_to_int, positive_value_exists, STATE_CODE_MAP
from django.http import HttpResponse
import json
OFFICES_SYNC_URL = get_environment_variable("OFFICES_SYNC_URL") # officesSyncOut
WE_VOTE_SERVER_ROOT_URL = get_environment_variable("WE_VOTE_SERVER_ROOT_URL")
logger = wevote_functions.admin.get_logger(__name__)
@login_required
def compare_two_offices_for_merge_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
contest_office1_we_vote_id = request.GET.get('contest_office1_we_vote_id', 0)
contest_office2_we_vote_id = request.GET.get('contest_office2_we_vote_id', 0)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
google_civic_election_id = convert_to_int(google_civic_election_id)
contest_office_manager = ContestOfficeManager()
contest_office_results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office1_we_vote_id)
if not contest_office_results['contest_office_found']:
messages.add_message(request, messages.ERROR, "Contest Office1 not found.")
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
contest_office_option1_for_template = contest_office_results['contest_office']
contest_office_results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office2_we_vote_id)
if not contest_office_results['contest_office_found']:
messages.add_message(request, messages.ERROR, "Contest Office2 not found.")
return HttpResponseRedirect(reverse('office:office_summary', args=(contest_office_option1_for_template.id,)) +
"?google_civic_election_id=" + str(google_civic_election_id))
contest_office_option2_for_template = contest_office_results['contest_office']
contest_office_merge_conflict_values = figure_out_office_conflict_values(
contest_office_option1_for_template, contest_office_option2_for_template)
# This view function takes us to displaying a template
remove_duplicate_process = False # Do not try to find another office to merge after finishing
return render_contest_office_merge_form(request, contest_office_option1_for_template,
contest_office_option2_for_template,
contest_office_merge_conflict_values,
remove_duplicate_process)
# This page does not need to be protected.
# NOTE: @login_required() throws an error. Needs to be figured out if we ever want to secure this page.
# class OfficesSyncOutView(APIView):
# def get(self, request, format=None):
def offices_sync_out_view(request): # officesSyncOut
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
try:
contest_office_list = ContestOffice.objects.using('readonly').all()
if positive_value_exists(google_civic_election_id):
contest_office_list = contest_office_list.filter(google_civic_election_id=google_civic_election_id)
if positive_value_exists(state_code):
contest_office_list = contest_office_list.filter(state_code__iexact=state_code)
# serializer = ContestOfficeSerializer(contest_office_list, many=True)
# return Response(serializer.data)
# get the data using values_list
contest_office_list_dict = contest_office_list.values('we_vote_id', 'office_name', 'google_civic_election_id',
'ocd_division_id', 'maplight_id',
'ballotpedia_id', 'ballotpedia_office_id',
'ballotpedia_office_name', 'ballotpedia_office_url',
'ballotpedia_race_id', 'ballotpedia_race_office_level',
'google_ballot_placement',
'google_civic_office_name', 'google_civic_office_name2',
'google_civic_office_name3', 'google_civic_office_name4',
'google_civic_office_name5',
'wikipedia_id', 'number_voting_for', 'number_elected',
'state_code', 'primary_party', 'district_name',
'district_scope', 'district_id', 'contest_level0',
'contest_level1', 'contest_level2',
                                                      'electorate_specifications', 'special')
if contest_office_list_dict:
contest_office_list_json = list(contest_office_list_dict)
return HttpResponse(json.dumps(contest_office_list_json), content_type='application/json')
except ContestOffice.DoesNotExist:
pass
json_data = {
'success': False,
'status': 'CONTEST_OFFICE_MISSING'
}
return HttpResponse(json.dumps(json_data), content_type='application/json')
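# --- Illustrative consumer sketch (not part of the original views). ---
# How another server might pull this officesSyncOut feed; the sync_url value is
# a placeholder (in practice it comes from the OFFICES_SYNC_URL setting), and
# this sketch assumes the requests package is available.
def _example_fetch_offices_sync_out(sync_url, google_civic_election_id, state_code=''):
    import requests
    response = requests.get(sync_url, params={
        'google_civic_election_id': google_civic_election_id,
        'state_code': state_code,
    })
    return response.json()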
@login_required
def offices_import_from_master_server_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'admin'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
if WE_VOTE_SERVER_ROOT_URL in OFFICES_SYNC_URL:
messages.add_message(request, messages.ERROR, "Cannot sync with Master We Vote Server -- "
"this is the Master We Vote Server.")
return HttpResponseRedirect(reverse('admin_tools:admin_home', args=()))
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
results = offices_import_from_master_server(request, google_civic_election_id, state_code)
if not results['success']:
messages.add_message(request, messages.ERROR, results['status'])
else:
messages.add_message(request, messages.INFO, 'Offices import completed. '
'Saved: {saved}, Updated: {updated}, '
'Duplicates skipped: '
'{duplicates_removed}, '
'Not processed: {not_processed}'
''.format(saved=results['saved'],
updated=results['updated'],
duplicates_removed=results['duplicates_removed'],
not_processed=results['not_processed']))
return HttpResponseRedirect(reverse('admin_tools:sync_dashboard', args=()) + "?google_civic_election_id=" +
str(google_civic_election_id) + "&state_code=" + str(state_code))
@login_required
def office_list_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'partner_organization', 'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
status = ""
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', '')
show_all_elections = request.GET.get('show_all_elections', False)
office_search = request.GET.get('office_search', '')
office_list_found = False
office_list = []
updated_office_list = []
office_list_count = 0
try:
office_queryset = ContestOffice.objects.all()
if positive_value_exists(google_civic_election_id):
office_queryset = office_queryset.filter(google_civic_election_id=google_civic_election_id)
else:
# TODO Limit this search to upcoming_elections only
pass
if positive_value_exists(state_code):
office_queryset = office_queryset.filter(state_code__iexact=state_code)
office_queryset = office_queryset.order_by("office_name")
if positive_value_exists(office_search):
search_words = office_search.split()
for one_word in search_words:
filters = [] # Reset for each search word
new_filter = Q(office_name__icontains=one_word)
filters.append(new_filter)
new_filter = Q(we_vote_id__icontains=one_word)
filters.append(new_filter)
new_filter = Q(wikipedia_id__icontains=one_word)
filters.append(new_filter)
new_filter = Q(ballotpedia_race_id__iexact=one_word)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
office_queryset = office_queryset.filter(final_filters)
office_list_count = office_queryset.count()
office_queryset = office_queryset[:500]
office_list = list(office_queryset)
if len(office_list):
office_list_found = True
status += 'OFFICES_RETRIEVED '
success = True
else:
status += 'NO_OFFICES_RETRIEVED '
success = True
except ContestOffice.DoesNotExist:
# No offices found. Not a problem.
status += 'NO_OFFICES_FOUND_DoesNotExist '
office_list = []
success = True
except Exception as e:
status += 'FAILED retrieve_all_offices_for_upcoming_election ' \
'{error} [type: {error_type}]'.format(error=e, error_type=type(e)) + " "
success = False
if office_list_found:
position_list_manager = PositionListManager()
for office in office_list:
office.candidate_count = fetch_candidate_count_for_office(office.id)
office.positions_count = position_list_manager.fetch_public_positions_count_for_contest_office(
office.id, office.we_vote_id)
updated_office_list.append(office)
election_manager = ElectionManager()
if positive_value_exists(show_all_elections):
results = election_manager.retrieve_elections()
election_list = results['election_list']
else:
results = election_manager.retrieve_upcoming_elections()
election_list = results['election_list']
# Make sure we always include the current election in the election_list, even if it is older
if positive_value_exists(google_civic_election_id):
this_election_found = False
for one_election in election_list:
if convert_to_int(one_election.google_civic_election_id) == convert_to_int(google_civic_election_id):
this_election_found = True
break
if not this_election_found:
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_list.append(election)
state_list = STATE_CODE_MAP
sorted_state_list = sorted(state_list.items())
status_print_list = ""
status_print_list += "office_list_count: " + \
str(office_list_count) + " "
messages.add_message(request, messages.INFO, status_print_list)
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'office_list': updated_office_list,
'office_search': office_search,
'election_list': election_list,
'state_code': state_code,
'show_all_elections': show_all_elections,
'state_list': sorted_state_list,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'office/office_list.html', template_values)
@login_required
def office_new_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
state_code = request.GET.get('state_code', "")
office_list_manager = ContestOfficeListManager()
updated_office_list = []
results = office_list_manager.retrieve_all_offices_for_upcoming_election(google_civic_election_id, state_code, True)
if results['office_list_found']:
office_list = results['office_list_objects']
for office in office_list:
office.candidate_count = fetch_candidate_count_for_office(office.id)
updated_office_list.append(office)
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'google_civic_election_id': google_civic_election_id,
'office_list': updated_office_list,
}
return render(request, 'office/office_edit.html', template_values)
@login_required
def office_edit_view(request, office_id=0, contest_office_we_vote_id=""):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
office_id = convert_to_int(office_id)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
office_on_stage = ContestOffice()
office_on_stage_found = False
try:
if positive_value_exists(office_id):
office_on_stage = ContestOffice.objects.get(id=office_id)
else:
office_on_stage = ContestOffice.objects.get(we_vote_id=contest_office_we_vote_id)
office_on_stage_found = True
except ContestOffice.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except ContestOffice.DoesNotExist:
# This is fine, create new
pass
if office_on_stage_found:
# Was a contest_office_merge_possibility_found?
office_on_stage.contest_office_merge_possibility_found = True # TODO DALE Make dynamic
template_values = {
'messages_on_stage': messages_on_stage,
'office': office_on_stage,
'google_civic_election_id': google_civic_election_id,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
'google_civic_election_id': google_civic_election_id,
}
return render(request, 'office/office_edit.html', template_values)
@login_required
def office_edit_process_view(request):
"""
Process the new or edit office forms
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
office_id = convert_to_int(request.POST.get('office_id', 0))
office_name = request.POST.get('office_name', False)
google_civic_office_name = request.POST.get('google_civic_office_name', False)
google_civic_office_name2 = request.POST.get('google_civic_office_name2', False)
google_civic_office_name3 = request.POST.get('google_civic_office_name3', False)
google_civic_office_name4 = request.POST.get('google_civic_office_name4', False)
google_civic_office_name5 = request.POST.get('google_civic_office_name5', False)
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
ocd_division_id = request.POST.get('ocd_division_id', False)
primary_party = request.POST.get('primary_party', False)
state_code = request.POST.get('state_code', False)
ballotpedia_office_id = request.POST.get('ballotpedia_office_id', False) # Related to elected_office
ballotpedia_race_id = request.POST.get('ballotpedia_race_id', False) # Related to contest_office
ballotpedia_office_name = request.POST.get('ballotpedia_office_name', False)
remove_duplicate_process = request.POST.get('remove_duplicate_process', False)
redirect_to_contest_office_list = convert_to_int(request.POST.get('redirect_to_contest_office_list', 0))
election_state = ''
if state_code is not False:
election_state = state_code
elif google_civic_election_id:
election_manager = ElectionManager()
results = election_manager.retrieve_election(google_civic_election_id)
if results['election_found']:
election = results['election']
election_state = election.get_election_state()
# Check to see if this office is already in the database
office_on_stage_found = False
try:
office_query = ContestOffice.objects.filter(id=office_id)
if len(office_query):
office_on_stage = office_query[0]
office_on_stage_found = True
except Exception as e:
handle_record_not_found_exception(e, logger=logger)
try:
if office_on_stage_found:
# Update
# Removed for now: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and
if office_name is not False:
office_on_stage.office_name = office_name
if google_civic_office_name is not False:
office_on_stage.google_civic_office_name = google_civic_office_name
if google_civic_office_name2 is not False:
office_on_stage.google_civic_office_name2 = google_civic_office_name2
if google_civic_office_name3 is not False:
office_on_stage.google_civic_office_name3 = google_civic_office_name3
if google_civic_office_name4 is not False:
office_on_stage.google_civic_office_name4 = google_civic_office_name4
if google_civic_office_name5 is not False:
office_on_stage.google_civic_office_name5 = google_civic_office_name5
if ocd_division_id is not False:
office_on_stage.ocd_division_id = ocd_division_id
if primary_party is not False:
office_on_stage.primary_party = primary_party
if positive_value_exists(election_state):
office_on_stage.state_code = election_state
if ballotpedia_office_id is not False:
office_on_stage.ballotpedia_office_id = convert_to_int(ballotpedia_office_id)
if ballotpedia_office_name is not False:
office_on_stage.ballotpedia_office_name = ballotpedia_office_name
if ballotpedia_race_id is not False:
office_on_stage.ballotpedia_race_id = convert_to_int(ballotpedia_race_id)
office_on_stage.save()
office_on_stage_id = office_on_stage.id
messages.add_message(request, messages.INFO, 'Office updated.')
google_civic_election_id = office_on_stage.google_civic_election_id
return HttpResponseRedirect(reverse('office:office_summary', args=(office_on_stage_id,)) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
else:
# Create new
office_on_stage = ContestOffice(
office_name=office_name,
google_civic_election_id=google_civic_election_id,
state_code=election_state,
)
# Removing this limitation: convert_to_int(office_on_stage.google_civic_election_id) >= 1000000 and
if google_civic_office_name is not False:
office_on_stage.google_civic_office_name = google_civic_office_name
if ocd_division_id is not False:
office_on_stage.ocd_division_id = ocd_division_id
if primary_party is not False:
office_on_stage.primary_party = primary_party
if ballotpedia_office_id is not False:
office_on_stage.ballotpedia_office_id = convert_to_int(ballotpedia_office_id)
if ballotpedia_office_name is not False:
office_on_stage.ballotpedia_office_name = ballotpedia_office_name
if ballotpedia_race_id is not False:
office_on_stage.ballotpedia_race_id = convert_to_int(ballotpedia_race_id)
office_on_stage.save()
messages.add_message(request, messages.INFO, 'New office saved.')
# Come back to the "Create New Office" page
return HttpResponseRedirect(reverse('office:office_new', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
except Exception as e:
handle_record_not_saved_exception(e, logger=logger)
messages.add_message(request, messages.ERROR, 'Could not save office.')
if redirect_to_contest_office_list:
return HttpResponseRedirect(reverse('office:office_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code))
if remove_duplicate_process:
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
else:
return HttpResponseRedirect(reverse('office:office_edit', args=(office_id,)))
@login_required
def office_summary_view(request, office_id):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'partner_organization', 'verified_volunteer'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
messages_on_stage = get_messages(request)
office_id = convert_to_int(office_id)
office_we_vote_id = ""
contest_office_found = False
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
state_code = request.GET.get('state_code', "")
office_search = request.GET.get('office_search', "")
try:
contest_office = ContestOffice.objects.get(id=office_id)
contest_office_found = True
office_we_vote_id = contest_office.we_vote_id
google_civic_election_id = contest_office.google_civic_election_id
except ContestOffice.MultipleObjectsReturned as e:
handle_record_found_more_than_one_exception(e, logger=logger)
except ContestOffice.DoesNotExist:
# This is fine, create new
pass
candidate_list_modified = []
position_list_manager = PositionListManager()
# Cache the last names of candidates for the root contest_office so we can check whether possible duplicate
# offices share the same candidates
root_office_candidate_last_names = ""
try:
candidate_list = CandidateCampaign.objects.filter(contest_office_id=office_id)
if positive_value_exists(google_civic_election_id):
candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
candidate_list = candidate_list.order_by('candidate_name')
support_total = 0
for one_candidate in candidate_list:
# Find the count of Voters that support this candidate (Organizations are not included in this)
one_candidate.support_count = position_list_manager.fetch_voter_positions_count_for_candidate_campaign(
one_candidate.id, "", SUPPORT)
one_candidate.oppose_count = position_list_manager.fetch_voter_positions_count_for_candidate_campaign(
one_candidate.id, "", OPPOSE)
support_total += one_candidate.support_count
root_office_candidate_last_names += " " + one_candidate.extract_last_name()
for one_candidate in candidate_list:
if positive_value_exists(support_total):
percentage_of_support_number = one_candidate.support_count / support_total * 100
one_candidate.percentage_of_support = "%.1f" % percentage_of_support_number
candidate_list_modified.append(one_candidate)
except CandidateCampaign.DoesNotExist:
# This is fine, no candidates found for this office
pass
root_office_candidate_last_names = root_office_candidate_last_names.lower()
election_list = Election.objects.order_by('-election_day_text')
election = None
if positive_value_exists(google_civic_election_id):
    election = Election.objects.get(google_civic_election_id=google_civic_election_id)
office_search_results_list = []
if positive_value_exists(office_search):
office_queryset = ContestOffice.objects.all()
office_queryset = office_queryset.filter(google_civic_election_id=google_civic_election_id)
office_queryset = office_queryset.exclude(we_vote_id__iexact=office_we_vote_id)
if positive_value_exists(state_code):
office_queryset = office_queryset.filter(state_code__iexact=state_code)
search_words = office_search.split()
for one_word in search_words:
filters = [] # Reset for each search word
new_filter = Q(office_name__icontains=one_word)
filters.append(new_filter)
new_filter = Q(we_vote_id__icontains=one_word)
filters.append(new_filter)
new_filter = Q(wikipedia_id__icontains=one_word)
filters.append(new_filter)
# Add the first query
if len(filters):
final_filters = filters.pop()
# ...and "OR" the remaining items in the list
for item in filters:
final_filters |= item
office_queryset = office_queryset.filter(final_filters)
office_search_results_list = list(office_queryset)
elif contest_office_found:
ignore_office_we_vote_id_list = []
ignore_office_we_vote_id_list.append(contest_office.we_vote_id)
results = find_duplicate_contest_office(contest_office, ignore_office_we_vote_id_list)
if results['contest_office_merge_possibility_found']:
office_search_results_list = results['contest_office_list']
# Show the candidates under each office
candidate_list_read_only = True
candidate_list_manager = CandidateCampaignListManager()
office_search_results_list_modified = []
for one_office in office_search_results_list:
office_id = 0
if positive_value_exists(one_office.we_vote_id):
contest_office_option1_results = candidate_list_manager.retrieve_all_candidates_for_office(
office_id, one_office.we_vote_id, candidate_list_read_only)
if contest_office_option1_results['candidate_list_found']:
one_office.candidates_string = ""
candidate_list = contest_office_option1_results['candidate_list']
for one_candidate in candidate_list:
one_office.candidates_string += one_candidate.display_candidate_name() + ", "
candidate_last_name = one_candidate.extract_last_name()
candidate_last_name_lower = candidate_last_name.lower()
if candidate_last_name_lower in root_office_candidate_last_names:
one_office.candidates_match_root_office = True
office_search_results_list_modified.append(one_office)
if contest_office_found:
template_values = {
'messages_on_stage': messages_on_stage,
'office': contest_office,
'candidate_list': candidate_list_modified,
'state_code': state_code,
'election': election,
'election_list': election_list,
'office_search': office_search,
'office_search_results_list': office_search_results_list_modified,
'google_civic_election_id': google_civic_election_id,
}
else:
template_values = {
'messages_on_stage': messages_on_stage,
}
return render(request, 'office/office_summary.html', template_values)
@login_required
def office_delete_process_view(request):
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
office_id = convert_to_int(request.GET.get('office_id', 0))
google_civic_election_id = convert_to_int(request.GET.get('google_civic_election_id', 0))
office_on_stage_found = False
office_on_stage = ContestOffice()
try:
office_on_stage = ContestOffice.objects.get(id=office_id)
office_on_stage_found = True
google_civic_election_id = office_on_stage.google_civic_election_id
except ContestOffice.MultipleObjectsReturned as e:
pass
except ContestOffice.DoesNotExist:
pass
candidates_found_for_this_office = False
if office_on_stage_found:
try:
candidate_list = CandidateCampaign.objects.filter(contest_office_id=office_id)
# if positive_value_exists(google_civic_election_id):
# candidate_list = candidate_list.filter(google_civic_election_id=google_civic_election_id)
candidate_list = candidate_list.order_by('candidate_name')
if len(candidate_list):
candidates_found_for_this_office = True
except CandidateCampaign.DoesNotExist:
pass
try:
if not candidates_found_for_this_office:
# Delete the office
office_on_stage.delete()
messages.add_message(request, messages.INFO, 'Office deleted.')
else:
messages.add_message(request, messages.ERROR, 'Could not delete -- '
'candidates still attached to this office.')
return HttpResponseRedirect(reverse('office:office_summary', args=(office_id,)))
except Exception as e:
messages.add_message(request, messages.ERROR, 'Could not delete office -- exception.')
return HttpResponseRedirect(reverse('office:office_summary', args=(office_id,)))
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
@login_required
def find_duplicate_office_view(request, office_id=0):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
office_list = []
number_of_duplicate_contest_offices_processed = 0
number_of_duplicate_contest_offices_failed = 0
number_of_duplicates_could_not_process = 0
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
google_civic_election_id = convert_to_int(google_civic_election_id)
contest_office_manager = ContestOfficeManager()
contest_office_results = contest_office_manager.retrieve_contest_office_from_id(office_id)
if not contest_office_results['contest_office_found']:
messages.add_message(request, messages.ERROR, "Contest Office not found.")
return HttpResponseRedirect(reverse('office:office_list', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id))
contest_office = contest_office_results['contest_office']
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR,
"Contest Office must have a google_civic_election_id in order to merge.")
return HttpResponseRedirect(reverse('office:office_edit', args=(office_id,)))
ignore_office_we_vote_id_list = []
ignore_office_we_vote_id_list.append(contest_office.we_vote_id)
results = find_duplicate_contest_office(contest_office, ignore_office_we_vote_id_list)
# If we find contest offices to merge, stop and ask for confirmation
if results['contest_office_merge_possibility_found']:
contest_office_option1_for_template = contest_office
contest_office_option2_for_template = results['contest_office_merge_possibility']
# This view function takes us to displaying a template
remove_duplicate_process = True # Try to find another office to merge after finishing
return render_contest_office_merge_form(request, contest_office_option1_for_template,
contest_office_option2_for_template,
results['contest_office_merge_conflict_values'],
remove_duplicate_process)
message = "Google Civic Election ID: {election_id}, " \
"{number_of_duplicate_contest_offices_processed} duplicates processed, " \
"{number_of_duplicate_contest_offices_failed} duplicate merges failed, " \
"{number_of_duplicates_could_not_process} could not be processed because 3 exist " \
"".format(election_id=google_civic_election_id,
number_of_duplicate_contest_offices_processed=number_of_duplicate_contest_offices_processed,
number_of_duplicate_contest_offices_failed=number_of_duplicate_contest_offices_failed,
number_of_duplicates_could_not_process=number_of_duplicates_could_not_process)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(reverse('office:office_edit', args=(office_id,)) +
"?google_civic_election_id={var}".format(
var=google_civic_election_id))
@login_required
def find_and_merge_duplicate_offices_view(request):
# admin, partner_organization, political_data_manager, political_data_viewer, verified_volunteer
authority_required = {'political_data_manager'}
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
contest_office_list = []
ignore_office_we_vote_id_list = []
find_number_of_duplicates = request.GET.get('find_number_of_duplicates', 0)
google_civic_election_id = request.GET.get('google_civic_election_id', 0)
google_civic_election_id = convert_to_int(google_civic_election_id)
state_code = request.GET.get('state_code', "")
contest_office_manager = ContestOfficeManager()
# We only want to process if a google_civic_election_id comes in
if not positive_value_exists(google_civic_election_id):
messages.add_message(request, messages.ERROR, "Google Civic Election ID required.")
return HttpResponseRedirect(reverse('office:office_list', args=()))
try:
# We sort by ID so that the entry which was saved first becomes the "master"
contest_office_query = ContestOffice.objects.order_by('id')
contest_office_query = contest_office_query.filter(google_civic_election_id=google_civic_election_id)
contest_office_list = list(contest_office_query)
except ContestOffice.DoesNotExist:
pass
# Loop through all of the offices in this election to see how many have possible duplicates
if positive_value_exists(find_number_of_duplicates):
duplicate_office_count = 0
for contest_office in contest_office_list:
# Note that we don't reset the ignore_office_we_vote_id_list, so we don't search for a duplicate
# both directions
ignore_office_we_vote_id_list.append(contest_office.we_vote_id)
duplicate_office_count_temp = fetch_duplicate_office_count(contest_office,
ignore_office_we_vote_id_list)
duplicate_office_count += duplicate_office_count_temp
if positive_value_exists(duplicate_office_count):
messages.add_message(request, messages.INFO, "There are approximately {duplicate_office_count} "
"possible duplicates."
"".format(duplicate_office_count=duplicate_office_count))
# Loop through all of the contest offices in this election
ignore_office_we_vote_id_list = []
for contest_office in contest_office_list:
# Add current contest office entry to the ignore list
ignore_office_we_vote_id_list.append(contest_office.we_vote_id)
# Now check for other contest offices we have labeled as "not a duplicate"
not_a_duplicate_list = contest_office_manager.fetch_offices_are_not_duplicates_list_we_vote_ids(
contest_office.we_vote_id)
ignore_office_we_vote_id_list += not_a_duplicate_list
results = find_duplicate_contest_office(contest_office, ignore_office_we_vote_id_list)
ignore_office_we_vote_id_list = []
# If we find contest offices to merge, stop and ask for confirmation
if results['contest_office_merge_possibility_found']:
contest_office_option1_for_template = contest_office
contest_office_option2_for_template = results['contest_office_merge_possibility']
# Can we automatically merge these offices?
merge_results = merge_if_duplicate_offices(
contest_office_option1_for_template, contest_office_option2_for_template,
results['contest_office_merge_conflict_values'])
if merge_results['offices_merged']:
office = merge_results['office']
message = "Office '{office_name}' automatically merged.".format(office_name=office.office_name)
# print_to_log(logger, exception_message_optional=message)
print(message)
# try:
# messages.add_message(request, messages.INFO, "Office {office_name} automatically merged."
# "".format(office_name=office.office_name))
# except Exception as e:
# pass
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
else:
if merge_results['success'] is False:
messages.add_message(request, messages.INFO, "AUTO_MERGE_ATTEMPT_FAILED: {status} "
"".format(status=merge_results['status']))
# This view function takes us to displaying a template
remove_duplicate_process = True # Try to find another office to merge after finishing
return render_contest_office_merge_form(request, contest_office_option1_for_template,
contest_office_option2_for_template,
results['contest_office_merge_conflict_values'],
remove_duplicate_process)
message = "Google Civic Election ID: {election_id}, " \
"No duplicate contest offices found for this election." \
"".format(election_id=google_civic_election_id)
messages.add_message(request, messages.INFO, message)
return HttpResponseRedirect(reverse('office:office_list', args=()) + "?google_civic_election_id={var}"
"".format(var=google_civic_election_id))
def render_contest_office_merge_form(
request, contest_office_option1_for_template, contest_office_option2_for_template,
contest_office_merge_conflict_values, remove_duplicate_process=True):
position_list_manager = PositionListManager()
bookmark_item_list_manager = BookmarkItemList()
# Get positions counts for both offices
contest_office_option1_for_template.public_positions_count = \
position_list_manager.fetch_public_positions_count_for_contest_office(
contest_office_option1_for_template.id, contest_office_option1_for_template.we_vote_id)
contest_office_option1_for_template.friends_positions_count = \
position_list_manager.fetch_friends_only_positions_count_for_contest_office(
contest_office_option1_for_template.id, contest_office_option1_for_template.we_vote_id)
# Bookmarks for option 1
bookmark_results1 = bookmark_item_list_manager.retrieve_bookmark_item_list_for_contest_office(
contest_office_option1_for_template.we_vote_id)
if bookmark_results1['bookmark_item_list_found']:
bookmark_item_list = bookmark_results1['bookmark_item_list']
contest_office_option1_bookmark_count = len(bookmark_item_list)
else:
contest_office_option1_bookmark_count = 0
contest_office_option1_for_template.bookmarks_count = contest_office_option1_bookmark_count
contest_office_option2_for_template.public_positions_count = \
position_list_manager.fetch_public_positions_count_for_contest_office(
contest_office_option2_for_template.id, contest_office_option2_for_template.we_vote_id)
contest_office_option2_for_template.friends_positions_count = \
position_list_manager.fetch_friends_only_positions_count_for_contest_office(
contest_office_option2_for_template.id, contest_office_option2_for_template.we_vote_id)
# Bookmarks for option 2
bookmark_results2 = bookmark_item_list_manager.retrieve_bookmark_item_list_for_contest_office(
contest_office_option2_for_template.we_vote_id)
if bookmark_results2['bookmark_item_list_found']:
bookmark_item_list = bookmark_results2['bookmark_item_list']
contest_office_option2_bookmark_count = len(bookmark_item_list)
else:
contest_office_option2_bookmark_count = 0
contest_office_option2_for_template.bookmarks_count = contest_office_option2_bookmark_count
# Show the candidates under each office
office_id = 0
candidate_list_read_only = True
candidate_list_manager = CandidateCampaignListManager()
if positive_value_exists(contest_office_option1_for_template.we_vote_id):
contest_office_option1_results = candidate_list_manager.retrieve_all_candidates_for_office(
office_id, contest_office_option1_for_template.we_vote_id, candidate_list_read_only)
if contest_office_option1_results['candidate_list_found']:
contest_office_option1_for_template.candidates_string = ""
candidate_list = contest_office_option1_results['candidate_list']
for one_candidate in candidate_list:
contest_office_option1_for_template.candidates_string += one_candidate.display_candidate_name() + ", "
if positive_value_exists(contest_office_option2_for_template.we_vote_id):
contest_office_option2_results = candidate_list_manager.retrieve_all_candidates_for_office(
office_id, contest_office_option2_for_template.we_vote_id, candidate_list_read_only)
if contest_office_option2_results['candidate_list_found']:
contest_office_option2_for_template.candidates_string = ""
candidate_list = contest_office_option2_results['candidate_list']
for one_candidate in candidate_list:
contest_office_option2_for_template.candidates_string += one_candidate.display_candidate_name() + ", "
messages_on_stage = get_messages(request)
template_values = {
'messages_on_stage': messages_on_stage,
'contest_office_option1': contest_office_option1_for_template,
'contest_office_option2': contest_office_option2_for_template,
'conflict_values': contest_office_merge_conflict_values,
'google_civic_election_id': contest_office_option1_for_template.google_civic_election_id,
'remove_duplicate_process': remove_duplicate_process,
}
return render(request, 'office/office_merge.html', template_values)
@login_required
def office_merge_process_view(request):
"""
Process the merging of two offices. Note this is similar to office/controllers.py "merge_these_two_offices"
:param request:
:return:
"""
authority_required = {'verified_volunteer'} # admin, verified_volunteer
if not voter_has_authority(request, authority_required):
return redirect_to_sign_in_page(request, authority_required)
contest_office_manager = ContestOfficeManager()
merge = request.POST.get('merge', False)
skip = request.POST.get('skip', False)
# Contest office 1 is the one we keep, and Contest office 2 is the one we will merge into Contest office 1
contest_office1_we_vote_id = request.POST.get('contest_office1_we_vote_id', 0)
contest_office2_we_vote_id = request.POST.get('contest_office2_we_vote_id', 0)
google_civic_election_id = request.POST.get('google_civic_election_id', 0)
redirect_to_contest_office_list = positive_value_exists(request.POST.get('redirect_to_contest_office_list', False))
remove_duplicate_process = positive_value_exists(request.POST.get('remove_duplicate_process', False))
state_code = request.POST.get('state_code', '')
if positive_value_exists(skip):
results = contest_office_manager.update_or_create_contest_offices_are_not_duplicates(
contest_office1_we_vote_id, contest_office2_we_vote_id)
if not results['new_contest_offices_are_not_duplicates_created']:
messages.add_message(request, messages.ERROR, 'Could not save contest_offices_are_not_duplicates entry: ' +
results['status'])
messages.add_message(request, messages.INFO, 'Prior contest offices skipped, and not merged.')
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
contest_office1_results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office1_we_vote_id)
if contest_office1_results['contest_office_found']:
contest_office1_on_stage = contest_office1_results['contest_office']
contest_office1_id = contest_office1_on_stage.id
else:
messages.add_message(request, messages.ERROR, 'Could not retrieve office 1.')
return HttpResponseRedirect(reverse('office:office_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code))
contest_office2_results = contest_office_manager.retrieve_contest_office_from_we_vote_id(contest_office2_we_vote_id)
if contest_office2_results['contest_office_found']:
contest_office2_on_stage = contest_office2_results['contest_office']
contest_office2_id = contest_office2_on_stage.id
else:
messages.add_message(request, messages.ERROR, 'Could not retrieve contest office 2.')
return HttpResponseRedirect(reverse('office:office_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code))
# TODO: Migrate bookmarks - for now stop the merge process if there are bookmarks
bookmark_item_list_manager = BookmarkItemList()
bookmark_results = bookmark_item_list_manager.retrieve_bookmark_item_list_for_contest_office(
contest_office2_we_vote_id)
if bookmark_results['bookmark_item_list_found']:
messages.add_message(request, messages.ERROR, "Bookmarks found for Contest Office 2 - "
"automatic merge not working yet.")
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
# Merge attribute values
conflict_values = figure_out_office_conflict_values(contest_office1_on_stage, contest_office2_on_stage)
for attribute in CONTEST_OFFICE_UNIQUE_IDENTIFIERS:
conflict_value = conflict_values.get(attribute, None)
if conflict_value == "CONFLICT":
choice = request.POST.get(attribute + '_choice', '')
if contest_office2_we_vote_id == choice:
setattr(contest_office1_on_stage, attribute, getattr(contest_office2_on_stage, attribute))
elif conflict_value == "CONTEST_OFFICE2":
setattr(contest_office1_on_stage, attribute, getattr(contest_office2_on_stage, attribute))
# Preserve unique google_civic_office_name, _name2, _name3, _name4, and _name5
if positive_value_exists(contest_office2_on_stage.google_civic_office_name):
contest_office1_on_stage = add_contest_office_name_to_next_spot(
contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name)
if positive_value_exists(contest_office2_on_stage.google_civic_office_name2):
contest_office1_on_stage = add_contest_office_name_to_next_spot(
contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name2)
if positive_value_exists(contest_office2_on_stage.google_civic_office_name3):
contest_office1_on_stage = add_contest_office_name_to_next_spot(
contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name3)
if positive_value_exists(contest_office2_on_stage.google_civic_office_name4):
contest_office1_on_stage = add_contest_office_name_to_next_spot(
contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name4)
if positive_value_exists(contest_office2_on_stage.google_civic_office_name5):
contest_office1_on_stage = add_contest_office_name_to_next_spot(
contest_office1_on_stage, contest_office2_on_stage.google_civic_office_name5)
# Merge candidate's office details
candidates_results = move_candidates_to_another_office(contest_office2_id, contest_office2_we_vote_id,
contest_office1_id, contest_office1_we_vote_id,
contest_office1_on_stage)
if not candidates_results['success']:
messages.add_message(request, messages.ERROR, candidates_results['status'])
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
# TODO: Merge quick_info's office details in future
# Merge ballot item's office details
ballot_items_results = move_ballot_items_to_another_office(contest_office2_id, contest_office2_we_vote_id,
contest_office1_id, contest_office1_we_vote_id,
contest_office1_on_stage)
if not ballot_items_results['success']:
messages.add_message(request, messages.ERROR, ballot_items_results['status'])
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
# Merge public positions
public_positions_results = move_positions_to_another_office(contest_office2_id, contest_office2_we_vote_id,
contest_office1_id, contest_office1_we_vote_id,
True)
if not public_positions_results['success']:
messages.add_message(request, messages.ERROR, public_positions_results['status'])
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
# Merge friends-only positions
friends_positions_results = move_positions_to_another_office(contest_office2_id, contest_office2_we_vote_id,
contest_office1_id, contest_office1_we_vote_id,
False)
if not friends_positions_results['success']:
messages.add_message(request, messages.ERROR, friends_positions_results['status'])
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
# TODO: Migrate images?
# Note: holding off on wrapping this save in a try/except block
contest_office1_on_stage.save()
# There isn't any office data to refresh from other master tables
# Remove contest office 2
contest_office2_on_stage.delete()
if redirect_to_contest_office_list:
return HttpResponseRedirect(reverse('office:office_list', args=()) +
'?google_civic_election_id=' + str(google_civic_election_id) +
'&state_code=' + str(state_code))
if remove_duplicate_process:
return HttpResponseRedirect(reverse('office:find_and_merge_duplicate_offices', args=()) +
"?google_civic_election_id=" + str(google_civic_election_id) +
"&state_code=" + str(state_code))
return HttpResponseRedirect(reverse('office:office_edit', args=(contest_office1_on_stage.id,)))
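# Clarifying sketch (not part of the original view code): the merge form is expected to
# post the fields read above. Conflict resolution works by posting, for every attribute
# reported as "CONFLICT", a "<attribute>_choice" field whose value is the we_vote_id of
# the office whose value should win. The attribute name and concrete values below are
# illustrative only.
#
#   merge: "true"                              (or skip: "true" to record the pair as not duplicates)
#   contest_office1_we_vote_id: "wv01off123"   (the office that is kept)
#   contest_office2_we_vote_id: "wv01off456"   (the office merged in and then deleted)
#   google_civic_election_id: "4456"
#   state_code: "ca"
#   remove_duplicate_process: "true"           (continue with the next duplicate pair afterwards)
#   district_name_choice: "wv01off456"         (hypothetical CONFLICT attribute: keep office 2's value)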
| mit |
jordanemedlock/psychtruths | temboo/Library/Twitter/DirectMessages/DirectMessagesSent.py | 5 | 6163 | # -*- coding: utf-8 -*-
###############################################################################
#
# DirectMessagesSent
# Retrieves the 20 most recent direct messages sent by the authenticating user.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DirectMessagesSent(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the DirectMessagesSent Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(DirectMessagesSent, self).__init__(temboo_session, '/Library/Twitter/DirectMessages/DirectMessagesSent')
def new_input_set(self):
return DirectMessagesSentInputSet()
def _make_result_set(self, result, path):
return DirectMessagesSentResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return DirectMessagesSentChoreographyExecution(session, exec_id, path)
class DirectMessagesSentInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the DirectMessagesSent
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AccessTokenSecret(self, value):
"""
Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.)
"""
super(DirectMessagesSentInputSet, self)._set_input('AccessTokenSecret', value)
def set_AccessToken(self, value):
"""
Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.)
"""
super(DirectMessagesSentInputSet, self)._set_input('AccessToken', value)
def set_ConsumerKey(self, value):
"""
Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.)
"""
super(DirectMessagesSentInputSet, self)._set_input('ConsumerKey', value)
def set_ConsumerSecret(self, value):
"""
Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.)
"""
super(DirectMessagesSentInputSet, self)._set_input('ConsumerSecret', value)
def set_Count(self, value):
"""
Set the value of the Count input for this Choreo. ((optional, integer) Specifies the number of records to retrieve up to a maximum of 200.)
"""
super(DirectMessagesSentInputSet, self)._set_input('Count', value)
def set_IncludeEntities(self, value):
"""
Set the value of the IncludeEntities input for this Choreo. ((optional, boolean) The "entities" node containing extra metadata will not be included when set to false.)
"""
super(DirectMessagesSentInputSet, self)._set_input('IncludeEntities', value)
def set_MaxID(self, value):
"""
Set the value of the MaxID input for this Choreo. ((optional, string) Returns results with an ID less than (older than) or equal to the specified ID.)
"""
super(DirectMessagesSentInputSet, self)._set_input('MaxID', value)
def set_Page(self, value):
"""
Set the value of the Page input for this Choreo. ((optional, integer) Specifies the page of results to retrieve.)
"""
super(DirectMessagesSentInputSet, self)._set_input('Page', value)
def set_SinceID(self, value):
"""
Set the value of the SinceID input for this Choreo. ((optional, string) Returns results with an ID greater than (more recent than) the specified ID.)
"""
super(DirectMessagesSentInputSet, self)._set_input('SinceID', value)
class DirectMessagesSentResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the DirectMessagesSent Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.)
"""
return self._output.get('Response', None)
def get_Limit(self):
"""
Retrieve the value for the "Limit" output from this Choreo execution. ((integer) The rate limit ceiling for this particular request.)
"""
return self._output.get('Limit', None)
def get_Remaining(self):
"""
Retrieve the value for the "Remaining" output from this Choreo execution. ((integer) The number of requests left for the 15 minute window.)
"""
return self._output.get('Remaining', None)
def get_Reset(self):
"""
Retrieve the value for the "Reset" output from this Choreo execution. ((date) The remaining window before the rate limit resets in UTC epoch seconds.)
"""
return self._output.get('Reset', None)
class DirectMessagesSentChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return DirectMessagesSentResultSet(response, path)
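# Hedged usage sketch (not part of the generated Temboo bindings): the usual execution
# pattern for a Choreo is to create a TembooSession, populate an input set, execute, and
# read the outputs. The import path follows the standard Temboo Python SDK layout and the
# credential values are placeholders.
#
#   from temboo.core.session import TembooSession
#
#   session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#   choreo = DirectMessagesSent(session)
#   inputs = choreo.new_input_set()
#   inputs.set_ConsumerKey('...')
#   inputs.set_ConsumerSecret('...')
#   inputs.set_AccessToken('...')
#   inputs.set_AccessTokenSecret('...')
#   results = choreo.execute_with_results(inputs)
#   print(results.get_Response())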
| apache-2.0 |
milinbhakta/flaskjinja | flask1/Lib/site-packages/werkzeug/datastructures.py | 122 | 87447 | # -*- coding: utf-8 -*-
"""
werkzeug.datastructures
~~~~~~~~~~~~~~~~~~~~~~~
This module provides mixins and classes with an immutable interface.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import re
import codecs
import mimetypes
from copy import deepcopy
from itertools import repeat
from werkzeug._internal import _missing, _empty_stream
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
PY2, text_type, integer_types, string_types, make_literal_wrapper, \
to_native
from werkzeug.filesystem import get_filesystem_encoding
_locale_delim_re = re.compile(r'[_-]')
def is_immutable(self):
raise TypeError('%r objects are immutable' % self.__class__.__name__)
def iter_multi_items(mapping):
"""Iterates over the items of a mapping yielding keys and values
without dropping any from more complex structures.
"""
if isinstance(mapping, MultiDict):
for item in iteritems(mapping, multi=True):
yield item
elif isinstance(mapping, dict):
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
for value in value:
yield key, value
else:
yield key, value
else:
for item in mapping:
yield item
def native_itermethods(names):
if not PY2:
return lambda x: x
def setmethod(cls, name):
itermethod = getattr(cls, name)
setattr(cls, 'iter%s' % name, itermethod)
listmethod = lambda self, *a, **kw: list(itermethod(self, *a, **kw))
listmethod.__doc__ = \
'Like :py:meth:`iter%s`, but returns a list.' % name
setattr(cls, name, listmethod)
def wrap(cls):
for name in names:
setmethod(cls, name)
return cls
return wrap
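# Illustrative note (a sketch, not part of the original module): on Python 2 the decorator
# above turns each listed generator method into an ``iter<name>`` method and replaces
# ``<name>`` with a list-returning wrapper; on Python 3 it is a no-op.
#
#   @native_itermethods(['keys'])
#   class Demo(object):
#       def keys(self):
#           yield 'a'
#
#   d = Demo()
#   d.iterkeys()   # generator (Python 2 only)
#   d.keys()       # ['a'] on Python 2, a generator on Python 3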
class ImmutableListMixin(object):
"""Makes a :class:`list` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(tuple(self))
return rv
def __reduce_ex__(self, protocol):
return type(self), (list(self),)
def __delitem__(self, key):
is_immutable(self)
def __delslice__(self, i, j):
is_immutable(self)
def __iadd__(self, other):
is_immutable(self)
__imul__ = __iadd__
def __setitem__(self, key, value):
is_immutable(self)
def __setslice__(self, i, j, value):
is_immutable(self)
def append(self, item):
is_immutable(self)
remove = append
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def reverse(self):
is_immutable(self)
def sort(self, cmp=None, key=None, reverse=None):
is_immutable(self)
class ImmutableList(ImmutableListMixin, list):
"""An immutable :class:`list`.
.. versionadded:: 0.5
:private:
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
list.__repr__(self),
)
class ImmutableDictMixin(object):
"""Makes a :class:`dict` immutable.
.. versionadded:: 0.5
:private:
"""
_hash_cache = None
@classmethod
def fromkeys(cls, keys, value=None):
instance = super(cls, cls).__new__(cls)
instance.__init__(zip(keys, repeat(value)))
return instance
def __reduce_ex__(self, protocol):
return type(self), (dict(self),)
def _iter_hashitems(self):
return iteritems(self)
def __hash__(self):
if self._hash_cache is not None:
return self._hash_cache
rv = self._hash_cache = hash(frozenset(self._iter_hashitems()))
return rv
def setdefault(self, key, default=None):
is_immutable(self)
def update(self, *args, **kwargs):
is_immutable(self)
def pop(self, key, default=None):
is_immutable(self)
def popitem(self):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
def __delitem__(self, key):
is_immutable(self)
def clear(self):
is_immutable(self)
class ImmutableMultiDictMixin(ImmutableDictMixin):
"""Makes a :class:`MultiDict` immutable.
.. versionadded:: 0.5
:private:
"""
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def _iter_hashitems(self):
return iteritems(self, multi=True)
def add(self, key, value):
is_immutable(self)
def popitemlist(self):
is_immutable(self)
def poplist(self, key):
is_immutable(self)
def setlist(self, key, new_list):
is_immutable(self)
def setlistdefault(self, key, default_list=None):
is_immutable(self)
class UpdateDictMixin(object):
"""Makes dicts call `self.on_update` on modifications.
.. versionadded:: 0.5
:private:
"""
on_update = None
def calls_update(name):
def oncall(self, *args, **kw):
rv = getattr(super(UpdateDictMixin, self), name)(*args, **kw)
if self.on_update is not None:
self.on_update(self)
return rv
oncall.__name__ = name
return oncall
def setdefault(self, key, default=None):
modified = key not in self
rv = super(UpdateDictMixin, self).setdefault(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
def pop(self, key, default=_missing):
modified = key in self
if default is _missing:
rv = super(UpdateDictMixin, self).pop(key)
else:
rv = super(UpdateDictMixin, self).pop(key, default)
if modified and self.on_update is not None:
self.on_update(self)
return rv
__setitem__ = calls_update('__setitem__')
__delitem__ = calls_update('__delitem__')
clear = calls_update('clear')
popitem = calls_update('popitem')
update = calls_update('update')
del calls_update
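# Illustrative sketch (not part of the original module): a dict subclass mixing in
# UpdateDictMixin calls ``on_update`` with itself after every call that actually
# modifies the dict, which is how header- and cache-control-backed structures notify
# their owners.
#
#   class TrackedDict(UpdateDictMixin, dict):
#       pass
#
#   changes = []
#   d = TrackedDict()
#   d.on_update = changes.append
#   d['a'] = 1               # on_update fires
#   d.setdefault('a', 2)     # key already present, so on_update does not fire
#   d.pop('missing', None)   # key absent, so on_update does not fire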
class TypeConversionDict(dict):
"""Works like a regular dict but the :meth:`get` method can perform
type conversions. :class:`MultiDict` and :class:`CombinedMultiDict`
are subclasses of this class and provide the same feature.
.. versionadded:: 0.5
"""
def get(self, key, default=None, type=None):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = TypeConversionDict(foo='42', bar='blub')
>>> d.get('foo', type=int)
42
>>> d.get('bar', -1, type=int)
-1
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
"""
try:
rv = self[key]
if type is not None:
rv = type(rv)
except (KeyError, ValueError):
rv = default
return rv
class ImmutableTypeConversionDict(ImmutableDictMixin, TypeConversionDict):
"""Works like a :class:`TypeConversionDict` but does not support
modifications.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return TypeConversionDict(self)
def __copy__(self):
return self
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class MultiDict(TypeConversionDict):
"""A :class:`MultiDict` is a dictionary subclass customized to deal with
multiple values for the same key which is for example used by the parsing
functions in the wrappers. This is necessary because some HTML form
elements pass multiple values for the same key.
:class:`MultiDict` implements all standard dictionary methods.
Internally, it saves all values for a key as a list, but the standard dict
access methods will only return the first value for a key. If you want to
gain access to the other values, too, you have to use the `list` methods as
explained below.
Basic Usage:
>>> d = MultiDict([('a', 'b'), ('a', 'c')])
>>> d
MultiDict([('a', 'b'), ('a', 'c')])
>>> d['a']
'b'
>>> d.getlist('a')
['b', 'c']
>>> 'a' in d
True
It behaves like a normal dict thus all dict functions will only return the
first value when multiple values for one key are found.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
A :class:`MultiDict` can be constructed from an iterable of
``(key, value)`` tuples, a dict, a :class:`MultiDict` or from Werkzeug 0.2
onwards some keyword parameters.
:param mapping: the initial value for the :class:`MultiDict`. Either a
regular dict, an iterable of ``(key, value)`` tuples
or `None`.
"""
def __init__(self, mapping=None):
if isinstance(mapping, MultiDict):
dict.__init__(self, ((k, l[:]) for k, l in iterlists(mapping)))
elif isinstance(mapping, dict):
tmp = {}
for key, value in iteritems(mapping):
if isinstance(value, (tuple, list)):
value = list(value)
else:
value = [value]
tmp[key] = value
dict.__init__(self, tmp)
else:
tmp = {}
for key, value in mapping or ():
tmp.setdefault(key, []).append(value)
dict.__init__(self, tmp)
def __getstate__(self):
return dict(self.lists())
def __setstate__(self, value):
dict.clear(self)
dict.update(self, value)
def __getitem__(self, key):
"""Return the first data value for this key;
raises KeyError if not found.
:param key: The key to be looked up.
:raise KeyError: if the key does not exist.
"""
if key in self:
return dict.__getitem__(self, key)[0]
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
"""Like :meth:`add` but removes an existing key first.
:param key: the key for the value.
:param value: the value to set.
"""
dict.__setitem__(self, key, [value])
def add(self, key, value):
"""Adds a new value for the key.
.. versionadded:: 0.6
:param key: the key for the value.
:param value: the value to add.
"""
dict.setdefault(self, key, []).append(value)
def getlist(self, key, type=None):
"""Return the list of items for a given key. If that key is not in the
`MultiDict`, the return value will be an empty list. Just as `get`
`getlist` accepts a `type` parameter. All items will be converted
with the callable defined there.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`MultiDict`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
"""
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return list(rv)
result = []
for item in rv:
try:
result.append(type(item))
except ValueError:
pass
return result
def setlist(self, key, new_list):
"""Remove the old values for a key and add new ones. Note that the list
you pass the values in will be shallow-copied before it is inserted in
the dictionary.
>>> d = MultiDict()
>>> d.setlist('foo', ['1', '2'])
>>> d['foo']
'1'
>>> d.getlist('foo')
['1', '2']
:param key: The key for which the values are set.
:param new_list: An iterable with the new values for the key. Old values
are removed first.
"""
dict.__setitem__(self, key, list(new_list))
def setdefault(self, key, default=None):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key not in self:
self[key] = default
else:
default = self[key]
return default
def setlistdefault(self, key, default_list=None):
"""Like `setdefault` but sets multiple values. The list returned
is not a copy, but the list that is actually used internally. This
means that you can put new values into the dict by appending items
to the list:
>>> d = MultiDict({"foo": 1})
>>> d.setlistdefault("foo").extend([2, 3])
>>> d.getlist("foo")
[1, 2, 3]
:param key: The key to be looked up.
:param default: An iterable of default values. It is either copied
(in case it was a list) or converted into a list
before returned.
:return: a :class:`list`
"""
if key not in self:
default_list = list(default_list or ())
dict.__setitem__(self, key, default_list)
else:
default_list = dict.__getitem__(self, key)
return default_list
def items(self, multi=False):
"""Return an iterator of ``(key, value)`` pairs.
:param multi: If set to `True` the iterator returned will have a pair
for each value of each key. Otherwise it will only
contain pairs for the first value of each key.
"""
for key, values in iteritems(dict, self):
if multi:
for value in values:
yield key, value
else:
yield key, values[0]
def lists(self):
"""Return a list of ``(key, values)`` pairs, where values is the list
of all values associated with the key."""
for key, values in iteritems(dict, self):
yield key, list(values)
def keys(self):
return iterkeys(dict, self)
__iter__ = keys
def values(self):
"""Returns an iterator of the first value on every key's value list."""
for values in itervalues(dict, self):
yield values[0]
def listvalues(self):
"""Return an iterator of all values associated with a key. Zipping
:meth:`keys` and this is the same as calling :meth:`lists`:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> zip(d.keys(), d.listvalues()) == d.lists()
True
"""
return itervalues(dict, self)
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self)
def deepcopy(self, memo=None):
"""Return a deep copy of this object."""
return self.__class__(deepcopy(self.to_dict(flat=False), memo))
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first value for each key.
:return: a :class:`dict`
"""
if flat:
return dict(iteritems(self))
return dict(self.lists())
def update(self, other_dict):
"""update() extends rather than replaces existing key lists:
>>> a = MultiDict({'x': 1})
>>> b = MultiDict({'x': 2, 'y': 3})
>>> a.update(b)
>>> a
MultiDict([('y', 3), ('x', 1), ('x', 2)])
If the value list for a key in ``other_dict`` is empty, no new values
will be added to the dict and the key will not be created:
>>> x = {'empty_list': []}
>>> y = MultiDict()
>>> y.update(x)
>>> y
MultiDict([])
"""
for key, value in iter_multi_items(other_dict):
MultiDict.add(self, key, value)
def pop(self, key, default=_missing):
"""Pop the first item for a list on the dict. Afterwards the
key is removed from the dict, so additional values are discarded:
>>> d = MultiDict({"foo": [1, 2, 3]})
>>> d.pop("foo")
1
>>> "foo" in d
False
:param key: the key to pop.
:param default: if provided the value to return if the key was
not in the dictionary.
"""
try:
return dict.pop(self, key)[0]
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
def popitem(self):
"""Pop an item from the dict."""
try:
item = dict.popitem(self)
return (item[0], item[1][0])
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def poplist(self, key):
"""Pop the list for a key from the dict. If the key is not in the dict
an empty list is returned.
.. versionchanged:: 0.5
If the key does no longer exist a list is returned instead of
raising an error.
"""
return dict.pop(self, key, [])
def popitemlist(self):
"""Pop a ``(key, list)`` tuple from the dict."""
try:
return dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
def __copy__(self):
return self.copy()
def __deepcopy__(self, memo):
return self.deepcopy(memo=memo)
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, list(iteritems(self, multi=True)))
class _omd_bucket(object):
"""Wraps values in the :class:`OrderedMultiDict`. This makes it
possible to keep an order over multiple different keys. It requires
a lot of extra memory and slows down access a lot, but makes it
possible to access elements in O(1) and iterate in O(n).
"""
__slots__ = ('prev', 'key', 'value', 'next')
def __init__(self, omd, key, value):
self.prev = omd._last_bucket
self.key = key
self.value = value
self.next = None
if omd._first_bucket is None:
omd._first_bucket = self
if omd._last_bucket is not None:
omd._last_bucket.next = self
omd._last_bucket = self
def unlink(self, omd):
if self.prev:
self.prev.next = self.next
if self.next:
self.next.prev = self.prev
if omd._first_bucket is self:
omd._first_bucket = self.next
if omd._last_bucket is self:
omd._last_bucket = self.prev
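# Clarifying note (a sketch, not in the original source): every value stored in an
# OrderedMultiDict is wrapped in an _omd_bucket, and the buckets form a doubly linked
# list across *all* keys, so insertion order is preserved even when the same key
# appears multiple times:
#
#   d = OrderedMultiDict()
#   d.add('a', 1)
#   d.add('b', 2)
#   d.add('a', 3)
#   list(d.items(multi=True))   # [('a', 1), ('b', 2), ('a', 3)]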
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class OrderedMultiDict(MultiDict):
"""Works like a regular :class:`MultiDict` but preserves the
order of the fields. To convert the ordered multi dict into a
list you can use the :meth:`items` method and pass it ``multi=True``.
In general an :class:`OrderedMultiDict` is an order of magnitude
slower than a :class:`MultiDict`.
.. admonition:: note
Due to a limitation in Python you cannot convert an ordered
multi dict into a regular dict by using ``dict(multidict)``.
Instead you have to use the :meth:`to_dict` method, otherwise
the internal bucket objects are exposed.
"""
def __init__(self, mapping=None):
dict.__init__(self)
self._first_bucket = self._last_bucket = None
if mapping is not None:
OrderedMultiDict.update(self, mapping)
def __eq__(self, other):
if not isinstance(other, MultiDict):
return NotImplemented
if isinstance(other, OrderedMultiDict):
iter1 = iteritems(self, multi=True)
iter2 = iteritems(other, multi=True)
try:
for k1, v1 in iter1:
k2, v2 = next(iter2)
if k1 != k2 or v1 != v2:
return False
except StopIteration:
return False
try:
next(iter2)
except StopIteration:
return True
return False
if len(self) != len(other):
return False
for key, values in iterlists(self):
if other.getlist(key) != values:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def __reduce_ex__(self, protocol):
return type(self), (list(iteritems(self, multi=True)),)
def __getstate__(self):
return list(iteritems(self, multi=True))
def __setstate__(self, values):
dict.clear(self)
for key, value in values:
self.add(key, value)
def __getitem__(self, key):
if key in self:
return dict.__getitem__(self, key)[0].value
raise exceptions.BadRequestKeyError(key)
def __setitem__(self, key, value):
self.poplist(key)
self.add(key, value)
def __delitem__(self, key):
self.pop(key)
def keys(self):
return (key for key, value in iteritems(self))
__iter__ = keys
def values(self):
return (value for key, value in iteritems(self))
def items(self, multi=False):
ptr = self._first_bucket
if multi:
while ptr is not None:
yield ptr.key, ptr.value
ptr = ptr.next
else:
returned_keys = set()
while ptr is not None:
if ptr.key not in returned_keys:
returned_keys.add(ptr.key)
yield ptr.key, ptr.value
ptr = ptr.next
def lists(self):
returned_keys = set()
ptr = self._first_bucket
while ptr is not None:
if ptr.key not in returned_keys:
yield ptr.key, self.getlist(ptr.key)
returned_keys.add(ptr.key)
ptr = ptr.next
def listvalues(self):
for key, values in iterlists(self):
yield values
def add(self, key, value):
dict.setdefault(self, key, []).append(_omd_bucket(self, key, value))
def getlist(self, key, type=None):
try:
rv = dict.__getitem__(self, key)
except KeyError:
return []
if type is None:
return [x.value for x in rv]
result = []
for item in rv:
try:
result.append(type(item.value))
except ValueError:
pass
return result
def setlist(self, key, new_list):
self.poplist(key)
for value in new_list:
self.add(key, value)
def setlistdefault(self, key, default_list=None):
raise TypeError('setlistdefault is unsupported for '
'ordered multi dicts')
def update(self, mapping):
for key, value in iter_multi_items(mapping):
OrderedMultiDict.add(self, key, value)
def poplist(self, key):
buckets = dict.pop(self, key, ())
for bucket in buckets:
bucket.unlink(self)
return [x.value for x in buckets]
def pop(self, key, default=_missing):
try:
buckets = dict.pop(self, key)
except KeyError as e:
if default is not _missing:
return default
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return buckets[0].value
def popitem(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, buckets[0].value
def popitemlist(self):
try:
key, buckets = dict.popitem(self)
except KeyError as e:
raise exceptions.BadRequestKeyError(str(e))
for bucket in buckets:
bucket.unlink(self)
return key, [x.value for x in buckets]
def _options_header_vkw(value, kw):
return dump_options_header(value, dict((k.replace('_', '-'), v)
for k, v in kw.items()))
def _unicodify_header_value(value):
if isinstance(value, bytes):
value = value.decode('latin-1')
if not isinstance(value, text_type):
value = text_type(value)
return value
@native_itermethods(['keys', 'values', 'items'])
class Headers(object):
"""An object that stores some headers. It has a dict-like interface
but is ordered and can store the same keys multiple times.
This data structure is useful if you want a nicer way to handle WSGI
headers which are stored as tuples in a list.
From Werkzeug 0.3 onwards, the :exc:`KeyError` raised by this class is
also a subclass of the :class:`~exceptions.BadRequest` HTTP exception
and will render a page for a ``400 BAD REQUEST`` if caught in a
catch-all for HTTP exceptions.
Headers is mostly compatible with the Python :class:`wsgiref.headers.Headers`
class, with the exception of `__getitem__`. :mod:`wsgiref` will return
`None` for ``headers['missing']``, whereas :class:`Headers` will raise
a :class:`KeyError`.
To create a new :class:`Headers` object pass it a list or dict of headers
which are used as default values. This does not reuse the list passed
to the constructor for internal usage.
:param defaults: The list of default values for the :class:`Headers`.
.. versionchanged:: 0.9
This data structure now stores unicode values similar to how the
multi dicts do it. The main difference is that bytes can be set as
well which will automatically be latin1 decoded.
.. versionchanged:: 0.9
The :meth:`linked` function was removed without replacement as it
was an API that does not support the changes to the encoding model.
"""
def __init__(self, defaults=None):
self._list = []
if defaults is not None:
if isinstance(defaults, (list, Headers)):
self._list.extend(defaults)
else:
self.extend(defaults)
def __getitem__(self, key, _get_mode=False):
if not _get_mode:
if isinstance(key, integer_types):
return self._list[key]
elif isinstance(key, slice):
return self.__class__(self._list[key])
if not isinstance(key, string_types):
raise exceptions.BadRequestKeyError(key)
ikey = key.lower()
for k, v in self._list:
if k.lower() == ikey:
return v
# micro optimization: if we are in get mode we will catch that
# exception one stack level down so we can raise a standard
# key error instead of our special one.
if _get_mode:
raise KeyError()
raise exceptions.BadRequestKeyError(key)
def __eq__(self, other):
return other.__class__ is self.__class__ and \
set(other._list) == set(self._list)
def __ne__(self, other):
return not self.__eq__(other)
def get(self, key, default=None, type=None, as_bytes=False):
"""Return the default value if the requested data doesn't exist.
If `type` is provided and is a callable it should convert the value,
return it or raise a :exc:`ValueError` if that is not possible. In
this case the function will return the default as if the value was not
found:
>>> d = Headers([('Content-Length', '42')])
>>> d.get('Content-Length', type=int)
42
If a headers object is bound you must not add unicode strings
because no encoding takes place.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key can't
be looked up. If not further specified `None` is
returned.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the default value is returned.
:param as_bytes: return bytes instead of unicode strings.
"""
try:
rv = self.__getitem__(key, _get_mode=True)
except KeyError:
return default
if as_bytes:
rv = rv.encode('latin1')
if type is None:
return rv
try:
return type(rv)
except ValueError:
return default
def getlist(self, key, type=None, as_bytes=False):
"""Return the list of items for a given key. If that key is not in the
:class:`Headers`, the return value will be an empty list. Just as
:meth:`get` :meth:`getlist` accepts a `type` parameter. All items will
be converted with the callable defined there.
.. versionadded:: 0.9
Added support for `as_bytes`.
:param key: The key to be looked up.
:param type: A callable that is used to cast the value in the
:class:`Headers`. If a :exc:`ValueError` is raised
by this callable the value will be removed from the list.
:return: a :class:`list` of all the values for the key.
:param as_bytes: return bytes instead of unicode strings.
"""
ikey = key.lower()
result = []
for k, v in self:
if k.lower() == ikey:
if as_bytes:
v = v.encode('latin1')
if type is not None:
try:
v = type(v)
except ValueError:
continue
result.append(v)
return result
def get_all(self, name):
"""Return a list of all the values for the named field.
This method is compatible with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.get_all` method.
"""
return self.getlist(name)
def items(self, lower=False):
for key, value in self:
if lower:
key = key.lower()
yield key, value
def keys(self, lower=False):
for key, _ in iteritems(self, lower):
yield key
def values(self):
for _, value in iteritems(self):
yield value
def extend(self, iterable):
"""Extend the headers with a dict or an iterable yielding keys and
values.
"""
if isinstance(iterable, dict):
for key, value in iteritems(iterable):
if isinstance(value, (tuple, list)):
for v in value:
self.add(key, v)
else:
self.add(key, value)
else:
for key, value in iterable:
self.add(key, value)
def __delitem__(self, key, _index_operation=True):
if _index_operation and isinstance(key, (integer_types, slice)):
del self._list[key]
return
key = key.lower()
new = []
for k, v in self._list:
if k.lower() != key:
new.append((k, v))
self._list[:] = new
def remove(self, key):
"""Remove a key.
:param key: The key to be removed.
"""
return self.__delitem__(key, _index_operation=False)
def pop(self, key=None, default=_missing):
"""Removes and returns a key or index.
:param key: The key to be popped. If this is an integer the item at
that position is removed, if it's a string the value for
that key is. If the key is omitted or `None` the last
item is removed.
:return: an item.
"""
if key is None:
return self._list.pop()
if isinstance(key, integer_types):
return self._list.pop(key)
try:
rv = self[key]
self.remove(key)
except KeyError:
if default is not _missing:
return default
raise
return rv
def popitem(self):
"""Removes a key or index and returns a (key, value) item."""
return self.pop()
def __contains__(self, key):
"""Check if a key is present."""
try:
self.__getitem__(key, _get_mode=True)
except KeyError:
return False
return True
has_key = __contains__
def __iter__(self):
"""Yield ``(key, value)`` tuples."""
return iter(self._list)
def __len__(self):
return len(self._list)
def add(self, _key, _value, **kw):
"""Add a new header tuple to the list.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes::
>>> d = Headers()
>>> d.add('Content-Type', 'text/plain')
>>> d.add('Content-Disposition', 'attachment', filename='foo.png')
The keyword argument dumping uses :func:`dump_options_header`
behind the scenes.
.. versionadded:: 0.4.1
keyword arguments were added for :mod:`wsgiref` compatibility.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
self._list.append((_key, _value))
def _validate_value(self, value):
if not isinstance(value, text_type):
raise TypeError('Value should be unicode.')
if u'\n' in value or u'\r' in value:
raise ValueError('Detected newline in header value. This is '
'a potential security problem')
def add_header(self, _key, _value, **_kw):
"""Add a new header tuple to the list.
An alias for :meth:`add` for compatibility with the :mod:`wsgiref`
:meth:`~wsgiref.headers.Headers.add_header` method.
"""
self.add(_key, _value, **_kw)
def clear(self):
"""Clears all headers."""
del self._list[:]
def set(self, _key, _value, **kw):
"""Remove all header tuples for `key` and add a new one. The newly
added key either appears at the end of the list if there was no
entry or replaces the first one.
Keyword arguments can specify additional parameters for the header
value, with underscores converted to dashes. See :meth:`add` for
more information.
.. versionchanged:: 0.6.1
:meth:`set` now accepts the same arguments as :meth:`add`.
:param key: The key to be inserted.
:param value: The value to be inserted.
"""
if kw:
_value = _options_header_vkw(_value, kw)
_value = _unicodify_header_value(_value)
self._validate_value(_value)
if not self._list:
self._list.append((_key, _value))
return
listiter = iter(self._list)
ikey = _key.lower()
for idx, (old_key, old_value) in enumerate(listiter):
if old_key.lower() == ikey:
# replace first occurrence
self._list[idx] = (_key, _value)
break
else:
self._list.append((_key, _value))
return
self._list[idx + 1:] = [t for t in listiter if t[0].lower() != ikey]
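# Illustrative sketch (not part of the original source) of the difference between
# ``set`` and ``add``: ``set`` replaces the first matching header and drops any later
# duplicates, while ``add`` always appends another tuple.
#
#   h = Headers()
#   h.add('X-Demo', 'one')
#   h.add('X-Demo', 'two')
#   h.set('X-Demo', 'three')
#   h.getlist('X-Demo')   # ['three']
#   h.add('X-Demo', 'four')
#   h.getlist('X-Demo')   # ['three', 'four']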
def setdefault(self, key, value):
"""Returns the value for the key if it is in the dict, otherwise it
returns `default` and sets that value for `key`.
:param key: The key to be looked up.
:param default: The default value to be returned if the key is not
in the dict. If not further specified it's `None`.
"""
if key in self:
return self[key]
self.set(key, value)
return value
def __setitem__(self, key, value):
"""Like :meth:`set` but also supports index/slice based setting."""
if isinstance(key, (slice, integer_types)):
if isinstance(key, integer_types):
value = [value]
value = [(k, _unicodify_header_value(v)) for (k, v) in value]
[self._validate_value(v) for (k, v) in value]
if isinstance(key, integer_types):
self._list[key] = value[0]
else:
self._list[key] = value
else:
self.set(key, value)
def to_list(self, charset='iso-8859-1'):
"""Convert the headers into a list suitable for WSGI."""
from warnings import warn
warn(DeprecationWarning('Method removed, use to_wsgi_list instead'),
stacklevel=2)
return self.to_wsgi_list()
def to_wsgi_list(self):
"""Convert the headers into a list suitable for WSGI.
The values are byte strings in Python 2 converted to latin1 and unicode
strings in Python 3 for the WSGI server to encode.
:return: list
"""
if PY2:
return [(to_native(k), v.encode('latin1')) for k, v in self]
return list(self)
def copy(self):
return self.__class__(self._list)
def __copy__(self):
return self.copy()
def __str__(self):
"""Returns formatted headers suitable for HTTP transmission."""
strs = []
for key, value in self.to_wsgi_list():
strs.append('%s: %s' % (key, value))
strs.append('\r\n')
return '\r\n'.join(strs)
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
list(self)
)
class ImmutableHeadersMixin(object):
"""Makes a :class:`Headers` immutable. We do not mark them as
hashable though, since the only use case for this data structure
in Werkzeug is a view on a mutable structure.
.. versionadded:: 0.5
:private:
"""
def __delitem__(self, key):
is_immutable(self)
def __setitem__(self, key, value):
is_immutable(self)
set = __setitem__
def add(self, item):
is_immutable(self)
remove = add_header = add
def extend(self, iterable):
is_immutable(self)
def insert(self, pos, value):
is_immutable(self)
def pop(self, index=-1):
is_immutable(self)
def popitem(self):
is_immutable(self)
def setdefault(self, key, default):
is_immutable(self)
class EnvironHeaders(ImmutableHeadersMixin, Headers):
"""Read only version of the headers from a WSGI environment. This
provides the same interface as `Headers` and is constructed from
a WSGI environment.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for
HTTP exceptions.
"""
def __init__(self, environ):
self.environ = environ
def __eq__(self, other):
return self.environ is other.environ
def __getitem__(self, key, _get_mode=False):
# _get_mode is a no-op for this class as there is no index but
# used because get() calls it.
key = key.upper().replace('-', '_')
if key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
return _unicodify_header_value(self.environ[key])
return _unicodify_header_value(self.environ['HTTP_' + key])
def __len__(self):
# the iter is necessary because otherwise list calls our
# len which would call list again and so forth.
return len(list(iter(self)))
def __iter__(self):
for key, value in iteritems(self.environ):
if key.startswith('HTTP_') and key not in \
('HTTP_CONTENT_TYPE', 'HTTP_CONTENT_LENGTH'):
yield (key[5:].replace('_', '-').title(),
_unicodify_header_value(value))
elif key in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
yield (key.replace('_', '-').title(),
_unicodify_header_value(value))
def copy(self):
raise TypeError('cannot create %r copies' % self.__class__.__name__)
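# Clarifying sketch (not part of the original source): EnvironHeaders exposes raw WSGI
# environ keys as normal header names.
#
#   environ = {
#       'CONTENT_TYPE': 'text/plain',
#       'HTTP_X_FORWARDED_FOR': '127.0.0.1',
#       'wsgi.version': (1, 0),
#   }
#   headers = EnvironHeaders(environ)
#   headers['X-Forwarded-For']   # '127.0.0.1'
#   headers['Content-Type']      # 'text/plain'
#   # iterating yields both headers; the order follows the environ dict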
@native_itermethods(['keys', 'values', 'items', 'lists', 'listvalues'])
class CombinedMultiDict(ImmutableMultiDictMixin, MultiDict):
"""A read only :class:`MultiDict` that you can pass multiple :class:`MultiDict`
instances as sequence and it will combine the return values of all wrapped
dicts:
>>> from werkzeug.datastructures import CombinedMultiDict, MultiDict
>>> post = MultiDict([('foo', 'bar')])
>>> get = MultiDict([('blub', 'blah')])
>>> combined = CombinedMultiDict([get, post])
>>> combined['foo']
'bar'
>>> combined['blub']
'blah'
This works for all read operations and will raise a `TypeError` for
methods that usually change data, since modification is not possible.
From Werkzeug 0.3 onwards, the `KeyError` raised by this class is also a
subclass of the :exc:`~exceptions.BadRequest` HTTP exception and will
render a page for a ``400 BAD REQUEST`` if caught in a catch-all for HTTP
exceptions.
"""
def __reduce_ex__(self, protocol):
return type(self), (self.dicts,)
def __init__(self, dicts=None):
self.dicts = dicts or []
@classmethod
def fromkeys(cls):
raise TypeError('cannot create %r instances by fromkeys' %
cls.__name__)
def __getitem__(self, key):
for d in self.dicts:
if key in d:
return d[key]
raise exceptions.BadRequestKeyError(key)
def get(self, key, default=None, type=None):
for d in self.dicts:
if key in d:
if type is not None:
try:
return type(d[key])
except ValueError:
continue
return d[key]
return default
def getlist(self, key, type=None):
rv = []
for d in self.dicts:
rv.extend(d.getlist(key, type))
return rv
def _keys_impl(self):
"""This function exists so __len__ can be implemented more efficiently,
saving one list creation from an iterator.
Using this for Python 2's ``dict.keys`` behavior would be useless since
`dict.keys` in Python 2 returns a list, while we have a set here.
"""
rv = set()
for d in self.dicts:
rv.update(iterkeys(d))
return rv
def keys(self):
return iter(self._keys_impl())
__iter__ = keys
def items(self, multi=False):
found = set()
for d in self.dicts:
for key, value in iteritems(d, multi):
if multi:
yield key, value
elif key not in found:
found.add(key)
yield key, value
def values(self):
for key, value in iteritems(self):
yield value
def lists(self):
rv = {}
for d in self.dicts:
for key, values in iterlists(d):
rv.setdefault(key, []).extend(values)
return iteritems(rv)
def listvalues(self):
return (x[1] for x in self.lists())
def copy(self):
"""Return a shallow copy of this object."""
return self.__class__(self.dicts[:])
def to_dict(self, flat=True):
"""Return the contents as regular dict. If `flat` is `True` the
returned dict will only have the first item present, if `flat` is
`False` all values will be returned as lists.
:param flat: If set to `False` the dict returned will have lists
with all the values in it. Otherwise it will only
contain the first item for each key.
:return: a :class:`dict`
"""
rv = {}
for d in reversed(self.dicts):
rv.update(d.to_dict(flat))
return rv
def __len__(self):
return len(self._keys_impl())
def __contains__(self, key):
for d in self.dicts:
if key in d:
return True
return False
has_key = __contains__
def __repr__(self):
return '%s(%r)' % (self.__class__.__name__, self.dicts)
class FileMultiDict(MultiDict):
"""A special :class:`MultiDict` that has convenience methods to add
files to it. This is used for :class:`EnvironBuilder` and generally
useful for unittesting.
.. versionadded:: 0.5
"""
def add_file(self, name, file, filename=None, content_type=None):
"""Adds a new file to the dict. `file` can be a file name or
a :class:`file`-like or a :class:`FileStorage` object.
:param name: the name of the field.
:param file: a filename or :class:`file`-like object
:param filename: an optional filename
:param content_type: an optional content type
"""
if isinstance(file, FileStorage):
value = file
else:
if isinstance(file, string_types):
if filename is None:
filename = file
file = open(file, 'rb')
if filename and content_type is None:
content_type = mimetypes.guess_type(filename)[0] or \
'application/octet-stream'
value = FileStorage(file, filename, name, content_type)
self.add(name, value)
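# Illustrative sketch (not in the original source): add_file accepts a filename, a
# file-like object, or an existing FileStorage, which is what makes it handy when
# building test requests. Assumes ``from io import BytesIO``.
#
#   files = FileMultiDict()
#   files.add_file('avatar', BytesIO(b'fake image bytes'), filename='avatar.png')
#   fs = files['avatar']   # a FileStorage instance
#   fs.filename            # 'avatar.png'
#   fs.content_type        # 'image/png' (guessed from the filename)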
class ImmutableDict(ImmutableDictMixin, dict):
"""An immutable :class:`dict`.
.. versionadded:: 0.5
"""
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
dict.__repr__(self),
)
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return dict(self)
def __copy__(self):
return self
class ImmutableMultiDict(ImmutableMultiDictMixin, MultiDict):
"""An immutable :class:`MultiDict`.
.. versionadded:: 0.5
"""
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return MultiDict(self)
def __copy__(self):
return self
class ImmutableOrderedMultiDict(ImmutableMultiDictMixin, OrderedMultiDict):
"""An immutable :class:`OrderedMultiDict`.
.. versionadded:: 0.6
"""
def _iter_hashitems(self):
return enumerate(iteritems(self, multi=True))
def copy(self):
"""Return a shallow mutable copy of this object. Keep in mind that
the standard library's :func:`copy` function is a no-op for this class
like for any other python immutable type (eg: :class:`tuple`).
"""
return OrderedMultiDict(self)
def __copy__(self):
return self
@native_itermethods(['values'])
class Accept(ImmutableList):
"""An :class:`Accept` object is just a list subclass for lists of
``(value, quality)`` tuples. It is automatically sorted by quality.
All :class:`Accept` objects work similar to a list but provide extra
functionality for working with the data. Containment checks are
normalized to the rules of that header:
>>> a = CharsetAccept([('ISO-8859-1', 1), ('utf-8', 0.7)])
>>> a.best
'ISO-8859-1'
>>> 'iso-8859-1' in a
True
>>> 'UTF8' in a
True
>>> 'utf7' in a
False
To get the quality for an item you can use normal item lookup:
>>> print a['utf-8']
0.7
>>> a['utf7']
0
.. versionchanged:: 0.5
:class:`Accept` objects are forced immutable now.
"""
def __init__(self, values=()):
if values is None:
list.__init__(self)
self.provided = False
elif isinstance(values, Accept):
self.provided = values.provided
list.__init__(self, values)
else:
self.provided = True
values = [(a, b) for b, a in values]
values.sort()
values.reverse()
list.__init__(self, [(a, b) for b, a in values])
def _value_matches(self, value, item):
"""Check if a value matches a given accept item."""
return item == '*' or item.lower() == value.lower()
def __getitem__(self, key):
"""Besides index lookup (getting item n) you can also pass it a string
to get the quality for the item. If the item is not in the list, the
returned quality is ``0``.
"""
if isinstance(key, string_types):
return self.quality(key)
return list.__getitem__(self, key)
def quality(self, key):
"""Returns the quality of the key.
.. versionadded:: 0.6
In previous versions you had to use the item-lookup syntax
(eg: ``obj[key]`` instead of ``obj.quality(key)``)
"""
for item, quality in self:
if self._value_matches(key, item):
return quality
return 0
def __contains__(self, value):
for item, quality in self:
if self._value_matches(value, item):
return True
return False
def __repr__(self):
return '%s([%s])' % (
self.__class__.__name__,
', '.join('(%r, %s)' % (x, y) for x, y in self)
)
def index(self, key):
"""Get the position of an entry or raise :exc:`ValueError`.
:param key: The key to be looked up.
.. versionchanged:: 0.5
This used to raise :exc:`IndexError`, which was inconsistent
with the list API.
"""
if isinstance(key, string_types):
for idx, (item, quality) in enumerate(self):
if self._value_matches(key, item):
return idx
raise ValueError(key)
return list.index(self, key)
def find(self, key):
"""Get the position of an entry or return -1.
:param key: The key to be looked up.
"""
try:
return self.index(key)
except ValueError:
return -1
def values(self):
"""Iterate over all values."""
for item in self:
yield item[0]
def to_header(self):
"""Convert the header set into an HTTP header string."""
result = []
for value, quality in self:
if quality != 1:
value = '%s;q=%s' % (value, quality)
result.append(value)
return ','.join(result)
def __str__(self):
return self.to_header()
def best_match(self, matches, default=None):
"""Returns the best match from a list of possible matches based
on the quality of the client. If two items have the same quality,
        the one that comes first is returned.
:param matches: a list of matches to check for
:param default: the value that is returned if none match
"""
best_quality = -1
result = default
for server_item in matches:
for client_item, quality in self:
if quality <= best_quality:
break
if self._value_matches(server_item, client_item) \
and quality > 0:
best_quality = quality
result = server_item
return result
@property
def best(self):
"""The best match as value."""
if self:
return self[0][0]
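# Illustrative usage sketch (editor's addition, not part of the original
# module): exercises quality sorting and lookup on the Accept class defined
# above; the header values are made-up examples.
def _example_accept_usage():
    accept = Accept([('text/html', 1), ('application/json', 0.8)])
    assert accept.best == 'text/html'
    assert accept['application/json'] == 0.8
    assert accept.quality('text/xml') == 0
    return accept.to_header()  # expected: 'text/html,application/json;q=0.8'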
class MIMEAccept(Accept):
"""Like :class:`Accept` but with special methods and behavior for
mimetypes.
"""
def _value_matches(self, value, item):
def _normalize(x):
x = x.lower()
return x == '*' and ('*', '*') or x.split('/', 1)
# this is from the application which is trusted. to avoid developer
# frustration we actually check these for valid values
if '/' not in value:
raise ValueError('invalid mimetype %r' % value)
value_type, value_subtype = _normalize(value)
if value_type == '*' and value_subtype != '*':
raise ValueError('invalid mimetype %r' % value)
if '/' not in item:
return False
item_type, item_subtype = _normalize(item)
if item_type == '*' and item_subtype != '*':
return False
return (
(item_type == item_subtype == '*' or
value_type == value_subtype == '*') or
(item_type == value_type and (item_subtype == '*' or
value_subtype == '*' or
item_subtype == value_subtype))
)
@property
def accept_html(self):
"""True if this object accepts HTML."""
return (
'text/html' in self or
'application/xhtml+xml' in self or
self.accept_xhtml
)
@property
def accept_xhtml(self):
"""True if this object accepts XHTML."""
return (
'application/xhtml+xml' in self or
'application/xml' in self
)
@property
def accept_json(self):
"""True if this object accepts JSON."""
return 'application/json' in self
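# Illustrative usage sketch (editor's addition, not part of the original
# module): mimetype matching with a wildcard entry; the values are made up.
def _example_mimeaccept_usage():
    mime = MIMEAccept([('text/html', 1), ('*/*', 0.5)])
    assert 'text/html' in mime
    assert mime.accept_html
    assert mime.best_match(['application/json', 'text/html']) == 'text/html'
    return mime['image/png']  # matched by the */* entry -> 0.5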
class LanguageAccept(Accept):
"""Like :class:`Accept` but with normalization for languages."""
def _value_matches(self, value, item):
def _normalize(language):
return _locale_delim_re.split(language.lower())
return item == '*' or _normalize(value) == _normalize(item)
class CharsetAccept(Accept):
"""Like :class:`Accept` but with normalization for charsets."""
def _value_matches(self, value, item):
def _normalize(name):
try:
return codecs.lookup(name).name
except LookupError:
return name.lower()
return item == '*' or _normalize(value) == _normalize(item)
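# Illustrative usage sketch (editor's addition, not part of the original
# module): charset names are normalized through the codecs registry, so
# spelling variants of the same encoding match; the values are made up.
def _example_charset_accept_usage():
    charsets = CharsetAccept([('utf-8', 1), ('iso-8859-1', 0.5)])
    assert 'UTF8' in charsets
    assert charsets.best == 'utf-8'
    return charsets.quality('latin1')  # normalized to iso8859-1 -> 0.5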
def cache_property(key, empty, type):
"""Return a new property object for a cache header. Useful if you
want to add support for a cache extension in a subclass."""
return property(lambda x: x._get_cache_value(key, empty, type),
lambda x, v: x._set_cache_value(key, v, type),
lambda x: x._del_cache_value(key),
'accessor for %r' % key)
class _CacheControl(UpdateDictMixin, dict):
"""Subclass of a dict that stores values for a Cache-Control header. It
has accessors for all the cache-control directives specified in RFC 2616.
The class does not differentiate between request and response directives.
    Because the cache-control directives in the HTTP header use dashes, the
    Python descriptors use underscores instead.
To get a header of the :class:`CacheControl` object again you can convert
the object into a string or call the :meth:`to_header` method. If you plan
to subclass it and add your own items have a look at the sourcecode for
that class.
.. versionchanged:: 0.4
Setting `no_cache` or `private` to boolean `True` will set the implicit
none-value which is ``*``:
>>> cc = ResponseCacheControl()
>>> cc.no_cache = True
>>> cc
<ResponseCacheControl 'no-cache'>
>>> cc.no_cache
'*'
>>> cc.no_cache = None
>>> cc
<ResponseCacheControl ''>
In versions before 0.5 the behavior documented here affected the now
no longer existing `CacheControl` class.
"""
no_cache = cache_property('no-cache', '*', None)
no_store = cache_property('no-store', None, bool)
max_age = cache_property('max-age', -1, int)
no_transform = cache_property('no-transform', None, None)
def __init__(self, values=(), on_update=None):
dict.__init__(self, values or ())
self.on_update = on_update
self.provided = values is not None
def _get_cache_value(self, key, empty, type):
"""Used internally by the accessor properties."""
if type is bool:
return key in self
if key in self:
value = self[key]
if value is None:
return empty
elif type is not None:
try:
value = type(value)
except ValueError:
pass
return value
def _set_cache_value(self, key, value, type):
"""Used internally by the accessor properties."""
if type is bool:
if value:
self[key] = None
else:
self.pop(key, None)
else:
if value is None:
self.pop(key)
elif value is True:
self[key] = None
else:
self[key] = value
def _del_cache_value(self, key):
"""Used internally by the accessor properties."""
if key in self:
del self[key]
def to_header(self):
"""Convert the stored values into a cache control header."""
return dump_header(self)
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
" ".join(
"%s=%r" % (k, v) for k, v in sorted(self.items())
),
)
class RequestCacheControl(ImmutableDictMixin, _CacheControl):
"""A cache control for requests. This is immutable and gives access
to all the request-relevant cache control headers.
To get a header of the :class:`RequestCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
max_stale = cache_property('max-stale', '*', int)
min_fresh = cache_property('min-fresh', '*', int)
no_transform = cache_property('no-transform', None, None)
only_if_cached = cache_property('only-if-cached', None, bool)
class ResponseCacheControl(_CacheControl):
"""A cache control for responses. Unlike :class:`RequestCacheControl`
this is mutable and gives access to response-relevant cache control
headers.
To get a header of the :class:`ResponseCacheControl` object again you can
convert the object into a string or call the :meth:`to_header` method. If
you plan to subclass it and add your own items have a look at the sourcecode
for that class.
.. versionadded:: 0.5
In previous versions a `CacheControl` class existed that was used
both for request and response.
"""
public = cache_property('public', None, bool)
private = cache_property('private', '*', None)
must_revalidate = cache_property('must-revalidate', None, bool)
proxy_revalidate = cache_property('proxy-revalidate', None, bool)
s_maxage = cache_property('s-maxage', None, None)
# attach cache_property to the _CacheControl as staticmethod
# so that others can reuse it.
_CacheControl.cache_property = staticmethod(cache_property)
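# Illustrative sketch (editor's addition, not part of the original module):
# how a subclass could expose a hypothetical "stale-while-revalidate"
# extension directive through the cache_property helper attached above, e.g.
# _ExampleCacheControl([('stale-while-revalidate', '30')]).stale_while_revalidate == 30
class _ExampleCacheControl(ResponseCacheControl):
    stale_while_revalidate = ResponseCacheControl.cache_property(
        'stale-while-revalidate', None, int)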
class CallbackDict(UpdateDictMixin, dict):
"""A dict that calls a function passed every time something is changed.
The function is passed the dict instance.
"""
def __init__(self, initial=None, on_update=None):
dict.__init__(self, initial or ())
self.on_update = on_update
def __repr__(self):
return '<%s %s>' % (
self.__class__.__name__,
dict.__repr__(self)
)
class HeaderSet(object):
"""Similar to the :class:`ETags` class this implements a set-like structure.
Unlike :class:`ETags` this is case insensitive and used for vary, allow, and
content-language headers.
If not constructed using the :func:`parse_set_header` function the
instantiation works like this:
>>> hs = HeaderSet(['foo', 'bar', 'baz'])
>>> hs
HeaderSet(['foo', 'bar', 'baz'])
"""
def __init__(self, headers=None, on_update=None):
self._headers = list(headers or ())
self._set = set([x.lower() for x in self._headers])
self.on_update = on_update
def add(self, header):
"""Add a new header to the set."""
self.update((header,))
def remove(self, header):
"""Remove a header from the set. This raises an :exc:`KeyError` if the
header is not in the set.
.. versionchanged:: 0.5
In older versions a :exc:`IndexError` was raised instead of a
:exc:`KeyError` if the object was missing.
:param header: the header to be removed.
"""
key = header.lower()
if key not in self._set:
raise KeyError(header)
self._set.remove(key)
        for idx, item in enumerate(self._headers):
            if item.lower() == key:
del self._headers[idx]
break
if self.on_update is not None:
self.on_update(self)
def update(self, iterable):
"""Add all the headers from the iterable to the set.
:param iterable: updates the set with the items from the iterable.
"""
inserted_any = False
for header in iterable:
key = header.lower()
if key not in self._set:
self._headers.append(header)
self._set.add(key)
inserted_any = True
if inserted_any and self.on_update is not None:
self.on_update(self)
def discard(self, header):
"""Like :meth:`remove` but ignores errors.
:param header: the header to be discarded.
"""
try:
return self.remove(header)
except KeyError:
pass
def find(self, header):
"""Return the index of the header in the set or return -1 if not found.
:param header: the header to be looked up.
"""
header = header.lower()
for idx, item in enumerate(self._headers):
if item.lower() == header:
return idx
return -1
def index(self, header):
"""Return the index of the header in the set or raise an
:exc:`IndexError`.
:param header: the header to be looked up.
"""
rv = self.find(header)
if rv < 0:
raise IndexError(header)
return rv
def clear(self):
"""Clear the set."""
self._set.clear()
del self._headers[:]
if self.on_update is not None:
self.on_update(self)
def as_set(self, preserve_casing=False):
"""Return the set as real python set type. When calling this, all
the items are converted to lowercase and the ordering is lost.
:param preserve_casing: if set to `True` the items in the set returned
will have the original case like in the
:class:`HeaderSet`, otherwise they will
be lowercase.
"""
if preserve_casing:
return set(self._headers)
return set(self._set)
def to_header(self):
"""Convert the header set into an HTTP header string."""
return ', '.join(map(quote_header_value, self._headers))
def __getitem__(self, idx):
return self._headers[idx]
def __delitem__(self, idx):
rv = self._headers.pop(idx)
self._set.remove(rv.lower())
if self.on_update is not None:
self.on_update(self)
def __setitem__(self, idx, value):
old = self._headers[idx]
self._set.remove(old.lower())
self._headers[idx] = value
self._set.add(value.lower())
if self.on_update is not None:
self.on_update(self)
def __contains__(self, header):
return header.lower() in self._set
def __len__(self):
return len(self._set)
def __iter__(self):
return iter(self._headers)
def __nonzero__(self):
return bool(self._set)
def __str__(self):
return self.to_header()
def __repr__(self):
return '%s(%r)' % (
self.__class__.__name__,
self._headers
)
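# Illustrative usage sketch (editor's addition, not part of the original
# module): case-insensitive membership while the original casing is kept for
# serialization; the header names are made-up examples.
def _example_headerset_usage():
    header_set = HeaderSet(['Accept-Encoding', 'Cookie'])
    header_set.add('accept-encoding')  # duplicate (case-insensitive), ignored
    assert len(header_set) == 2
    assert 'COOKIE' in header_set
    return header_set.to_header()  # expected: 'Accept-Encoding, Cookie'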
class ETags(object):
"""A set that can be used to check if one etag is present in a collection
of etags.
"""
def __init__(self, strong_etags=None, weak_etags=None, star_tag=False):
self._strong = frozenset(not star_tag and strong_etags or ())
self._weak = frozenset(weak_etags or ())
self.star_tag = star_tag
def as_set(self, include_weak=False):
"""Convert the `ETags` object into a python set. Per default all the
weak etags are not part of this set."""
rv = set(self._strong)
if include_weak:
rv.update(self._weak)
return rv
def is_weak(self, etag):
"""Check if an etag is weak."""
return etag in self._weak
def contains_weak(self, etag):
"""Check if an etag is part of the set including weak and strong tags."""
return self.is_weak(etag) or self.contains(etag)
def contains(self, etag):
"""Check if an etag is part of the set ignoring weak tags.
It is also possible to use the ``in`` operator.
"""
if self.star_tag:
return True
return etag in self._strong
def contains_raw(self, etag):
"""When passed a quoted tag it will check if this tag is part of the
set. If the tag is weak it is checked against weak and strong tags,
otherwise strong only."""
etag, weak = unquote_etag(etag)
if weak:
return self.contains_weak(etag)
return self.contains(etag)
def to_header(self):
"""Convert the etags set into a HTTP header string."""
if self.star_tag:
return '*'
return ', '.join(
['"%s"' % x for x in self._strong] +
['w/"%s"' % x for x in self._weak]
)
def __call__(self, etag=None, data=None, include_weak=False):
if [etag, data].count(None) != 1:
            raise TypeError('exactly one of etag or data must be provided')
if etag is None:
etag = generate_etag(data)
if include_weak:
if etag in self._weak:
return True
return etag in self._strong
def __bool__(self):
return bool(self.star_tag or self._strong or self._weak)
__nonzero__ = __bool__
def __str__(self):
return self.to_header()
def __iter__(self):
return iter(self._strong)
def __contains__(self, etag):
return self.contains(etag)
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
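# Illustrative usage sketch (editor's addition, not part of the original
# module): strong vs. weak etag membership; the tag values are made up.
def _example_etags_usage():
    etags = ETags(strong_etags=['abc'], weak_etags=['def'])
    assert 'abc' in etags
    assert etags.contains_weak('def')
    assert not etags.contains('def')
    return etags.to_header()  # expected: '"abc", w/"def"'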
class IfRange(object):
"""Very simple object that represents the `If-Range` header in parsed
form. It will either have neither a etag or date or one of either but
never both.
.. versionadded:: 0.7
"""
def __init__(self, etag=None, date=None):
#: The etag parsed and unquoted. Ranges always operate on strong
#: etags so the weakness information is not necessary.
self.etag = etag
#: The date in parsed format or `None`.
self.date = date
def to_header(self):
"""Converts the object back into an HTTP header."""
if self.date is not None:
return http_date(self.date)
if self.etag is not None:
return quote_etag(self.etag)
return ''
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Range(object):
"""Represents a range header. All the methods are only supporting bytes
as unit. It does store multiple ranges but :meth:`range_for_length` will
only work if only one range is provided.
.. versionadded:: 0.7
"""
def __init__(self, units, ranges):
#: The units of this range. Usually "bytes".
self.units = units
#: A list of ``(begin, end)`` tuples for the range header provided.
#: The ranges are non-inclusive.
self.ranges = ranges
def range_for_length(self, length):
"""If the range is for bytes, the length is not None and there is
exactly one range and it is satisfiable it returns a ``(start, stop)``
tuple, otherwise `None`.
"""
if self.units != 'bytes' or length is None or len(self.ranges) != 1:
return None
start, end = self.ranges[0]
if end is None:
end = length
if start < 0:
start += length
if is_byte_range_valid(start, end, length):
return start, min(end, length)
def make_content_range(self, length):
"""Creates a :class:`~werkzeug.datastructures.ContentRange` object
from the current range and given content length.
"""
rng = self.range_for_length(length)
if rng is not None:
return ContentRange(self.units, rng[0], rng[1], length)
def to_header(self):
"""Converts the object back into an HTTP header."""
ranges = []
for begin, end in self.ranges:
if end is None:
ranges.append(begin >= 0 and '%s-' % begin or str(begin))
else:
ranges.append('%s-%s' % (begin, end - 1))
return '%s=%s' % (self.units, ','.join(ranges))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
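# Illustrative usage sketch (editor's addition, not part of the original
# module): a single absolute byte range serialized and resolved against a
# known content length; the numbers are made-up examples.
def _example_range_usage():
    byte_range = Range('bytes', [(0, 500)])
    assert byte_range.to_header() == 'bytes=0-499'
    assert byte_range.range_for_length(1000) == (0, 500)
    # expected header: 'bytes 0-499/1000'
    return byte_range.make_content_range(1000).to_header()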
class ContentRange(object):
"""Represents the content range header.
.. versionadded:: 0.7
"""
def __init__(self, units, start, stop, length=None, on_update=None):
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self.on_update = on_update
self.set(start, stop, length, units)
def _callback_property(name):
def fget(self):
return getattr(self, name)
def fset(self, value):
setattr(self, name, value)
if self.on_update is not None:
self.on_update(self)
return property(fget, fset)
#: The units to use, usually "bytes"
units = _callback_property('_units')
#: The start point of the range or `None`.
start = _callback_property('_start')
#: The stop point of the range (non-inclusive) or `None`. Can only be
#: `None` if also start is `None`.
stop = _callback_property('_stop')
#: The length of the range or `None`.
length = _callback_property('_length')
def set(self, start, stop, length=None, units='bytes'):
"""Simple method to update the ranges."""
assert is_byte_range_valid(start, stop, length), \
'Bad range provided'
self._units = units
self._start = start
self._stop = stop
self._length = length
if self.on_update is not None:
self.on_update(self)
def unset(self):
"""Sets the units to `None` which indicates that the header should
no longer be used.
"""
self.set(None, None, units=None)
def to_header(self):
if self.units is None:
return ''
if self.length is None:
length = '*'
else:
length = self.length
if self.start is None:
return '%s */%s' % (self.units, length)
return '%s %s-%s/%s' % (
self.units,
self.start,
self.stop - 1,
length
)
def __nonzero__(self):
return self.units is not None
__bool__ = __nonzero__
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, str(self))
class Authorization(ImmutableDictMixin, dict):
"""Represents an `Authorization` header sent by the client. You should
not create this kind of object yourself but use it when it's returned by
the `parse_authorization_header` function.
This object is a dict subclass and can be altered by setting dict items
but it should be considered immutable as it's returned by the client and
not meant for modifications.
.. versionchanged:: 0.5
This object became immutable.
"""
def __init__(self, auth_type, data=None):
dict.__init__(self, data or {})
self.type = auth_type
username = property(lambda x: x.get('username'), doc='''
The username transmitted. This is set for both basic and digest
auth all the time.''')
password = property(lambda x: x.get('password'), doc='''
When the authentication type is basic this is the password
transmitted by the client, else `None`.''')
realm = property(lambda x: x.get('realm'), doc='''
This is the server realm sent back for HTTP digest auth.''')
nonce = property(lambda x: x.get('nonce'), doc='''
The nonce the server sent for digest auth, sent back by the client.
A nonce should be unique for every 401 response for HTTP digest
auth.''')
uri = property(lambda x: x.get('uri'), doc='''
The URI from Request-URI of the Request-Line; duplicated because
proxies are allowed to change the Request-Line in transit. HTTP
digest auth only.''')
nc = property(lambda x: x.get('nc'), doc='''
The nonce count value transmitted by clients if a qop-header is
also transmitted. HTTP digest auth only.''')
cnonce = property(lambda x: x.get('cnonce'), doc='''
If the server sent a qop-header in the ``WWW-Authenticate``
header, the client has to provide this value for HTTP digest auth.
See the RFC for more details.''')
response = property(lambda x: x.get('response'), doc='''
A string of 32 hex digits computed as defined in RFC 2617, which
proves that the user knows a password. Digest auth only.''')
opaque = property(lambda x: x.get('opaque'), doc='''
The opaque header from the server returned unchanged by the client.
It is recommended that this string be base64 or hexadecimal data.
Digest auth only.''')
@property
def qop(self):
"""Indicates what "quality of protection" the client has applied to
the message for HTTP digest auth."""
def on_update(header_set):
if not header_set and 'qop' in self:
del self['qop']
elif header_set:
self['qop'] = header_set.to_header()
return parse_set_header(self.get('qop'), on_update)
class WWWAuthenticate(UpdateDictMixin, dict):
"""Provides simple access to `WWW-Authenticate` headers."""
#: list of keys that require quoting in the generated header
_require_quoting = frozenset(['domain', 'nonce', 'opaque', 'realm', 'qop'])
def __init__(self, auth_type=None, values=None, on_update=None):
dict.__init__(self, values or ())
if auth_type:
self['__auth_type__'] = auth_type
self.on_update = on_update
def set_basic(self, realm='authentication required'):
"""Clear the auth info and enable basic auth."""
dict.clear(self)
dict.update(self, {'__auth_type__': 'basic', 'realm': realm})
if self.on_update:
self.on_update(self)
def set_digest(self, realm, nonce, qop=('auth',), opaque=None,
algorithm=None, stale=False):
"""Clear the auth info and enable digest auth."""
d = {
'__auth_type__': 'digest',
'realm': realm,
'nonce': nonce,
'qop': dump_header(qop)
}
if stale:
d['stale'] = 'TRUE'
if opaque is not None:
d['opaque'] = opaque
if algorithm is not None:
d['algorithm'] = algorithm
dict.clear(self)
dict.update(self, d)
if self.on_update:
self.on_update(self)
def to_header(self):
"""Convert the stored values into a WWW-Authenticate header."""
d = dict(self)
auth_type = d.pop('__auth_type__', None) or 'basic'
return '%s %s' % (auth_type.title(), ', '.join([
'%s=%s' % (key, quote_header_value(value,
allow_token=key not in self._require_quoting))
for key, value in iteritems(d)
]))
def __str__(self):
return self.to_header()
def __repr__(self):
return '<%s %r>' % (
self.__class__.__name__,
self.to_header()
)
def auth_property(name, doc=None):
"""A static helper function for subclasses to add extra authentication
system properties onto a class::
class FooAuthenticate(WWWAuthenticate):
special_realm = auth_property('special_realm')
For more information have a look at the sourcecode to see how the
regular properties (:attr:`realm` etc.) are implemented.
"""
def _set_value(self, value):
if value is None:
self.pop(name, None)
else:
self[name] = str(value)
return property(lambda x: x.get(name), _set_value, doc=doc)
def _set_property(name, doc=None):
def fget(self):
def on_update(header_set):
if not header_set and name in self:
del self[name]
elif header_set:
self[name] = header_set.to_header()
return parse_set_header(self.get(name), on_update)
return property(fget, doc=doc)
type = auth_property('__auth_type__', doc='''
The type of the auth mechanism. HTTP currently specifies
`Basic` and `Digest`.''')
realm = auth_property('realm', doc='''
A string to be displayed to users so they know which username and
password to use. This string should contain at least the name of
the host performing the authentication and might additionally
indicate the collection of users who might have access.''')
domain = _set_property('domain', doc='''
A list of URIs that define the protection space. If a URI is an
absolute path, it is relative to the canonical root URL of the
server being accessed.''')
nonce = auth_property('nonce', doc='''
A server-specified data string which should be uniquely generated
each time a 401 response is made. It is recommended that this
string be base64 or hexadecimal data.''')
opaque = auth_property('opaque', doc='''
A string of data, specified by the server, which should be returned
by the client unchanged in the Authorization header of subsequent
requests with URIs in the same protection space. It is recommended
that this string be base64 or hexadecimal data.''')
algorithm = auth_property('algorithm', doc='''
A string indicating a pair of algorithms used to produce the digest
and a checksum. If this is not present it is assumed to be "MD5".
If the algorithm is not understood, the challenge should be ignored
(and a different one used, if there is more than one).''')
qop = _set_property('qop', doc='''
A set of quality-of-privacy directives such as auth and auth-int.''')
def _get_stale(self):
val = self.get('stale')
if val is not None:
return val.lower() == 'true'
def _set_stale(self, value):
if value is None:
self.pop('stale', None)
else:
self['stale'] = value and 'TRUE' or 'FALSE'
stale = property(_get_stale, _set_stale, doc='''
A flag, indicating that the previous request from the client was
rejected because the nonce value was stale.''')
del _get_stale, _set_stale
# make auth_property a staticmethod so that subclasses of
# `WWWAuthenticate` can use it for new properties.
auth_property = staticmethod(auth_property)
del _set_property
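# Illustrative usage sketch (editor's addition, not part of the original
# module): building a basic-auth challenge header; the realm is made up.
def _example_www_authenticate_usage():
    challenge = WWWAuthenticate()
    challenge.set_basic(realm='example realm')
    assert challenge.realm == 'example realm'
    return challenge.to_header()  # expected: 'Basic realm="example realm"'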
class FileStorage(object):
"""The :class:`FileStorage` class is a thin wrapper over incoming files.
It is used by the request object to represent uploaded files. All the
attributes of the wrapper stream are proxied by the file storage so
it's possible to do ``storage.read()`` instead of the long form
``storage.stream.read()``.
"""
def __init__(self, stream=None, filename=None, name=None,
content_type=None, content_length=None,
headers=None):
self.name = name
self.stream = stream or _empty_stream
# if no filename is provided we can attempt to get the filename
# from the stream object passed. There we have to be careful to
# skip things like <fdopen>, <stderr> etc. Python marks these
# special filenames with angular brackets.
if filename is None:
filename = getattr(stream, 'name', None)
s = make_literal_wrapper(filename)
if filename and filename[0] == s('<') and filename[-1] == s('>'):
filename = None
# On Python 3 we want to make sure the filename is always unicode.
# This might not be if the name attribute is bytes due to the
# file being opened from the bytes API.
if not PY2 and isinstance(filename, bytes):
filename = filename.decode(get_filesystem_encoding(),
'replace')
self.filename = filename
if headers is None:
headers = Headers()
self.headers = headers
if content_type is not None:
headers['Content-Type'] = content_type
if content_length is not None:
headers['Content-Length'] = str(content_length)
def _parse_content_type(self):
if not hasattr(self, '_parsed_content_type'):
self._parsed_content_type = \
parse_options_header(self.content_type)
@property
def content_type(self):
"""The content-type sent in the header. Usually not available"""
return self.headers.get('content-type')
@property
def content_length(self):
"""The content-length sent in the header. Usually not available"""
return int(self.headers.get('content-length') or 0)
@property
def mimetype(self):
"""Like :attr:`content_type`, but without parameters (eg, without
charset, type etc.) and always lowercase. For example if the content
type is ``text/HTML; charset=utf-8`` the mimetype would be
``'text/html'``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[0].lower()
@property
def mimetype_params(self):
"""The mimetype parameters as dict. For example if the content
type is ``text/html; charset=utf-8`` the params would be
``{'charset': 'utf-8'}``.
.. versionadded:: 0.7
"""
self._parse_content_type()
return self._parsed_content_type[1]
def save(self, dst, buffer_size=16384):
"""Save the file to a destination path or file object. If the
destination is a file object you have to close it yourself after the
call. The buffer size is the number of bytes held in memory during
the copy process. It defaults to 16KB.
For secure file saving also have a look at :func:`secure_filename`.
:param dst: a filename or open file object the uploaded file
is saved to.
:param buffer_size: the size of the buffer. This works the same as
the `length` parameter of
:func:`shutil.copyfileobj`.
"""
from shutil import copyfileobj
close_dst = False
if isinstance(dst, string_types):
dst = open(dst, 'wb')
close_dst = True
try:
copyfileobj(self.stream, dst, buffer_size)
finally:
if close_dst:
dst.close()
def close(self):
"""Close the underlying file if possible."""
try:
self.stream.close()
except Exception:
pass
def __nonzero__(self):
return bool(self.filename)
__bool__ = __nonzero__
def __getattr__(self, name):
return getattr(self.stream, name)
def __iter__(self):
return iter(self.readline, '')
def __repr__(self):
return '<%s: %r (%r)>' % (
self.__class__.__name__,
self.filename,
self.content_type
)
# circular dependencies
from werkzeug.http import dump_options_header, dump_header, generate_etag, \
    quote_header_value, parse_set_header, unquote_etag, quote_etag, \
    parse_options_header, http_date, is_byte_range_valid
from werkzeug import exceptions
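# Illustrative usage sketch (editor's addition, not part of the original
# module): wrapping an in-memory upload stream with FileStorage; the
# filename and payload are made-up examples.
def _example_filestorage_usage():
    from io import BytesIO
    storage = FileStorage(stream=BytesIO(b'hello'), filename='hello.txt',
                          content_type='text/plain; charset=utf-8')
    assert storage.mimetype == 'text/plain'
    assert storage.mimetype_params == {'charset': 'utf-8'}
    return storage.read()  # proxied to the wrapped stream: b'hello'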
| gpl-2.0 |
maybelinot/df2gspread | tests/test_df2gspread.py | 1 | 16942 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Eduard Trott
# @Date: 2015-09-16 13:25:41
# @Email: [email protected]
# @Last modified by: etrott
# @Last Modified time: 2016-03-08 12:38:03
import pytest
# MAKE THIS SO IT ONLY EVER GETS RUN ONCE PER "SESSION"
def test_global_import():
from df2gspread import df2gspread as d2g
from df2gspread import gspread2df as g2d
def test_version_check():
from df2gspread import _version
######################################################
# THIS NEEDS TO BE UPDATED EVERY TIME THE MAIN PACKAGE
# VERSION IS UPDATED!!!
######################################################
_v = '1.0.5'
if _version.__version__ != _v:
raise SystemError('SYNC VERSION in tests/test_df2gspread.py')
def test_spreadsheet_invalid_file_id(user_credentials_not_available):
if user_credentials_not_available:
pytest.xfail(reason='Credentials')
from df2gspread import gspread2df as g2d
with pytest.raises(RuntimeError):
g2d.download(gfile='invalid_file_id')
def test_worksheet_invalid_name(user_credentials_not_available):
if user_credentials_not_available:
pytest.xfail(reason='Credentials')
import pandas as pd
from df2gspread import gspread2df as g2d
from df2gspread import df2gspread as d2g
filepath = '/df2gspread_tests/invalid_wks_name'
df_upload = pd.DataFrame(['test'])
d2g.upload(df_upload, filepath)
with pytest.raises(RuntimeError):
g2d.download(gfile=filepath, wks_name='invalid_wks_name')
def test_worksheet(user_credentials_not_available):
if user_credentials_not_available:
pytest.xfail(reason='Credentials')
import string
import random
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from df2gspread import df2gspread as d2g
from df2gspread import gspread2df as g2d
from df2gspread.utils import get_credentials
from df2gspread.gfiles import get_file_id
from df2gspread.gfiles import delete_file
df_upload = pd.DataFrame(
{0: ['1', '2', 'x', '4']},
index=[0, 1, 2, 3])
filepath = '/df2gspread_tests/' + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
# First worksheet as default
d2g.upload(df_upload, filepath)
df_download = g2d.download(filepath, col_names=True, row_names=True)
df_download.columns = df_download.columns.astype(np.int64)
df_download.index = df_download.index.astype(np.int64)
assert_frame_equal(df_upload, df_download)
# Updating existed spreadsheet
d2g.upload(df_upload, filepath, wks_name='Sheet2')
df_download = g2d.download(filepath, col_names=True, row_names=True)
df_download.columns = df_download.columns.astype(np.int64)
df_download.index = df_download.index.astype(np.int64)
assert_frame_equal(df_upload, df_download)
# Updating with file_id
credentials = get_credentials()
file_id = get_file_id(credentials, filepath)
d2g.upload(df_upload, file_id)
df_download = g2d.download(file_id, col_names=True, row_names=True)
df_download.columns = df_download.columns.astype(np.int64)
df_download.index = df_download.index.astype(np.int64)
assert_frame_equal(df_upload, df_download)
# Only row_names
wks = d2g.upload(df_upload, filepath, col_names=False)
df_download = g2d.download(filepath, row_names=True)
df_download.index = df_download.index.astype(np.int64)
# df_download.columns = df_download.columns.astype(np.int64)
assert_frame_equal(df_upload, df_download)
# Only col_names
wks = d2g.upload(df_upload, filepath, row_names=False)
df_download = g2d.download(filepath, col_names=True)
df_download.columns = df_download.columns.astype(np.int64)
assert_frame_equal(df_upload, df_download)
# Without column or row names
wks = d2g.upload(df_upload, filepath, row_names=False, col_names=False)
df_download = g2d.download(filepath)
assert_frame_equal(df_upload, df_download)
# Clear created file from drive
delete_file(credentials, file_id)
def test_gspread2df_start_cell(user_credentials_not_available):
if user_credentials_not_available:
pytest.xfail(reason='Credentials')
import string
import random
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.util.testing import assert_frame_equal
from df2gspread import df2gspread as d2g
from df2gspread import gspread2df as g2d
from df2gspread.utils import get_credentials
from df2gspread.gfiles import get_file_id
from df2gspread.gfiles import delete_file
filepath = '/df2gspread_tests/' + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
df_upload = pd.DataFrame(
{0: ['1', '2', 'x', '4'],
1: ['2', '2', 'y', '4'],
2: ['3', '2', 'w', '4'],
3: ['4', '2', 'z', '4']},
index=[0, 1, 2, 3])
# Start cell out of the table size
d2g.upload(df_upload, filepath, row_names=False, col_names=False)
with pytest.raises(RuntimeError):
df_download = g2d.download(filepath, start_cell='A5')
with pytest.raises(RuntimeError):
df_download = g2d.download(filepath, start_cell='E1')
# Should be fixed in gspread
# with pytest.raises(RuntimeError):
# df_download = g2d.download(filepath, start_cell='A0')
# start_cell = 'A3'
d2g.upload(df_upload, filepath, row_names=False, col_names=False)
df_download = g2d.download(filepath, start_cell='A3')
assert_array_equal(df_upload.iloc[2:, :], df_download)
# start_cell = 'B3'
d2g.upload(df_upload, filepath, row_names=False, col_names=False)
df_download = g2d.download(filepath, start_cell='B3')
assert_array_equal(df_upload.iloc[2:, 1:], df_download)
# Clear created file from drive
credentials = get_credentials()
file_id = get_file_id(credentials, filepath)
delete_file(credentials, file_id)
def test_big_worksheet(user_credentials_not_available):
if user_credentials_not_available:
pytest.xfail(reason='Credentials')
import string
import random
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from df2gspread import df2gspread as d2g
from df2gspread import gspread2df as g2d
from df2gspread.utils import get_credentials
from df2gspread.gfiles import get_file_id
from df2gspread.gfiles import delete_file
filepath = '/df2gspread_tests/' + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
# Indexes count out of default 1000
df_upload = pd.DataFrame(index=range(1001),
columns=range(2))
df_upload = df_upload.fillna('0')
d2g.upload(df_upload, filepath, row_names=False, col_names=False)
df_download = g2d.download(filepath)
# df_download.columns = df_download.columns.astype(np.int64)
assert_frame_equal(df_upload, df_download)
# Columns count out of default 100
df_upload = pd.DataFrame(index=range(1),
columns=range(101))
df_upload = df_upload.fillna('0')
d2g.upload(df_upload, filepath, row_names=False, col_names=False)
df_download = g2d.download(filepath)
# df_download.columns = df_download.columns.astype(np.int64)
assert_frame_equal(df_upload, df_download)
# Clear created file from drive
credentials = get_credentials()
file_id = get_file_id(credentials, filepath)
delete_file(credentials, file_id)
def test_df2gspread_start_cell(user_credentials_not_available):
if user_credentials_not_available:
pytest.xfail(reason='Credentials')
import string
import random
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from df2gspread import df2gspread as d2g
from df2gspread import gspread2df as g2d
from df2gspread.utils import get_credentials
from df2gspread.gfiles import get_file_id
from df2gspread.gfiles import delete_file
filepath = '/df2gspread_tests/' + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
df_upload_0 = pd.DataFrame(
{0: ['1', '2', 'x', '4']},
index=[0, 1, 2, 3])
d2g.upload(df_upload_0, filepath, row_names=False,
col_names=False, start_cell='A1')
df_download = g2d.download(filepath)
df_upload = df_upload_0
assert_frame_equal(df_upload, df_download)
d2g.upload(df_upload_0, filepath, row_names=False,
col_names=False, start_cell='A2')
df_download = g2d.download(filepath)
df_upload = df_upload_0
new_rows = 1
new_rows_array = np.chararray((new_rows, len(df_upload.columns)))
new_rows_array[:] = ''
df_new_rows = pd.DataFrame(data=new_rows_array)
df_upload = df_new_rows.append(df_upload, ignore_index=True)
assert_frame_equal(df_upload, df_download)
d2g.upload(df_upload_0, filepath, row_names=False,
col_names=False, start_cell='B1')
df_download = g2d.download(filepath)
df_upload = df_upload_0
df_upload.insert(0, '-1', '')
df_upload.columns = range(0, len(df_upload.columns))
assert_frame_equal(df_upload, df_download)
d2g.upload(df_upload_0, filepath, row_names=False,
col_names=False, start_cell='AB10')
df_download = g2d.download(filepath)
df_upload = df_upload_0
new_cols = 27
new_cols_array = np.chararray((len(df_upload), new_cols))
new_cols_array[:] = ''
df_new_cols = pd.DataFrame(data=new_cols_array)
df_upload = pd.concat([df_new_cols, df_upload], axis=1)
df_upload.columns = range(0, len(df_upload.columns))
new_rows = 9
new_rows_array = np.chararray((new_rows, len(df_upload.columns)))
new_rows_array[:] = ''
df_new_rows = pd.DataFrame(data=new_rows_array)
df_upload = df_new_rows.append(df_upload, ignore_index=True)
assert_frame_equal(df_upload, df_download)
# Backward compatibility df2gspread => gspread2df
d2g.upload(df_upload_0, filepath, row_names=False,
col_names=False, start_cell='AB10')
df_upload = df_upload_0
df_download = g2d.download(filepath, start_cell='AB10')
assert_frame_equal(df_upload, df_download)
d2g.upload(df_upload_0, filepath, start_cell='AB10')
df_upload = df_upload_0
df_download = g2d.download(
filepath, row_names=True, col_names=True, start_cell='AB10')
df_download.index = df_download.index.astype(np.int64)
df_download.columns = df_download.columns.astype(np.int64)
assert_frame_equal(df_upload, df_download)
# Clear created file from drive
credentials = get_credentials()
file_id = get_file_id(credentials, filepath)
delete_file(credentials, file_id)
def test_df2gspread_df_size(user_credentials_not_available):
if user_credentials_not_available:
pytest.xfail(reason='Credentials')
import string
import random
import numpy as np
import pandas as pd
import gspread
from pandas.util.testing import assert_frame_equal
from df2gspread import df2gspread as d2g
from df2gspread import gspread2df as g2d
from df2gspread.utils import get_credentials
from df2gspread.gfiles import get_file_id, delete_file, get_worksheet
filepath = '/df2gspread_tests/' + ''.join(
random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
credentials = get_credentials()
gc = gspread.authorize(credentials)
gfile_id = get_file_id(credentials, filepath, write_access=True)
df_upload_a = pd.DataFrame(
{0: ['1', '2', 'x', '4']},
index=[0, 1, 2, 3])
df_upload_b = pd.DataFrame(data=np.array(
[np.arange(1500)]*2).T).applymap(str)
# Uploading a small DF to new sheet to test for sizing down from default
d2g.upload(df_upload_a, filepath, "test1",
row_names=False, col_names=False, df_size=True)
df_download = g2d.download(filepath, "test1")
df_upload = df_upload_a
wks = get_worksheet(gc, gfile_id, "test1")
assert wks.row_count == len(df_upload)
assert len(df_upload.columns) == wks.col_count
assert len(df_download) == len(df_upload)
assert_frame_equal(df_upload, df_download)
# Upload a large DF to existing, smaller sheet to test for proper expansion
d2g.upload(df_upload_b, filepath, "test1",
row_names=False, col_names=False, df_size=True)
df_download = g2d.download(filepath, "test1")
df_upload = df_upload_b
wks = get_worksheet(gc, gfile_id, "test1")
assert wks.row_count == len(df_upload)
assert len(df_upload.columns) == wks.col_count
assert len(df_download) == len(df_upload)
assert_frame_equal(df_upload, df_download)
# Uploading a small DF to existing large sheet to test for sizing down from default
d2g.upload(df_upload_a, filepath, "test1",
row_names=False, col_names=False, df_size=True)
df_download = g2d.download(filepath, "test1")
df_upload = df_upload_a
wks = get_worksheet(gc, gfile_id, "test1")
assert wks.row_count == len(df_upload)
assert len(df_upload.columns) == wks.col_count
assert len(df_download) == len(df_upload)
assert_frame_equal(df_upload, df_download)
# New sheet with col names, make sure 1 extra row and column
d2g.upload(df_upload_a, filepath, "test2",
row_names=True, col_names=True, df_size=True)
df_download = g2d.download(filepath, "test2")
df_upload = df_upload_a
wks = get_worksheet(gc, gfile_id, "test2")
assert wks.row_count == len(df_upload) + 1
assert len(df_upload.columns) + 1 == wks.col_count
assert len(df_download) == len(df_upload) + 1
# Upload to new sheet with specified dimensions
d2g.upload(df_upload_a, filepath, "test3", row_names=False,
col_names=False, new_sheet_dimensions=(100, 10))
df_download = g2d.download(filepath, "test3")
df_upload = df_upload_a
wks = get_worksheet(gc, gfile_id, "test3")
assert wks.row_count == 100
assert 10 == wks.col_count
assert_frame_equal(df_upload, df_download)
# Test df_size with start_cell
d2g.upload(df_upload_a, filepath, "test4", row_names=False, col_names=False, start_cell='AB10',
df_size=True)
df_download = g2d.download(filepath, "test4")
df_upload = df_upload_a
new_cols = 27
new_cols_array = np.chararray((len(df_upload), new_cols))
new_cols_array[:] = ''
df_new_cols = pd.DataFrame(data=new_cols_array)
df_upload = pd.concat([df_new_cols, df_upload], axis=1)
df_upload.columns = range(0, len(df_upload.columns))
new_rows = 9
new_rows_array = np.chararray((new_rows, len(df_upload.columns)))
new_rows_array[:] = ''
df_new_rows = pd.DataFrame(data=new_rows_array)
df_upload = df_new_rows.append(df_upload, ignore_index=True)
wks = get_worksheet(gc, gfile_id, "test4")
assert wks.row_count == len(df_upload)
assert len(df_upload.columns) == wks.col_count
assert len(df_download) == len(df_upload)
assert_frame_equal(df_upload, df_download)
# Test df_size with start_cell and sheet dimensions which need to be expanded
d2g.upload(df_upload_a, filepath, "test5", row_names=False, col_names=False, start_cell='AB10',
df_size=True, new_sheet_dimensions=(10, 27))
df_download = g2d.download(filepath, "test5")
df_upload = df_upload_a
new_cols = 27
new_cols_array = np.chararray((len(df_upload), new_cols))
new_cols_array[:] = ''
df_new_cols = pd.DataFrame(data=new_cols_array)
df_upload = pd.concat([df_new_cols, df_upload], axis=1)
df_upload.columns = range(0, len(df_upload.columns))
new_rows = 9
new_rows_array = np.chararray((new_rows, len(df_upload.columns)))
new_rows_array[:] = ''
df_new_rows = pd.DataFrame(data=new_rows_array)
df_upload = df_new_rows.append(df_upload, ignore_index=True)
wks = get_worksheet(gc, gfile_id, "test5")
assert wks.row_count == len(df_upload)
assert len(df_upload.columns) == wks.col_count
assert len(df_download) == len(df_upload)
assert_frame_equal(df_upload, df_download)
# Clear created file from drive
delete_file(credentials, gfile_id)
def test_delete_file(user_credentials_not_available):
if user_credentials_not_available:
pytest.xfail(reason='Credentials')
from df2gspread.gfiles import delete_file
from df2gspread.utils import get_credentials
from df2gspread.gfiles import get_file_id
# Clear created folder for testing
credentials = get_credentials()
file_id = get_file_id(credentials, '/df2gspread_tests')
delete_file(credentials, file_id)
| gpl-3.0 |
tomkralidis/geonode | geonode/compat.py | 6 | 1429 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2020 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
long, unicode, basestring = int, str, str
unicode = str
string_type = str
def ensure_string(payload_bytes):
import re
_payload = payload_bytes
try:
_payload = payload_bytes.decode("utf-8")
except AttributeError:
# when _payload is already a string
pass
except UnicodeDecodeError:
# when payload is a byte-like object (e.g bytearray)
# primarily used in when _payload is an image
return _payload
if re.match(r'b\'(.*)\'', _payload):
_payload = re.match(r'b\'(.*)\'', _payload).groups()[0]
return _payload
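# Illustrative usage sketch (editor's addition, not part of the original
# module): the three input shapes ensure_string handles; values are made up.
def _example_ensure_string_usage():
    assert ensure_string(b'abc') == 'abc'   # bytes are decoded
    assert ensure_string('abc') == 'abc'    # plain strings pass through
    return ensure_string("b'abc'")          # stringified bytes reprs -> 'abc'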
| gpl-3.0 |
giggsey/SickRage | lib/github/Organization.py | 28 | 28358 | # -*- coding: utf-8 -*-
# ########################## Copyrights and license ############################
# #
# Copyright 2012 Steve English <[email protected]> #
# Copyright 2012 Vincent Jacques <[email protected]> #
# Copyright 2012 Zearin <[email protected]> #
# Copyright 2013 AKFish <[email protected]> #
# Copyright 2013 Vincent Jacques <[email protected]> #
# Copyright 2013 martinqt <[email protected]> #
# #
# This file is part of PyGithub. http://jacquev6.github.com/PyGithub/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
# ##############################################################################
import datetime
import github.GithubObject
import github.PaginatedList
import github.Plan
import github.Team
import github.Event
import github.Repository
import github.NamedUser
class Organization(github.GithubObject.CompletableGithubObject):
"""
This class represents Organizations. The reference can be found here http://developer.github.com/v3/orgs/
"""
@property
def avatar_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def billing_email(self):
"""
:type: string
"""
self._completeIfNotSet(self._billing_email)
return self._billing_email.value
@property
def blog(self):
"""
:type: string
"""
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self):
"""
:type: integer
"""
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self):
"""
:type: string
"""
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def disk_usage(self):
"""
:type: integer
"""
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self):
"""
:type: string
"""
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self):
"""
:type: integer
"""
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def following(self):
"""
:type: integer
"""
self._completeIfNotSet(self._following)
return self._following.value
@property
def gravatar_id(self):
"""
:type: string
"""
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def html_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self):
"""
:type: integer
"""
self._completeIfNotSet(self._id)
return self._id.value
@property
def location(self):
"""
:type: string
"""
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self):
"""
:type: string
"""
self._completeIfNotSet(self._login)
return self._login.value
@property
def members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._members_url)
return self._members_url.value
@property
def name(self):
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def owned_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self):
"""
:type: :class:`github.Plan.Plan`
"""
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_members_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._public_members_url)
return self._public_members_url.value
@property
def public_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def repos_url(self):
"""
:type: string
"""
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def total_private_repos(self):
"""
:type: integer
"""
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def type(self):
"""
:type: string
"""
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self):
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self):
"""
:type: string
"""
self._completeIfNotSet(self._url)
return self._url.value
def add_to_public_members(self, public_member):
"""
:calls: `PUT /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"PUT",
self.url + "/public_members/" + public_member._identity
)
def create_fork(self, repo):
"""
:calls: `POST /repos/:owner/:repo/forks <http://developer.github.com/v3/repos/forks>`_
:param repo: :class:`github.Repository.Repository`
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(repo, github.Repository.Repository), repo
url_parameters = {
"org": self.login,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
"/repos/" + repo.owner.login + "/" + repo.name + "/forks",
parameters=url_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_repo(self, name, description=github.GithubObject.NotSet, homepage=github.GithubObject.NotSet, private=github.GithubObject.NotSet, has_issues=github.GithubObject.NotSet, has_wiki=github.GithubObject.NotSet, has_downloads=github.GithubObject.NotSet, team_id=github.GithubObject.NotSet, auto_init=github.GithubObject.NotSet, gitignore_template=github.GithubObject.NotSet):
"""
:calls: `POST /orgs/:org/repos <http://developer.github.com/v3/repos>`_
:param name: string
:param description: string
:param homepage: string
:param private: bool
:param has_issues: bool
:param has_wiki: bool
:param has_downloads: bool
:param team_id: :class:`github.Team.Team`
:param auto_init: bool
:param gitignore_template: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
assert description is github.GithubObject.NotSet or isinstance(description, (str, unicode)), description
assert homepage is github.GithubObject.NotSet or isinstance(homepage, (str, unicode)), homepage
assert private is github.GithubObject.NotSet or isinstance(private, bool), private
assert has_issues is github.GithubObject.NotSet or isinstance(has_issues, bool), has_issues
assert has_wiki is github.GithubObject.NotSet or isinstance(has_wiki, bool), has_wiki
assert has_downloads is github.GithubObject.NotSet or isinstance(has_downloads, bool), has_downloads
assert team_id is github.GithubObject.NotSet or isinstance(team_id, github.Team.Team), team_id
assert auto_init is github.GithubObject.NotSet or isinstance(auto_init, bool), auto_init
assert gitignore_template is github.GithubObject.NotSet or isinstance(gitignore_template, (str, unicode)), gitignore_template
post_parameters = {
"name": name,
}
if description is not github.GithubObject.NotSet:
post_parameters["description"] = description
if homepage is not github.GithubObject.NotSet:
post_parameters["homepage"] = homepage
if private is not github.GithubObject.NotSet:
post_parameters["private"] = private
if has_issues is not github.GithubObject.NotSet:
post_parameters["has_issues"] = has_issues
if has_wiki is not github.GithubObject.NotSet:
post_parameters["has_wiki"] = has_wiki
if has_downloads is not github.GithubObject.NotSet:
post_parameters["has_downloads"] = has_downloads
if team_id is not github.GithubObject.NotSet:
post_parameters["team_id"] = team_id._identity
if auto_init is not github.GithubObject.NotSet:
post_parameters["auto_init"] = auto_init
if gitignore_template is not github.GithubObject.NotSet:
post_parameters["gitignore_template"] = gitignore_template
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/repos",
input=post_parameters
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_team(self, name, repo_names=github.GithubObject.NotSet, permission=github.GithubObject.NotSet):
"""
:calls: `POST /orgs/:org/teams <http://developer.github.com/v3/orgs/teams>`_
:param name: string
:param repo_names: list of :class:`github.Repository.Repository`
:param permission: string
:rtype: :class:`github.Team.Team`
"""
assert isinstance(name, (str, unicode)), name
assert repo_names is github.GithubObject.NotSet or all(isinstance(element, github.Repository.Repository) for element in repo_names), repo_names
assert permission is github.GithubObject.NotSet or isinstance(permission, (str, unicode)), permission
post_parameters = {
"name": name,
}
if repo_names is not github.GithubObject.NotSet:
post_parameters["repo_names"] = [element._identity for element in repo_names]
if permission is not github.GithubObject.NotSet:
post_parameters["permission"] = permission
headers, data = self._requester.requestJsonAndCheck(
"POST",
self.url + "/teams",
input=post_parameters
)
return github.Team.Team(self._requester, headers, data, completed=True)
def edit(self, billing_email=github.GithubObject.NotSet, blog=github.GithubObject.NotSet, company=github.GithubObject.NotSet, email=github.GithubObject.NotSet, location=github.GithubObject.NotSet, name=github.GithubObject.NotSet):
"""
:calls: `PATCH /orgs/:org <http://developer.github.com/v3/orgs>`_
:param billing_email: string
:param blog: string
:param company: string
:param email: string
:param location: string
:param name: string
:rtype: None
"""
assert billing_email is github.GithubObject.NotSet or isinstance(billing_email, (str, unicode)), billing_email
assert blog is github.GithubObject.NotSet or isinstance(blog, (str, unicode)), blog
assert company is github.GithubObject.NotSet or isinstance(company, (str, unicode)), company
assert email is github.GithubObject.NotSet or isinstance(email, (str, unicode)), email
assert location is github.GithubObject.NotSet or isinstance(location, (str, unicode)), location
assert name is github.GithubObject.NotSet or isinstance(name, (str, unicode)), name
post_parameters = dict()
if billing_email is not github.GithubObject.NotSet:
post_parameters["billing_email"] = billing_email
if blog is not github.GithubObject.NotSet:
post_parameters["blog"] = blog
if company is not github.GithubObject.NotSet:
post_parameters["company"] = company
if email is not github.GithubObject.NotSet:
post_parameters["email"] = email
if location is not github.GithubObject.NotSet:
post_parameters["location"] = location
if name is not github.GithubObject.NotSet:
post_parameters["name"] = name
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=post_parameters
)
self._useAttributes(data)
def get_events(self):
"""
:calls: `GET /orgs/:org/events <http://developer.github.com/v3/activity/events>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
"""
return github.PaginatedList.PaginatedList(
github.Event.Event,
self._requester,
self.url + "/events",
None
)
def get_issues(self, filter=github.GithubObject.NotSet, state=github.GithubObject.NotSet, labels=github.GithubObject.NotSet, sort=github.GithubObject.NotSet, direction=github.GithubObject.NotSet, since=github.GithubObject.NotSet):
"""
:calls: `GET /orgs/:org/issues <http://developer.github.com/v3/issues>`_
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime.datetime
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Issue.Issue`
"""
assert filter is github.GithubObject.NotSet or isinstance(filter, (str, unicode)), filter
assert state is github.GithubObject.NotSet or isinstance(state, (str, unicode)), state
assert labels is github.GithubObject.NotSet or all(isinstance(element, github.Label.Label) for element in labels), labels
assert sort is github.GithubObject.NotSet or isinstance(sort, (str, unicode)), sort
assert direction is github.GithubObject.NotSet or isinstance(direction, (str, unicode)), direction
assert since is github.GithubObject.NotSet or isinstance(since, datetime.datetime), since
url_parameters = dict()
if filter is not github.GithubObject.NotSet:
url_parameters["filter"] = filter
if state is not github.GithubObject.NotSet:
url_parameters["state"] = state
if labels is not github.GithubObject.NotSet:
url_parameters["labels"] = ",".join(label.name for label in labels)
if sort is not github.GithubObject.NotSet:
url_parameters["sort"] = sort
if direction is not github.GithubObject.NotSet:
url_parameters["direction"] = direction
if since is not github.GithubObject.NotSet:
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return github.PaginatedList.PaginatedList(
github.Issue.Issue,
self._requester,
self.url + "/issues",
url_parameters
)
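    # Hedged usage sketch: the returned PaginatedList fetches further pages lazily
    # as it is iterated. The organization object and the cut-off date below are
    # assumptions, not values from this file.
    #
    #   import datetime
    #   for issue in org.get_issues(filter="all", state="open",
    #                               sort="created", direction="desc",
    #                               since=datetime.datetime(2015, 1, 1)):
    #       print(issue.number, issue.title)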
def get_members(self):
"""
:calls: `GET /orgs/:org/members <http://developer.github.com/v3/orgs/members>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/members",
None
)
def get_public_members(self):
"""
:calls: `GET /orgs/:org/public_members <http://developer.github.com/v3/orgs/members>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self.url + "/public_members",
None
)
def get_repo(self, name):
"""
:calls: `GET /repos/:owner/:repo <http://developer.github.com/v3/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, (str, unicode)), name
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/repos/" + self.login + "/" + name
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(self, type=github.GithubObject.NotSet):
"""
:calls: `GET /orgs/:org/repos <http://developer.github.com/v3/repos>`_
:param type: string
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Repository.Repository`
"""
assert type is github.GithubObject.NotSet or isinstance(type, (str, unicode)), type
url_parameters = dict()
if type is not github.GithubObject.NotSet:
url_parameters["type"] = type
return github.PaginatedList.PaginatedList(
github.Repository.Repository,
self._requester,
self.url + "/repos",
url_parameters
)
def get_team(self, id):
"""
:calls: `GET /teams/:id <http://developer.github.com/v3/orgs/teams>`_
:param id: integer
:rtype: :class:`github.Team.Team`
"""
assert isinstance(id, (int, long)), id
headers, data = self._requester.requestJsonAndCheck(
"GET",
"/teams/" + str(id)
)
return github.Team.Team(self._requester, headers, data, completed=True)
def get_teams(self):
"""
:calls: `GET /orgs/:org/teams <http://developer.github.com/v3/orgs/teams>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Team.Team`
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
self.url + "/teams",
None
)
def has_in_members(self, member):
"""
:calls: `GET /orgs/:org/members/:user <http://developer.github.com/v3/orgs/members>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(member, github.NamedUser.NamedUser), member
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/members/" + member._identity
)
return status == 204
def has_in_public_members(self, public_member):
"""
:calls: `GET /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
status, headers, data = self._requester.requestJson(
"GET",
self.url + "/public_members/" + public_member._identity
)
return status == 204
def remove_from_members(self, member):
"""
:calls: `DELETE /orgs/:org/members/:user <http://developer.github.com/v3/orgs/members>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/members/" + member._identity
)
def remove_from_public_members(self, public_member):
"""
:calls: `DELETE /orgs/:org/public_members/:user <http://developer.github.com/v3/orgs/members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url + "/public_members/" + public_member._identity
)
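    # Hedged usage sketch: the membership checks above return a plain bool derived
    # from the HTTP status (204 means "is a member"), so they do not raise for
    # absent users. The login below and the client `g`/`org` objects are assumed.
    #
    #   user = g.get_user("octocat")
    #   if org.has_in_members(user) and not org.has_in_public_members(user):
    #       org.remove_from_members(user)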
def _initAttributes(self):
self._avatar_url = github.GithubObject.NotSet
self._billing_email = github.GithubObject.NotSet
self._blog = github.GithubObject.NotSet
self._collaborators = github.GithubObject.NotSet
self._company = github.GithubObject.NotSet
self._created_at = github.GithubObject.NotSet
self._disk_usage = github.GithubObject.NotSet
self._email = github.GithubObject.NotSet
self._events_url = github.GithubObject.NotSet
self._followers = github.GithubObject.NotSet
self._following = github.GithubObject.NotSet
self._gravatar_id = github.GithubObject.NotSet
self._html_url = github.GithubObject.NotSet
self._id = github.GithubObject.NotSet
self._location = github.GithubObject.NotSet
self._login = github.GithubObject.NotSet
self._members_url = github.GithubObject.NotSet
self._name = github.GithubObject.NotSet
self._owned_private_repos = github.GithubObject.NotSet
self._plan = github.GithubObject.NotSet
self._private_gists = github.GithubObject.NotSet
self._public_gists = github.GithubObject.NotSet
self._public_members_url = github.GithubObject.NotSet
self._public_repos = github.GithubObject.NotSet
self._repos_url = github.GithubObject.NotSet
self._total_private_repos = github.GithubObject.NotSet
self._type = github.GithubObject.NotSet
self._updated_at = github.GithubObject.NotSet
self._url = github.GithubObject.NotSet
def _useAttributes(self, attributes):
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "billing_email" in attributes: # pragma no branch
self._billing_email = self._makeStringAttribute(attributes["billing_email"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "members_url" in attributes: # pragma no branch
self._members_url = self._makeStringAttribute(attributes["members_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(attributes["owned_private_repos"])
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_members_url" in attributes: # pragma no branch
self._public_members_url = self._makeStringAttribute(attributes["public_members_url"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(attributes["total_private_repos"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
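# Hedged end-to-end sketch (not part of the upstream module): exercises a few of
# the Organization methods defined above. The token and organization name are
# placeholders, and the block is guarded so importing the module has no side
# effects; running it requires network access and a valid credential.
if __name__ == "__main__":  # pragma: no cover
    from github import Github
    gh = Github("<personal-access-token>")
    org = gh.get_organization("acme")
    print(org.login, org.public_repos)
    for repo in org.get_repos(type="public"):
        print(repo.full_name)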
| gpl-3.0 |
nuagenetworks/tempest | tempest/lib/services/compute/security_group_rules_client.py | 1 | 1793 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_serialization import jsonutils as json
from tempest.lib.api_schema.response.compute.v2_1 import \
security_groups as schema
from tempest.lib.common import rest_client
class SecurityGroupRulesClient(rest_client.RestClient):
def create_security_group_rule(self, **kwargs):
"""Create a new security group rule.
Available params: see http://developer.openstack.org/
api-ref-compute-v2.1.html#createSecGroupRule
"""
post_body = json.dumps({'security_group_rule': kwargs})
url = 'os-security-group-rules'
resp, body = self.post(url, post_body)
body = json.loads(body)
self.validate_response(schema.create_security_group_rule, resp, body)
return rest_client.ResponseBody(resp, body)
def delete_security_group_rule(self, group_rule_id):
"""Deletes the provided Security Group rule."""
resp, body = self.delete('os-security-group-rules/%s' %
group_rule_id)
self.validate_response(schema.delete_security_group_rule, resp, body)
return rest_client.ResponseBody(resp, body)
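# Hedged usage sketch (not part of the upstream module): this client is normally
# constructed by tempest's client manager, so the constructor arguments below
# stand in for an already-configured auth provider, service type and region, and
# `group_id` is assumed to reference an existing security group.
#
#   client = SecurityGroupRulesClient(auth_provider, 'compute', 'RegionOne')
#   body = client.create_security_group_rule(
#       parent_group_id=group_id, ip_protocol='tcp',
#       from_port=22, to_port=22, cidr='0.0.0.0/0')
#   client.delete_security_group_rule(
#       body['security_group_rule']['id'])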
| apache-2.0 |