repo_name | path | copies | size | content | license
---|---|---|---|---|---|
tmatsuya/milkymist-ml401
|
cores/softusb/navre_regress/test_opcodes/test_ST_Z_decr.py
|
5
|
2894
|
#! /usr/bin/env python
###############################################################################
#
# simulavr - A simulator for the Atmel AVR family of microcontrollers.
# Copyright (C) 2001, 2002 Theodore A. Roth
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
###############################################################################
#
# $Id: test_ST_Z_decr.py,v 1.1 2004/07/31 00:59:11 rivetwa Exp $
#
"""Test the ST_Z_decr opcode.
"""
import base_test
from registers import Reg, SREG
class ST_Z_decr_TestFail(base_test.TestFail): pass
class base_ST_Z_decr(base_test.opcode_test):
"""Generic test case for testing ST_Z_decr opcode.
ST_Z_decr - Store Indirect to data space from Register using index Z with
pre-decrement.
Operation: Z <- Z - 1 then (Z) <- Rd
opcode is '1001 001d dddd 0010' where 0 <= d <= 31
Only registers PC and Z (R31:R30) should be changed.
"""
def setup(self):
# Set the register values
self.setup_regs[self.Rd] = self.Vd
self.setup_regs[Reg.R30] = (self.Z & 0xff)
self.setup_regs[Reg.R31] = (self.Z >> 8)
# Return the raw opcode
return 0x9202 | (self.Rd << 4)
def analyze_results(self):
self.reg_changed.extend( [Reg.R30, Reg.R31] )
# check that result is correct
expect = self.Vd
# must account for pre-decrement
got = self.mem_byte_read( self.Z - 1 )
if expect != got:
self.fail('ST_Z_decr: expect=%02x, got=%02x' % (expect, got))
# check that Z was decremented
expect = self.Z - 1
got = (self.anal_regs[Reg.R30] & 0xff) | ((self.anal_regs[Reg.R31] << 8) & 0xff00)
if expect != got:
self.fail('ST_Z_decr Z not decr: expect=%04x, got=%04x' % (expect, got))
#
# Template code for test case.
# The fail method will raise a test specific exception.
#
template = """
class ST_Z_decr_r%02d_Z%04x_v%02x_TestFail(ST_Z_decr_TestFail): pass
class test_ST_Z_decr_r%02d_Z%04x_v%02x(base_ST_Z_decr):
Rd = %d
Z = 0x%x
Vd = 0x%x
def fail(self,s):
raise ST_Z_decr_r%02d_Z%04x_v%02x_TestFail, s
"""
#
# automagically generate the test_ST_Z_decr_* class definitions.
#
# Operation is undefined for d = 30 and d = 31.
#
code = ''
for d in range(0,30):
for x in (0x10f, 0x1ff):
for v in (0xaa, 0x55):
args = (d,x,v)*4
code += template % args
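# For reference (an illustrative expansion, not output captured from a run),
# the first iteration (d=0, x=0x10f, v=0xaa) produces roughly:
#   class ST_Z_decr_r00_Z010f_vaa_TestFail(ST_Z_decr_TestFail): pass
#   class test_ST_Z_decr_r00_Z010f_vaa(base_ST_Z_decr):
#       Rd = 0
#       Z = 0x10f
#       Vd = 0xaa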
exec code
|
lgpl-3.0
|
vinodbonala/mm
|
test/functional/unit_test/coverage_tests.py
|
3
|
1067
|
import os
import unittest
import shutil
import test.lib.test_helper as test_helper
from test.lib.test_helper import MavensMateTest
class ApexUnitTestCoverageTest(MavensMateTest):
def test_01_should_get_coverage(self):
test_helper.create_project(self, "unit test project", package={ "ApexClass" : "*" })
stdin = {
"project_name" : "unit test project"
}
mm_response = self.runCommand('code_coverage_report', stdin)
self.assertTrue('NumLinesCovered' in mm_response[0])
@classmethod
def tearDownClass(self):
if os.path.exists(os.path.join(test_helper.base_test_directory,"test_workspace","unit test project")):
shutil.rmtree(os.path.join(test_helper.base_test_directory,"test_workspace","unit test project"))
if __name__ == '__main__':
if os.path.exists(os.path.join(test_helper.base_test_directory,"test_workspace","unit test project")):
shutil.rmtree(os.path.join(test_helper.base_test_directory,"test_workspace","unit test project"))
unittest.main()
|
gpl-2.0
|
krafczyk/spack
|
var/spack/repos/builtin/packages/r-irlba/package.py
|
5
|
1840
|
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RIrlba(RPackage):
"""Fast and memory efficient methods for truncated singular and eigenvalue
decompositions and principal component analysis of large sparse or dense
matrices."""
homepage = "https://cran.r-project.org/web/packages/irlba/index.html"
url = "https://cran.r-project.org/src/contrib/irlba_2.1.2.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/irlba"
version('2.1.2', '290940abf6662ed10c0c5a8db1bc6e88')
version('2.0.0', '557674cf8b68fea5b9f231058c324d26')
depends_on('r-matrix', type=('build', 'run'))
|
lgpl-2.1
|
HuimingCheng/AutoGrading
|
learning/web_Haotian/venv/Lib/site-packages/wheel/bdist_wheel.py
|
62
|
18852
|
"""
Create a wheel (.whl) distribution.
A wheel is a built archive format.
"""
import csv
import hashlib
import os
import subprocess
import warnings
import shutil
import json
import sys
import re
from email.generator import Generator
from distutils.core import Command
from distutils.sysconfig import get_python_version
from distutils import log as logger
from shutil import rmtree
import pkg_resources
from .pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag, get_platform
from .util import native, open_for_csv
from .archive import archive_wheelfile
from .pkginfo import read_pkg_info, write_pkg_info
from .metadata import pkginfo_to_dict
from . import pep425tags, metadata
from . import __version__ as wheel_version
safe_name = pkg_resources.safe_name
safe_version = pkg_resources.safe_version
PY_LIMITED_API_PATTERN = r'cp3\d'
def safer_name(name):
return safe_name(name).replace('-', '_')
def safer_version(version):
return safe_version(version).replace('-', '_')
class bdist_wheel(Command):
description = 'create a wheel distribution'
user_options = [('bdist-dir=', 'b',
"temporary directory for creating the distribution"),
('plat-name=', 'p',
"platform name to embed in generated filenames "
"(default: %s)" % get_platform()),
('keep-temp', 'k',
"keep the pseudo-installation tree around after " +
"creating the distribution archive"),
('dist-dir=', 'd',
"directory to put final built distributions in"),
('skip-build', None,
"skip rebuilding everything (for testing/debugging)"),
('relative', None,
"build the archive using relative paths"
"(default: false)"),
('owner=', 'u',
"Owner name used when creating a tar file"
" [default: current user]"),
('group=', 'g',
"Group name used when creating a tar file"
" [default: current group]"),
('universal', None,
"make a universal wheel"
" (default: false)"),
('python-tag=', None,
"Python implementation compatibility tag"
" (default: py%s)" % get_impl_ver()[0]),
('build-number=', None,
"Build number for this particular version. "
"As specified in PEP-0427, this must start with a digit. "
"[default: None]"),
('py-limited-api=', None,
"Python tag (cp32|cp33|cpNN) for abi3 wheel tag"
" (default: false)"),
]
boolean_options = ['keep-temp', 'skip-build', 'relative', 'universal']
def initialize_options(self):
self.bdist_dir = None
self.data_dir = None
self.plat_name = None
self.plat_tag = None
self.format = 'zip'
self.keep_temp = False
self.dist_dir = None
self.distinfo_dir = None
self.egginfo_dir = None
self.root_is_pure = None
self.skip_build = None
self.relative = False
self.owner = None
self.group = None
self.universal = False
self.python_tag = 'py' + get_impl_ver()[0]
self.build_number = None
self.py_limited_api = False
self.plat_name_supplied = False
def finalize_options(self):
if self.bdist_dir is None:
bdist_base = self.get_finalized_command('bdist').bdist_base
self.bdist_dir = os.path.join(bdist_base, 'wheel')
self.data_dir = self.wheel_dist_name + '.data'
self.plat_name_supplied = self.plat_name is not None
need_options = ('dist_dir', 'plat_name', 'skip_build')
self.set_undefined_options('bdist',
*zip(need_options, need_options))
self.root_is_pure = not (self.distribution.has_ext_modules()
or self.distribution.has_c_libraries())
if self.py_limited_api and not re.match(PY_LIMITED_API_PATTERN, self.py_limited_api):
raise ValueError("py-limited-api must match '%s'" % PY_LIMITED_API_PATTERN)
# Support legacy [wheel] section for setting universal
wheel = self.distribution.get_option_dict('wheel')
if 'universal' in wheel:
# please don't define this in your global configs
val = wheel['universal'][1].strip()
if val.lower() in ('1', 'true', 'yes'):
self.universal = True
if self.build_number is not None and not self.build_number[:1].isdigit():
raise ValueError("Build tag (build-number) must start with a digit.")
@property
def wheel_dist_name(self):
"""Return distribution full name with - replaced with _"""
components = (safer_name(self.distribution.get_name()),
safer_version(self.distribution.get_version()))
if self.build_number:
components += (self.build_number,)
return '-'.join(components)
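# A quick illustration (hypothetical project names, not from this codebase):
# a distribution named "my-package" at version "1.0.0" yields "my_package-1.0.0",
# and with build_number "2" set it becomes "my_package-1.0.0-2".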
def get_tag(self):
# bdist sets self.plat_name if unset, we should only use it for purepy
# wheels if the user supplied it.
if self.plat_name_supplied:
plat_name = self.plat_name
elif self.root_is_pure:
plat_name = 'any'
else:
plat_name = self.plat_name or get_platform()
if plat_name in ('linux-x86_64', 'linux_x86_64') and sys.maxsize == 2147483647:
plat_name = 'linux_i686'
plat_name = plat_name.replace('-', '_').replace('.', '_')
if self.root_is_pure:
if self.universal:
impl = 'py2.py3'
else:
impl = self.python_tag
tag = (impl, 'none', plat_name)
else:
impl_name = get_abbr_impl()
impl_ver = get_impl_ver()
impl = impl_name + impl_ver
# We don't work on CPython 3.1, 3.0.
if self.py_limited_api and (impl_name + impl_ver).startswith('cp3'):
impl = self.py_limited_api
abi_tag = 'abi3'
else:
abi_tag = str(get_abi_tag()).lower()
tag = (impl, abi_tag, plat_name)
supported_tags = pep425tags.get_supported(
supplied_platform=plat_name if self.plat_name_supplied else None)
# XXX switch to this alternate implementation for non-pure:
if not self.py_limited_api:
assert tag == supported_tags[0], "%s != %s" % (tag, supported_tags[0])
assert tag in supported_tags, "would build wheel with unsupported tag {}".format(tag)
return tag
def get_archive_basename(self):
"""Return archive name without extension"""
impl_tag, abi_tag, plat_tag = self.get_tag()
archive_basename = "%s-%s-%s-%s" % (
self.wheel_dist_name,
impl_tag,
abi_tag,
plat_tag)
return archive_basename
def run(self):
build_scripts = self.reinitialize_command('build_scripts')
build_scripts.executable = 'python'
if not self.skip_build:
self.run_command('build')
install = self.reinitialize_command('install',
reinit_subcommands=True)
install.root = self.bdist_dir
install.compile = False
install.skip_build = self.skip_build
install.warn_dir = False
# A wheel without setuptools scripts is more cross-platform.
# Use the (undocumented) `no_ep` option to setuptools'
# install_scripts command to avoid creating entry point scripts.
install_scripts = self.reinitialize_command('install_scripts')
install_scripts.no_ep = True
# Use a custom scheme for the archive, because we have to decide
# at installation time which scheme to use.
for key in ('headers', 'scripts', 'data', 'purelib', 'platlib'):
setattr(install,
'install_' + key,
os.path.join(self.data_dir, key))
basedir_observed = ''
if os.name == 'nt':
# win32 barfs if any of these are ''; could be '.'?
# (distutils.command.install:change_roots bug)
basedir_observed = os.path.normpath(os.path.join(self.data_dir, '..'))
self.install_libbase = self.install_lib = basedir_observed
setattr(install,
'install_purelib' if self.root_is_pure else 'install_platlib',
basedir_observed)
logger.info("installing to %s", self.bdist_dir)
self.run_command('install')
archive_basename = self.get_archive_basename()
pseudoinstall_root = os.path.join(self.dist_dir, archive_basename)
if not self.relative:
archive_root = self.bdist_dir
else:
archive_root = os.path.join(
self.bdist_dir,
self._ensure_relative(install.install_base))
self.set_undefined_options(
'install_egg_info', ('target', 'egginfo_dir'))
self.distinfo_dir = os.path.join(self.bdist_dir,
'%s.dist-info' % self.wheel_dist_name)
self.egg2dist(self.egginfo_dir,
self.distinfo_dir)
self.write_wheelfile(self.distinfo_dir)
self.write_record(self.bdist_dir, self.distinfo_dir)
# Make the archive
if not os.path.exists(self.dist_dir):
os.makedirs(self.dist_dir)
wheel_name = archive_wheelfile(pseudoinstall_root, archive_root)
# Sign the archive
if 'WHEEL_TOOL' in os.environ:
subprocess.call([os.environ['WHEEL_TOOL'], 'sign', wheel_name])
# Add to 'Distribution.dist_files' so that the "upload" command works
getattr(self.distribution, 'dist_files', []).append(
('bdist_wheel', get_python_version(), wheel_name))
if not self.keep_temp:
if self.dry_run:
logger.info('removing %s', self.bdist_dir)
else:
rmtree(self.bdist_dir)
def write_wheelfile(self, wheelfile_base, generator='bdist_wheel (' + wheel_version + ')'):
from email.message import Message
msg = Message()
msg['Wheel-Version'] = '1.0' # of the spec
msg['Generator'] = generator
msg['Root-Is-Purelib'] = str(self.root_is_pure).lower()
if self.build_number is not None:
msg['Build'] = self.build_number
# Doesn't work for bdist_wininst
impl_tag, abi_tag, plat_tag = self.get_tag()
for impl in impl_tag.split('.'):
for abi in abi_tag.split('.'):
for plat in plat_tag.split('.'):
msg['Tag'] = '-'.join((impl, abi, plat))
wheelfile_path = os.path.join(wheelfile_base, 'WHEEL')
logger.info('creating %s', wheelfile_path)
with open(wheelfile_path, 'w') as f:
Generator(f, maxheaderlen=0).flatten(msg)
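# For orientation (a sketch, not output captured from this code), the WHEEL file
# written above for a pure, universal distribution looks roughly like:
#   Wheel-Version: 1.0
#   Generator: bdist_wheel (<wheel_version>)
#   Root-Is-Purelib: true
#   Tag: py2-none-any
#   Tag: py3-none-any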
def _ensure_relative(self, path):
# copied from dir_util, deleted
drive, path = os.path.splitdrive(path)
if path[0:1] == os.sep:
path = drive + path[1:]
return path
def _pkginfo_to_metadata(self, egg_info_path, pkginfo_path):
return metadata.pkginfo_to_metadata(egg_info_path, pkginfo_path)
def license_file(self):
"""Return license filename from a license-file key in setup.cfg, or None."""
metadata = self.distribution.get_option_dict('metadata')
if 'license_file' not in metadata:
return None
return metadata['license_file'][1]
def setupcfg_requirements(self):
"""Generate requirements from setup.cfg as
('Requires-Dist', 'requirement; qualifier') tuples. From a metadata
section in setup.cfg:
[metadata]
provides-extra = extra1
extra2
requires-dist = requirement; qualifier
another; qualifier2
unqualified
Yields
('Provides-Extra', 'extra1'),
('Provides-Extra', 'extra2'),
('Requires-Dist', 'requirement; qualifier'),
('Requires-Dist', 'another; qualifier2'),
('Requires-Dist', 'unqualified')
"""
metadata = self.distribution.get_option_dict('metadata')
# our .ini parser folds - to _ in key names:
for key, title in (('provides_extra', 'Provides-Extra'),
('requires_dist', 'Requires-Dist')):
if key not in metadata:
continue
field = metadata[key]
for line in field[1].splitlines():
line = line.strip()
if not line:
continue
yield (title, line)
def add_requirements(self, metadata_path):
"""Add additional requirements from setup.cfg to file metadata_path"""
additional = list(self.setupcfg_requirements())
if not additional:
return
pkg_info = read_pkg_info(metadata_path)
if 'Provides-Extra' in pkg_info or 'Requires-Dist' in pkg_info:
warnings.warn('setup.cfg requirements overwrite values from setup.py')
del pkg_info['Provides-Extra']
del pkg_info['Requires-Dist']
for k, v in additional:
pkg_info[k] = v
write_pkg_info(metadata_path, pkg_info)
def egg2dist(self, egginfo_path, distinfo_path):
"""Convert an .egg-info directory into a .dist-info directory"""
def adios(p):
"""Appropriately delete directory, file or link."""
if os.path.exists(p) and not os.path.islink(p) and os.path.isdir(p):
shutil.rmtree(p)
elif os.path.exists(p):
os.unlink(p)
adios(distinfo_path)
if not os.path.exists(egginfo_path):
# There is no egg-info. This is probably because the egg-info
# file/directory is not named to match the distribution name used
# to name the archive file. Check for this case and report
# accordingly.
import glob
pat = os.path.join(os.path.dirname(egginfo_path), '*.egg-info')
possible = glob.glob(pat)
err = "Egg metadata expected at %s but not found" % (egginfo_path,)
if possible:
alt = os.path.basename(possible[0])
err += " (%s found - possible misnamed archive file?)" % (alt,)
raise ValueError(err)
if os.path.isfile(egginfo_path):
# .egg-info is a single file
pkginfo_path = egginfo_path
pkg_info = self._pkginfo_to_metadata(egginfo_path, egginfo_path)
os.mkdir(distinfo_path)
else:
# .egg-info is a directory
pkginfo_path = os.path.join(egginfo_path, 'PKG-INFO')
pkg_info = self._pkginfo_to_metadata(egginfo_path, pkginfo_path)
# ignore common egg metadata that is useless to wheel
shutil.copytree(egginfo_path, distinfo_path,
ignore=lambda x, y: {'PKG-INFO', 'requires.txt', 'SOURCES.txt',
'not-zip-safe'}
)
# delete dependency_links if it is only whitespace
dependency_links_path = os.path.join(distinfo_path, 'dependency_links.txt')
with open(dependency_links_path, 'r') as dependency_links_file:
dependency_links = dependency_links_file.read().strip()
if not dependency_links:
adios(dependency_links_path)
write_pkg_info(os.path.join(distinfo_path, 'METADATA'), pkg_info)
# XXX deprecated. Still useful for current distribute/setuptools.
metadata_path = os.path.join(distinfo_path, 'METADATA')
self.add_requirements(metadata_path)
# XXX intentionally a different path than the PEP.
metadata_json_path = os.path.join(distinfo_path, 'metadata.json')
pymeta = pkginfo_to_dict(metadata_path,
distribution=self.distribution)
if 'description' in pymeta:
description_filename = 'DESCRIPTION.rst'
description_text = pymeta.pop('description')
description_path = os.path.join(distinfo_path,
description_filename)
with open(description_path, "wb") as description_file:
description_file.write(description_text.encode('utf-8'))
pymeta['extensions']['python.details']['document_names']['description'] = \
description_filename
# XXX heuristically copy any LICENSE/LICENSE.txt?
license = self.license_file()
if license:
license_filename = 'LICENSE.txt'
shutil.copy(license, os.path.join(self.distinfo_dir, license_filename))
pymeta['extensions']['python.details']['document_names']['license'] = license_filename
with open(metadata_json_path, "w") as metadata_json:
json.dump(pymeta, metadata_json, sort_keys=True)
adios(egginfo_path)
def write_record(self, bdist_dir, distinfo_dir):
from .util import urlsafe_b64encode
record_path = os.path.join(distinfo_dir, 'RECORD')
record_relpath = os.path.relpath(record_path, bdist_dir)
def walk():
for dir, dirs, files in os.walk(bdist_dir):
dirs.sort()
for f in sorted(files):
yield os.path.join(dir, f)
def skip(path):
"""Wheel hashes every possible file."""
return (path == record_relpath)
with open_for_csv(record_path, 'w+') as record_file:
writer = csv.writer(record_file)
for path in walk():
relpath = os.path.relpath(path, bdist_dir)
if skip(relpath):
hash = ''
size = ''
else:
with open(path, 'rb') as f:
data = f.read()
digest = hashlib.sha256(data).digest()
hash = 'sha256=' + native(urlsafe_b64encode(digest))
size = len(data)
record_path = os.path.relpath(
path, bdist_dir).replace(os.path.sep, '/')
writer.writerow((record_path, hash, size))
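# For illustration (a sketch of the resulting format, not captured output): each
# RECORD row is "<relative/path>,sha256=<urlsafe-b64 digest>,<size>", and the
# RECORD file's own entry is written with empty hash and size fields, e.g.
#   mypkg/__init__.py,sha256=AbC...xyz,1042
#   mypkg-1.0.0.dist-info/RECORD,,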
|
mit
|
jhaydel/051616PoC
|
2Leaf_PoC/topology_converter.py
|
3
|
22004
|
#!/usr/bin/env python
#
# Topology Converter
# converts a given topology.dot file to a Vagrantfile
# can use the virtualbox or libvirt Vagrant providers
# Initially written by Eric Pulvino 2015-10-19
#
# hosted @ https://github.com/cumulusnetworks/topology_converter
#
#
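# For orientation, a minimal topology.dot along the lines this script parses
# (hostnames, interfaces and attribute values here are made up for illustration):
#
#   graph network {
#     "leaf1" [function="leaf" os="CumulusCommunity/cumulus-vx" memory="300"]
#     "server1" [function="host" os="boxcutter/ubuntu1604" memory="500"]
#     "leaf1":"swp1" -- "server1":"eth1"
#   }
#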
version = "4.0.5"
import os
import re
import sys
import time
import pprint
import jinja2
import argparse
import importlib
import pydotplus
pp = pprint.PrettyPrinter(depth=6)
parser = argparse.ArgumentParser(description='Topology Converter -- Convert topology.dot files into Vagrantfiles')
parser.add_argument('topology_file',
help='provide a topology file as input')
parser.add_argument('-v','--verbose', action='store_true',
help='enables verbose logging mode')
parser.add_argument('-p','--provider', choices=["libvirt","virtualbox"],
help='specifies the provider to be used in the Vagrantfile; the script supports "virtualbox" or "libvirt" (default: virtualbox).')
parser.add_argument('-a','--ansible-hostfile', action='store_true',
help='When specified, ansible hostfile will be generated from a dummy playbook run.')
parser.add_argument('-t','--template', action='append', nargs=2,
help='Specify an additional jinja2 template and a destination for that file to be rendered to.')
parser.add_argument('-s','--start-port', type=int,
help='FOR LIBVIRT PROVIDER: this option overrides the default starting-port 8000 with a new value. Use ports over 1024 to avoid permissions issues. If using this option with the virtualbox provider it will be ignored.')
parser.add_argument('-g','--port-gap', type=int,
help='FOR LIBVIRT PROVIDER: this option overrides the default port-gap of 1000 with a new value. This number is added to the start-port value to determine the port to be used by the remote-side. Port-gap also defines the max number of links that can exist in the topology. EX. If start-port is 8000 and port-gap is 1000 the first link will use ports 8001 and 9001 for the construction of the UDP tunnel. If using this option with the virtualbox provider it will be ignored.')
parser.add_argument('-dd','--display-datastructures', action='store_true',
help='When specified, the datastructures which are passed to the template are displayed to screen. Note: Using this option does not write a Vagrantfile and supersedes other options.')
parser.add_argument('--synced-folder', action='store_true',
help='Using this option enables the default Vagrant synced folder which we disable by default. See: https://www.vagrantup.com/docs/synced-folders/basic_usage.html')
args = parser.parse_args()
#Parse Arguments
provider="virtualbox"
generate_ansible_hostfile=False
verbose=False
start_port=8000
port_gap=1000
synced_folder=False
display_datastructures=False
VAGRANTFILE='Vagrantfile'
VAGRANTFILE_template='templates/Vagrantfile.j2'
TEMPLATES=[[VAGRANTFILE_template,VAGRANTFILE]]
if args.topology_file: topology_file=args.topology_file
if args.verbose: verbose=args.verbose
if args.provider: provider=args.provider
if args.ansible_hostfile: generate_ansible_hostfile=True
if args.template:
for templatefile,destination in args.template:
TEMPLATES.append([templatefile,destination])
for templatefile,destination in TEMPLATES:
if not os.path.isfile(templatefile):
print " ### ERROR: provided template file-- \"" + templatefile + "\" does not exist!"
exit(1)
if args.start_port: start_port=args.start_port
if args.port_gap: port_gap=args.port_gap
if args.display_datastructures: display_datastructures=True
if args.synced_folder: synced_folder=True
if verbose:
print "Arguments:"
print args
###################################
#### MAC Address Configuration ####
###################################
# The starting MAC for assignment for any devices not in mac_map
#Cumulus Range ( https://support.cumulusnetworks.com/hc/en-us/articles/203837076-Reserved-MAC-Address-Range-for-Use-with-Cumulus-Linux )
start_mac="443839000000"
#This file is generated to store the mapping between macs and mgmt interfaces
dhcp_mac_file="./dhcp_mac_map"
######################################################
############# Everything Else #################
######################################################
# By default, Vagrant will share the directory with the Vagrantfile to /vagrant on the host
# use this knob to enable or disable that ability.
synced_folder=False
#Hardcoded Variables
script_storage="./helper_scripts" #Location for our generated remap files
ZIPFILE="./virtual_topology.zip"
epoch_time = str(int(time.time()))
mac_map={}
#LIBvirt Provider Settings
# start_port and port_gap are only relevant to the libvirt provider. These settings provide the basis
# for the UDP tunnel construction which is used by libvirt. Since UDP tunnels only make sense in a
# point-to-point fashion, there is additional error checking when using the libvirt provider to make
# sure that interfaces are not reused for a point-to-multipoint configuration.
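# A worked example of the port math used further below (defaults assumed:
# start_port=8000, port_gap=1000): link 1 gets local/remote UDP ports 8001 and
# 9001, link 2 gets 8002 and 9002, and so on, until start_port + port_gap would
# be exceeded and an error is raised.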
#Static Variables -- #Do not change!
warning=False
libvirt_reuse_error="""
When constructing a VAGRANTFILE for the libvirt provider
interface reuse is not possible because the UDP tunnels
which libvirt uses for communication are point-to-point in
nature. It is not possible to create a point-to-multipoint
UDP tunnel!
NOTE: Perhaps adding another switch to your topology would
allow you to avoid reusing interfaces here.
"""
###### Functions
def mac_fetch(hostname,interface):
global start_mac
global mac_map
global warning
new_mac = hex(int(start_mac, 16) + 1)[2:].upper()
while new_mac in mac_map:
print " WARNING: MF MAC Address Collision -- tried to use " + new_mac + " (on "+interface+") but it was already in use."
start_mac = new_mac
new_mac = hex(int(start_mac, 16) + 1)[2:].upper()
warning=True
start_mac = new_mac
return str(new_mac)
def parse_topology(topology_file):
global provider
global verbose
global warning
topology = pydotplus.graphviz.graph_from_dot_file(topology_file)
inventory = {}
nodes=topology.get_node_list()
edges=topology.get_edge_list()
for node in nodes:
node_name=node.get_name().replace('"','')
#Add node to inventory
if node_name not in inventory:
inventory[node_name] = {}
inventory[node_name]['interfaces'] = {}
node_attr_list=node.get_attributes()
print "node_attr_list:"
print node_attr_list
#Define Functional Defaults
if 'function' in node_attr_list:
value=node.get('function')
if value.startswith('"') or value.startswith("'"): value=value[1:].lower()
if value.endswith('"') or value.endswith("'"): value=value[:-1].lower()
if value=='fake':
inventory[node_name]['os']="None"
inventory[node_name]['memory']="1"
if value=='oob-server':
inventory[node_name]['os']="boxcutter/ubuntu1604"
inventory[node_name]['memory']="500"
elif value=='oob-switch':
inventory[node_name]['os']="CumulusCommunity/cumulus-vx"
inventory[node_name]['memory']="300"
elif value=='exit':
inventory[node_name]['os']="CumulusCommunity/cumulus-vx"
inventory[node_name]['memory']="300"
elif value=='spine':
inventory[node_name]['os']="CumulusCommunity/cumulus-vx"
inventory[node_name]['memory']="300"
elif value=='leaf':
inventory[node_name]['os']="CumulusCommunity/cumulus-vx"
inventory[node_name]['memory']="300"
elif value=='host':
inventory[node_name]['os']="boxcutter/ubuntu1604"
inventory[node_name]['memory']="500"
#Add attributes to node inventory
for attribute in node_attr_list:
#if verbose: print attribute + " = " + node.get(attribute)
value=node.get(attribute)
if value.startswith('"') or value.startswith("'"): value=value[1:]
if value.endswith('"') or value.endswith("'"): value=value[:-1]
inventory[node_name][attribute] = value
#Make sure mandatory attributes are present.
mandatory_attributes=['os',]
for attribute in mandatory_attributes:
if attribute not in inventory[node_name]:
print " ### ERROR: MANDATORY DEVICE ATTRIBUTE \""+attribute+"\" not specified for "+ node_name
exit(1)
#Extra Massaging for specific attributes.
# light sanity checking.
if 'function' not in inventory[node_name]: inventory[node_name]['function'] = "Unknown"
if 'memory' in inventory[node_name]:
if int(inventory[node_name]['memory']) <= 0:
print " ### ERROR -- Memory must be greater than 0mb on " + node_name
exit(1)
if provider == "libvirt":
if 'tunnel_ip' not in inventory[node_name]: inventory[node_name]['tunnel_ip']='127.0.0.1'
net_number = 1
for edge in edges:
if provider=="virtualbox":
network_string="net"+str(net_number)
elif provider=="libvirt":
PortA=str(start_port+net_number)
PortB=str(start_port+port_gap+net_number)
if int(PortA) > int(start_port+port_gap):
print " ### ERROR: Configured Port_Gap: ("+str(port_gap)+") exceeds the number of links in the topology. Read the help options to fix.\n\n"
parser.print_help()
exit(1)
#Handle Link-based Passthrough Attributes
edge_attributes={}
for attribute in edge.get_attributes():
if attribute=="left_mac" or attribute=="right_mac": continue
if attribute in edge_attributes:
print " ### WARNING: Attribute \""+attribute+"\" specified twice. Using second value."
warning=True
value=edge.get(attribute)
if value.startswith('"') or value.startswith("'"): value=value[1:]
if value.endswith('"') or value.endswith("'"): value=value[:-1]
edge_attributes[attribute]=value
#Set Devices/interfaces/MAC Addresses
left_device=edge.get_source().split(":")[0].replace('"','')
left_interface=edge.get_source().split(":")[1].replace('"','')
right_device=edge.get_destination().split(":")[0].replace('"','')
right_interface=edge.get_destination().split(":")[1].replace('"','')
left_mac_address=""
if edge.get('left_mac') != None : left_mac_address=edge.get('left_mac').replace('"','')
else: left_mac_address=mac_fetch(left_device,left_interface)
right_mac_address=""
if edge.get('right_mac') != None : right_mac_address=edge.get('right_mac').replace('"','')
else: right_mac_address=mac_fetch(right_device,right_interface)
#Check to make sure each device in the edge already exists in inventory
if left_device not in inventory:
print " ### ERROR: device " + left_device + " is referred to in list of edges/links but not defined as a node."
exit(1)
if right_device not in inventory:
print " ### ERROR: device " + right_device + " is referred to in list of edges/links but not defined as a node."
exit(1)
#Add left host switchport to inventory
if left_interface not in inventory[left_device]['interfaces']:
inventory[left_device]['interfaces'][left_interface] = {}
inventory[left_device]['interfaces'][left_interface]['mac']=left_mac_address
if left_mac_address in mac_map:
print " ### ERROR -- MAC Address Collision - tried to use "+left_mac_address+" on "+left_device+":"+left_interface+"\n but it is already in use. Check your Topology File!"
exit(1)
mac_map[left_mac_address]=left_device+","+left_interface
if provider=="virtualbox":
inventory[left_device]['interfaces'][left_interface]['network'] = network_string
elif provider=="libvirt":
inventory[left_device]['interfaces'][left_interface]['local_port'] = PortA
inventory[left_device]['interfaces'][left_interface]['remote_port'] = PortB
else:
print " ### ERROR -- Interface " + left_interface + " Already used on device: " + left_device
exit(1)
#Add right host switchport to inventory
if right_interface not in inventory[right_device]['interfaces']:
inventory[right_device]['interfaces'][right_interface] = {}
inventory[right_device]['interfaces'][right_interface]['mac']=right_mac_address
if right_mac_address in mac_map:
print " ### ERROR -- MAC Address Collision - tried to use "+right_mac_address+" on "+right_device+":"+right_interface+"\n but it is already in use. Check your Topology File!"
exit(1)
mac_map[right_mac_address]=right_device+","+right_interface
if provider=="virtualbox":
inventory[right_device]['interfaces'][right_interface]['network'] = network_string
elif provider=="libvirt":
inventory[right_device]['interfaces'][right_interface]['local_port'] = PortB
inventory[right_device]['interfaces'][right_interface]['remote_port'] = PortA
else:
print " ### ERROR -- Interface " + right_interface + " Already used on device: " + right_device
exit(1)
inventory[left_device]['interfaces'][left_interface]['remote_interface'] = right_interface
inventory[left_device]['interfaces'][left_interface]['remote_device'] = right_device
inventory[right_device]['interfaces'][right_interface]['remote_interface'] = left_interface
inventory[right_device]['interfaces'][right_interface]['remote_device'] = left_device
if provider == 'libvirt':
inventory[left_device]['interfaces'][left_interface]['local_ip'] = inventory[left_device]['tunnel_ip']
inventory[left_device]['interfaces'][left_interface]['remote_ip'] = inventory[right_device]['tunnel_ip']
inventory[right_device]['interfaces'][right_interface]['local_ip'] = inventory[right_device]['tunnel_ip']
inventory[right_device]['interfaces'][right_interface]['remote_ip'] = inventory[left_device]['tunnel_ip']
#Add link-based passthrough attributes
for attribute in edge_attributes:
inventory[left_device]['interfaces'][left_interface][attribute]=edge_attributes[attribute]
inventory[right_device]['interfaces'][right_interface][attribute]=edge_attributes[attribute]
net_number += 1
if verbose:
print "\n\n ### Inventory Datastructure: ###"
pp.pprint(inventory)
return inventory
def clean_datastructure(devices):
#Sort the devices by function
devices.sort(key=getKeyDevices)
if display_datastructures: return devices
for device in devices:
print ">> DEVICE: " + device['hostname']
print " code: " + device['os']
if 'memory' in device:
print " memory: " + device['memory']
for attribute in device:
if attribute == 'memory' or attribute == 'os' or attribute == 'interfaces': continue
print " "+str(attribute)+": "+ str(device[attribute])
for interface in device['interfaces']:
print " LINK: " + interface
for attribute in device['interfaces'][interface]:
print " " + attribute +": " + device['interfaces'][interface][attribute]
#Remove Fake Devices
indexes_to_remove=[]
for i in range(0,len(devices)):
if 'function' in devices[i]:
if devices[i]['function'] == 'fake':
indexes_to_remove.append(i)
for index in sorted(indexes_to_remove, reverse=True): del devices[index] # delete from the end so earlier indexes stay valid
return devices
def remove_generated_files():
if display_datastructures: return
if verbose: print "Removing existing DHCP FILE..."
if os.path.isfile(dhcp_mac_file): os.remove(dhcp_mac_file)
def generate_shareable_zip():
import zipfile
topology_dir="./"+os.path.split(topology_file)[0]
template_dir="./"+os.path.split(VAGRANTFILE_template)[0]
topo_file=os.path.split(topology_file)[1]
vagrantfile=os.path.split(VAGRANTFILE)[1]
folders_to_zip=["./","./helper_scripts",]
if topology_dir not in folders_to_zip: folders_to_zip.append(topology_dir)
if template_dir not in folders_to_zip: folders_to_zip.append(template_dir)
if verbose: print "Creating ZIP..."
if verbose: print " Folders_to_Zip: ["+", ".join(folders_to_zip)+"]"
zf = zipfile.ZipFile(ZIPFILE, "w")
for dirname, subdirs, files in os.walk("./"):
if dirname in folders_to_zip:
if verbose: print " adding directory %s to zip..." % (dirname)
zf.write(dirname)
for filename in files:
if filename.endswith("~") or filename.lower().endswith(".zip") or filename.startswith(".git"): continue
elif dirname == topology_dir:
if filename != topo_file: continue
file_to_add=os.path.join(dirname, filename)
if verbose:
print " adding %s to zip..." % (file_to_add)
zf.write(file_to_add)
else:
continue
zf.close()
def getKey(item):
# Used to sort interface names in natural (numeric) order
base = 10
if item[0:3].lower() == "eth": base = 0
val = float(item[3:].replace("s","."))
return val + base
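# Illustrative sort keys produced by getKey() (interface names are hypothetical):
#   "eth0"  -> 0.0     "eth1"   -> 1.0
#   "swp2"  -> 12.0    "swp2s1" -> 12.1
# so ethN ports sort before swpN ports, in numeric rather than string order.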
def getKeyDevices(device):
# Used to sort devices by function (oob-server first, unknown functions last)
if device['function'] == "oob-server": return 1
elif device['function'] == "oob-switch": return 2
elif device['function'] == "exit": return 3
elif device['function'] == "spine": return 4
elif device['function'] == "leaf": return 5
elif device['function'] == "host": return 6
else: return 7
def sorted_interfaces(interface_dictionary):
interface_list=[]
for link in interface_dictionary:
interface_list.append(link)
interface_list.sort(key=getKey)
return interface_list
def generate_dhcp_mac_file(mac_map):
if verbose: print "GENERATING DHCP MAC FILE..."
mac_file = open(dhcp_mac_file,"a")
if '' in mac_map: del mac_map['']
dhcp_display_list=[]
for line in mac_map:
dhcp_display_list.append(mac_map[line]+","+line)
dhcp_display_list.sort()
for line in dhcp_display_list:
mac_file.write(line+"\n")
mac_file.close()
def populate_data_structures(inventory):
devices = []
for device in inventory:
inventory[device]['hostname']=device
devices.append(inventory[device])
return clean_datastructure(devices)
def render_jinja_templates(devices):
if display_datastructures: print_datastructures(devices)
if verbose: print "RENDERING JINJA TEMPLATES..."
for templatefile,destination in TEMPLATES:
if verbose: print " Rendering: " + templatefile + " --> " + destination
template = jinja2.Template(open(templatefile).read())
with open(destination, 'w') as outfile:
outfile.write(template.render(devices=devices,
synced_folder=synced_folder,
provider=provider,
version=version,
topology_file=topology_file,
epoch_time=epoch_time,
script_storage=script_storage,
generate_ansible_hostfile=generate_ansible_hostfile,)
)
def print_datastructures(devices):
print "\n\n######################################"
print " DATASTRUCTURES SENT TO TEMPLATE:"
print "######################################\n"
print "provider=" + provider
print "synced_folder=" + str(synced_folder)
print "version=" + str(version)
print "topology_file=" + topology_file
print "epoch_time=" + str(epoch_time)
print "script_storage=" + script_storage
print "generate_ansible_hostfile=" + str(generate_ansible_hostfile)
print "devices="
pp.pprint(devices)
exit(0)
def generate_ansible_files():
if not generate_ansible_hostfile: return
if verbose: print "Generating Ansible Files..."
with open("./helper_scripts/empty_playbook.yml","w") as playbook:
playbook.write("""---
- hosts: all
user: vagrant
gather_facts: no
tasks:
- command: "uname -a"
""")
with open("./ansible.cfg","w") as ansible_cfg:
ansible_cfg.write("""[defaults]
inventory = ./.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory
hostfile= ./.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory
host_key_checking=False
callback_whitelist = profile_tasks""")
def main():
global mac_map
print "\n######################################"
print " Topology Converter"
print "######################################"
inventory = parse_topology(topology_file)
devices=populate_data_structures(inventory)
remove_generated_files()
render_jinja_templates(devices)
generate_dhcp_mac_file(mac_map)
generate_ansible_files()
#generate_shareable_zip() #Disabled because it is unreliable
if __name__ == "__main__":
main()
print "\nVagrantfile has been generated!\n"
print "\nDONE!\n"
exit(0)
|
mit
|
figarocms/thumbor
|
tests/detectors/test_queued_detector.py
|
6
|
3598
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
from unittest import TestCase
import mock
from json import loads
from redis import Redis
from preggy import expect
from thumbor.detectors.queued_detector import QueuedDetector
class QueuedDetectorTestCase(TestCase):
def setUp(self, *args, **kw):
super(QueuedDetectorTestCase, self).setUp(*args, **kw)
self.redis = Redis(
host='0.0.0.0',
port=6668,
db=0,
password='hey_you',
)
self.redis.delete('resque:unique:queue:Detect:/image/test.jpg')
self.redis.delete('resque:queue:Detect')
QueuedDetector.queue = None
def test_detector_sends_to_queue(self):
ctx = mock.Mock(
config=mock.Mock(
REDIS_QUEUE_SERVER_HOST='0.0.0.0',
REDIS_QUEUE_SERVER_PORT=6668,
REDIS_QUEUE_SERVER_DB=0,
REDIS_QUEUE_SERVER_PASSWORD='hey_you',
),
request=mock.Mock(
image_url='/image/test.jpg',
detection_error=False,
),
)
detector = QueuedDetector(ctx, 1, [])
expect(detector).not_to_be_null()
def validate(data):
expect(data).to_be_empty()
detector.detect(validate)
expect(ctx.request.detection_error).to_be_false()
result = self.redis.get('resque:unique:queue:Detect:/image/test.jpg')
expect(result).to_equal('1')
expected_payload = {
"queue": "Detect",
"args": ["all", "/image/test.jpg", "/image/test.jpg"],
"class": "remotecv.pyres_tasks.DetectTask",
"key": "/image/test.jpg"
}
result = self.redis.lpop('resque:queue:Detect')
expect(loads(result)).to_be_like(expected_payload)
def test_detector_fails_properly(self):
ctx = mock.Mock(
config=mock.Mock(
REDIS_QUEUE_SERVER_HOST='0.0.0.0',
REDIS_QUEUE_SERVER_PORT=6669,
REDIS_QUEUE_SERVER_DB=0,
REDIS_QUEUE_SERVER_PASSWORD='hey_you',
),
request=mock.Mock(
image_url='/image/test.jpg',
detection_error=False,
),
)
detector = QueuedDetector(ctx, 1, [])
expect(detector).not_to_be_null()
def validate(data):
expect(data).to_be_empty()
detector.detect(validate)
expect(ctx.request.detection_error).to_be_true()
expect(detector.queue).to_be_null()
def test_detector_can_detect_twice(self):
ctx = mock.Mock(
config=mock.Mock(
REDIS_QUEUE_SERVER_HOST='0.0.0.0',
REDIS_QUEUE_SERVER_PORT=6668,
REDIS_QUEUE_SERVER_DB=0,
REDIS_QUEUE_SERVER_PASSWORD='hey_you',
),
request=mock.Mock(
image_url='/image/test.jpg',
detection_error=False,
),
)
detector = QueuedDetector(ctx, 1, [])
expect(detector).not_to_be_null()
def validate(data):
expect(data).to_be_empty()
detector.detect(validate)
expect(ctx.request.detection_error).to_be_false()
expect(detector.queue).not_to_be_null()
detector.detect(validate)
expect(detector.queue).not_to_be_null()
|
mit
|
Sumith1896/sympy
|
sympy/conftest.py
|
20
|
2267
|
from __future__ import print_function, division
import sys
sys._running_pytest = True
import pytest
from sympy.core.cache import clear_cache
import re
sp = re.compile(r'([0-9]+)/([1-9][0-9]*)')
def process_split(session, config, items):
split = config.getoption("--split")
if not split:
return
m = sp.match(split)
if not m:
raise ValueError("split must be a string of the form a/b "
"where a and b are ints.")
i, t = map(int, m.groups())
start, end = (i-1)*len(items)//t, i*len(items)//t
if i < t:
# remove elements from end of list first
del items[end:]
del items[:start]
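# A worked example of the slicing above (numbers are illustrative): with 100
# collected items and --split=2/5, start=20 and end=40, so items[20:40] are kept.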
def pytest_report_header(config):
from sympy.utilities.misc import ARCH
s = "architecture: %s\n" % ARCH
from sympy.core.cache import USE_CACHE
s += "cache: %s\n" % USE_CACHE
from sympy.core.compatibility import GROUND_TYPES, HAS_GMPY
version = ''
if GROUND_TYPES =='gmpy':
if HAS_GMPY == 1:
import gmpy
elif HAS_GMPY == 2:
import gmpy2 as gmpy
version = gmpy.version()
s += "ground types: %s %s\n" % (GROUND_TYPES, version)
return s
def pytest_terminal_summary(terminalreporter):
if (terminalreporter.stats.get('error', None) or
terminalreporter.stats.get('failed', None)):
terminalreporter.write_sep(
' ', 'DO *NOT* COMMIT!', red=True, bold=True)
def pytest_addoption(parser):
parser.addoption("--split", action="store", default="",
help="split tests")
def pytest_collection_modifyitems(session, config, items):
""" pytest hook. """
# handle splits
process_split(session, config, items)
@pytest.fixture(autouse=True, scope='module')
def file_clear_cache():
clear_cache()
@pytest.fixture(autouse=True, scope='module')
def check_disabled(request):
if getattr(request.module, 'disabled', False):
pytest.skip("test requirements not met.")
elif getattr(request.module, 'ipython', False):
# need to check version and options for ipython tests
if (pytest.__version__ < '2.6.3' and
pytest.config.getvalue('-s') != 'no'):
pytest.skip("run py.test with -s or upgrade to newer version.")
|
bsd-3-clause
|
aldariz/Sick-Beard
|
lib/hachoir_parser/template.py
|
90
|
1939
|
"""
====================== 8< ============================
This file is an Hachoir parser template. Make a copy
of it, and adapt it to your needs.
You have to replace all "TODO" with your code.
====================== 8< ============================
TODO parser.
Author: TODO TODO
Creation date: YYYY-mm-DD
"""
# TODO: Just keep what you need
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (ParserError,
UInt8, UInt16, UInt32, String, RawBytes)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
class TODOFile(Parser):
PARSER_TAGS = {
"id": "TODO",
"category": "TODO", # "archive", "audio", "container", ...
"file_ext": ("TODO",), # TODO: Example ("bmp",) to parse the file "image.bmp"
"mime": (u"TODO"), # TODO: Example: "image/png"
"min_size": 0, # TODO: Minimum file size (x bits, or x*8 in bytes)
"description": "TODO", # TODO: Example: "A bitmap picture"
}
# TODO: Choose between little or big endian
# endian = LITTLE_ENDIAN
# endian = BIG_ENDIAN
def validate(self):
# TODO: Check that file looks like your format
# Example: check first two bytes
# return (self.stream.readBytes(0, 2) == 'BM')
return False
def createFields(self):
# TODO: Write your parser using this model:
# yield UInt8(self, "name1", "description1")
# yield UInt16(self, "name2", "description2")
# yield UInt32(self, "name3", "description3")
# yield String(self, "name4", 1, "description4") # TODO: add ", charset="ASCII")"
# yield String(self, "name5", 1, "description5", charset="ASCII")
# yield String(self, "name6", 1, "description6", charset="ISO-8859-1")
# Read rest of the file (if any)
# TODO: You may remove this code
if self.current_size < self._size:
yield self.seekBit(self._size, "end")
|
gpl-3.0
|
xavfernandez/pip
|
src/pip/_internal/network/auth.py
|
10
|
11119
|
"""Network Authentication Helpers
Contains interface (MultiDomainBasicAuth) and associated glue code for
providing credentials in the context of network requests.
"""
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
import logging
from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth
from pip._vendor.requests.utils import get_netrc_auth
from pip._vendor.six.moves.urllib import parse as urllib_parse
from pip._internal.utils.misc import (
ask,
ask_input,
ask_password,
remove_auth_from_url,
split_auth_netloc_from_url,
)
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import Dict, Optional, Tuple
from pip._internal.vcs.versioncontrol import AuthInfo
Credentials = Tuple[str, str, str]
logger = logging.getLogger(__name__)
try:
import keyring # noqa
except ImportError:
keyring = None
except Exception as exc:
logger.warning(
"Keyring is skipped due to an exception: %s", str(exc),
)
keyring = None
def get_keyring_auth(url, username):
"""Return the tuple auth for a given url from keyring."""
if not url or not keyring:
return None
try:
try:
get_credential = keyring.get_credential
except AttributeError:
pass
else:
logger.debug("Getting credentials from keyring for %s", url)
cred = get_credential(url, username)
if cred is not None:
return cred.username, cred.password
return None
if username:
logger.debug("Getting password from keyring for %s", url)
password = keyring.get_password(url, username)
if password:
return username, password
except Exception as exc:
logger.warning(
"Keyring is skipped due to an exception: %s", str(exc),
)
class MultiDomainBasicAuth(AuthBase):
def __init__(self, prompting=True, index_urls=None):
# type: (bool, Optional[Values]) -> None
self.prompting = prompting
self.index_urls = index_urls
self.passwords = {} # type: Dict[str, AuthInfo]
# When the user is prompted to enter credentials and keyring is
# available, we will offer to save them. If the user accepts,
# this value is set to the credentials they entered. After the
# request authenticates, the caller should call
# ``save_credentials`` to save these.
self._credentials_to_save = None # type: Optional[Credentials]
def _get_index_url(self, url):
"""Return the original index URL matching the requested URL.
Cached or dynamically generated credentials may work against
the original index URL rather than just the netloc.
The provided url should have had its username and password
removed already. If the original index url had credentials then
they will be included in the return value.
Returns None if no matching index was found, or if --no-index
was specified by the user.
"""
if not url or not self.index_urls:
return None
for u in self.index_urls:
prefix = remove_auth_from_url(u).rstrip("/") + "/"
if url.startswith(prefix):
return u
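# For illustration (URLs are hypothetical): with index_urls containing
# "https://user:[email protected]/simple", a request for
# "https://idx.example.com/simple/foo/" matches the stripped prefix
# "https://idx.example.com/simple/" and the original credentialed URL is returned.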
def _get_new_credentials(self, original_url, allow_netrc=True,
allow_keyring=True):
"""Find and return credentials for the specified URL."""
# Split the credentials and netloc from the url.
url, netloc, url_user_password = split_auth_netloc_from_url(
original_url,
)
# Start with the credentials embedded in the url
username, password = url_user_password
if username is not None and password is not None:
logger.debug("Found credentials in url for %s", netloc)
return url_user_password
# Find a matching index url for this request
index_url = self._get_index_url(url)
if index_url:
# Split the credentials from the url.
index_info = split_auth_netloc_from_url(index_url)
if index_info:
index_url, _, index_url_user_password = index_info
logger.debug("Found index url %s", index_url)
# If an index URL was found, try its embedded credentials
if index_url and index_url_user_password[0] is not None:
username, password = index_url_user_password
if username is not None and password is not None:
logger.debug("Found credentials in index url for %s", netloc)
return index_url_user_password
# Get creds from netrc if we still don't have them
if allow_netrc:
netrc_auth = get_netrc_auth(original_url)
if netrc_auth:
logger.debug("Found credentials in netrc for %s", netloc)
return netrc_auth
# If we don't have a password and keyring is available, use it.
if allow_keyring:
# The index url is more specific than the netloc, so try it first
kr_auth = (
get_keyring_auth(index_url, username) or
get_keyring_auth(netloc, username)
)
if kr_auth:
logger.debug("Found credentials in keyring for %s", netloc)
return kr_auth
return username, password
def _get_url_and_credentials(self, original_url):
"""Return the credentials to use for the provided URL.
If allowed, netrc and keyring may be used to obtain the
correct credentials.
Returns (url_without_credentials, username, password). Note
that even if the original URL contains credentials, this
function may return a different username and password.
"""
url, netloc, _ = split_auth_netloc_from_url(original_url)
# Use any stored credentials that we have for this netloc
username, password = self.passwords.get(netloc, (None, None))
if username is None and password is None:
# No stored credentials. Acquire new credentials without prompting
# the user. (e.g. from netrc, keyring, or the URL itself)
username, password = self._get_new_credentials(original_url)
if username is not None or password is not None:
# Convert the username and password if they're None, so that
# this netloc will show up as "cached" in the conditional above.
# Further, HTTPBasicAuth doesn't accept None, so it makes sense to
# cache the value that is going to be used.
username = username or ""
password = password or ""
# Store any acquired credentials.
self.passwords[netloc] = (username, password)
assert (
# Credentials were found
(username is not None and password is not None) or
# Credentials were not found
(username is None and password is None)
), "Could not load credentials from url: {}".format(original_url)
return url, username, password
def __call__(self, req):
# Get credentials for this request
url, username, password = self._get_url_and_credentials(req.url)
# Set the url of the request to the url without any credentials
req.url = url
if username is not None and password is not None:
# Send the basic auth with this request
req = HTTPBasicAuth(username, password)(req)
# Attach a hook to handle 401 responses
req.register_hook("response", self.handle_401)
return req
# Factored out to allow for easy patching in tests
def _prompt_for_password(self, netloc):
username = ask_input("User for %s: " % netloc)
if not username:
return None, None
auth = get_keyring_auth(netloc, username)
if auth:
return auth[0], auth[1], False
password = ask_password("Password: ")
return username, password, True
# Factored out to allow for easy patching in tests
def _should_save_password_to_keyring(self):
if not keyring:
return False
return ask("Save credentials to keyring [y/N]: ", ["y", "n"]) == "y"
def handle_401(self, resp, **kwargs):
# We only care about 401 responses, anything else we want to just
# pass through the actual response
if resp.status_code != 401:
return resp
# We are not able to prompt the user so simply return the response
if not self.prompting:
return resp
parsed = urllib_parse.urlparse(resp.url)
# Prompt the user for a new username and password
username, password, save = self._prompt_for_password(parsed.netloc)
# Store the new username and password to use for future requests
self._credentials_to_save = None
if username is not None and password is not None:
self.passwords[parsed.netloc] = (username, password)
# Prompt to save the password to keyring
if save and self._should_save_password_to_keyring():
self._credentials_to_save = (parsed.netloc, username, password)
# Consume content and release the original connection to allow our new
# request to reuse the same one.
resp.content
resp.raw.release_conn()
# Add our new username and password to the request
req = HTTPBasicAuth(username or "", password or "")(resp.request)
req.register_hook("response", self.warn_on_401)
# On successful request, save the credentials that were used to
# keyring. (Note that if the user responded "no" above, this member
# is not set and nothing will be saved.)
if self._credentials_to_save:
req.register_hook("response", self.save_credentials)
# Send our new request
new_resp = resp.connection.send(req, **kwargs)
new_resp.history.append(resp)
return new_resp
def warn_on_401(self, resp, **kwargs):
"""Response callback to warn about incorrect credentials."""
if resp.status_code == 401:
logger.warning(
'401 Error, Credentials not correct for %s', resp.request.url,
)
def save_credentials(self, resp, **kwargs):
"""Response callback to save credentials on success."""
assert keyring is not None, "should never reach here without keyring"
if not keyring:
return
creds = self._credentials_to_save
self._credentials_to_save = None
if creds and resp.status_code < 400:
try:
logger.info('Saving credentials to keyring')
keyring.set_password(*creds)
except Exception:
logger.exception('Failed to save credentials')
|
mit
|
caisq/tensorflow
|
tensorflow/contrib/kfac/examples/mlp.py
|
14
|
12945
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Train an MLP on MNIST using K-FAC.
This library fits a 3-layer, tanh-activated MLP on MNIST using K-FAC. After
~25k steps, this should reach perfect accuracy on the training set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.contrib.kfac.examples import mnist
lc = tf.contrib.kfac.layer_collection
opt = tf.contrib.kfac.optimizer
__all__ = [
"fc_layer",
"train_mnist",
"train_mnist_multitower",
]
def fc_layer(layer_id, inputs, output_size):
"""Builds a fully connected layer.
Args:
layer_id: int. Integer ID for this layer's variables.
inputs: Tensor of shape [num_examples, input_size]. Each row corresponds
to a single example.
output_size: int. Number of output dimensions after fully connected layer.
Returns:
preactivations: Tensor of shape [num_examples, output_size]. Values of the
layer immediately before the activation function.
activations: Tensor of shape [num_examples, output_size]. Values of the
layer immediately after the activation function.
params: Tuple of (weights, bias), parameters for this layer.
"""
# TODO(b/67004004): Delete this function and rely on tf.layers exclusively.
layer = tf.layers.Dense(
output_size,
kernel_initializer=tf.random_normal_initializer(),
name="fc_%d" % layer_id)
preactivations = layer(inputs)
activations = tf.nn.tanh(preactivations)
  # layer.weights is a list. This converts it to a (hashable) tuple.
return preactivations, activations, (layer.kernel, layer.bias)
def build_model(examples, labels, num_labels, layer_collection):
"""Builds an MLP classification model.
Args:
examples: Tensor of shape [num_examples, num_features]. Represents inputs of
model.
labels: Tensor of shape [num_examples]. Contains integer IDs to be predicted
by softmax for each example.
num_labels: int. Number of distinct values 'labels' can take on.
layer_collection: LayerCollection instance describing model architecture.
Returns:
loss: 0-D Tensor representing loss to be minimized.
accuracy: 0-D Tensor representing model's accuracy.
"""
# Build an MLP. For each layer, we'll keep track of the preactivations,
# activations, weights, and bias.
pre0, act0, params0 = fc_layer(layer_id=0, inputs=examples, output_size=128)
pre1, act1, params1 = fc_layer(layer_id=1, inputs=act0, output_size=64)
pre2, act2, params2 = fc_layer(layer_id=2, inputs=act1, output_size=32)
logits, _, params3 = fc_layer(layer_id=3, inputs=act2, output_size=num_labels)
loss = tf.reduce_mean(
tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits))
accuracy = tf.reduce_mean(
tf.cast(tf.equal(labels, tf.argmax(logits, axis=1)), dtype=tf.float32))
# Register parameters. K-FAC needs to know about the inputs, outputs, and
# parameters of each layer and the logits powering the posterior probability
# over classes.
tf.logging.info("Building LayerCollection.")
layer_collection.register_fully_connected(params0, examples, pre0)
layer_collection.register_fully_connected(params1, act0, pre1)
layer_collection.register_fully_connected(params2, act1, pre2)
layer_collection.register_fully_connected(params3, act2, logits)
layer_collection.register_categorical_predictive_distribution(
logits, name="logits")
return loss, accuracy
def minimize(loss, accuracy, layer_collection, num_towers, session_config=None):
"""Minimize 'loss' with KfacOptimizer.
Args:
loss: 0-D Tensor. Loss to be minimized.
accuracy: 0-D Tensor. Accuracy of classifier on current minibatch.
layer_collection: LayerCollection instance. Describes layers in model.
num_towers: int. Number of CPUs to split minibatch across.
session_config: tf.ConfigProto. Configuration for tf.Session().
Returns:
accuracy of classifier on final minibatch.
"""
devices = tuple("/cpu:%d" % tower_id for tower_id in range(num_towers))
# Train with K-FAC. We'll use a decreasing learning rate that's cut in 1/2
# every 10k iterations.
tf.logging.info("Building KFAC Optimizer.")
global_step = tf.train.get_or_create_global_step()
optimizer = opt.KfacOptimizer(
learning_rate=tf.train.exponential_decay(
0.00002, global_step, 10000, 0.5, staircase=True),
cov_ema_decay=0.95,
damping=0.0005,
layer_collection=layer_collection,
momentum=0.99,
placement_strategy="round_robin",
cov_devices=devices,
inv_devices=devices)
(cov_update_thunks,
inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()
def make_update_op(update_thunks):
update_ops = [thunk() for thunk in update_thunks]
return tf.group(*update_ops)
# TODO(b/78537047): change (some) examples to use PeriodicInvCovUpdateKfacOpt
# once that gets moved over? Could still leave more advanced examples as they
# are (e.g. train_mnist_estimator in this file)
cov_update_op = make_update_op(cov_update_thunks)
with tf.control_dependencies([cov_update_op]):
    # We update the inverses only every 100 iterations.
inverse_op = tf.cond(
tf.equal(tf.mod(global_step, 100), 0),
lambda: make_update_op(inv_update_thunks), tf.no_op)
with tf.control_dependencies([inverse_op]):
train_op = optimizer.minimize(loss, global_step=global_step)
tf.logging.info("Starting training.")
with tf.train.MonitoredTrainingSession(config=session_config) as sess:
while not sess.should_stop():
global_step_, loss_, accuracy_, _ = sess.run(
[global_step, loss, accuracy, train_op])
if global_step_ % 100 == 0:
tf.logging.info("global_step: %d | loss: %f | accuracy: %f",
global_step_, loss_, accuracy_)
return accuracy_
def train_mnist(data_dir, num_epochs, use_fake_data=False):
"""Train an MLP on MNIST.
Args:
data_dir: string. Directory to read MNIST examples from.
num_epochs: int. Number of passes to make over the training set.
use_fake_data: bool. If True, generate a synthetic dataset.
Returns:
accuracy of model on the final minibatch of training data.
"""
# Load a dataset.
tf.logging.info("Loading MNIST into memory.")
examples, labels = mnist.load_mnist(
data_dir,
num_epochs=num_epochs,
batch_size=64,
flatten_images=True,
use_fake_data=use_fake_data)
# Build an MLP. The model's layers will be added to the LayerCollection.
tf.logging.info("Building model.")
layer_collection = lc.LayerCollection()
loss, accuracy = build_model(examples, labels, 10, layer_collection)
# Fit model.
  return minimize(loss, accuracy, layer_collection, 1)
def train_mnist_multitower(data_dir,
num_epochs,
num_towers,
use_fake_data=False):
"""Train an MLP on MNIST, splitting the minibatch across multiple towers.
Args:
data_dir: string. Directory to read MNIST examples from.
num_epochs: int. Number of passes to make over the training set.
num_towers: int. Number of CPUs to split minibatch across.
use_fake_data: bool. If True, generate a synthetic dataset.
Returns:
accuracy of model on the final minibatch of training data.
"""
# Load a dataset.
tower_batch_size = 64
batch_size = tower_batch_size * num_towers
tf.logging.info(
("Loading MNIST into memory. Using batch_size = %d = %d towers * %d "
"tower batch size.") % (batch_size, num_towers, tower_batch_size))
examples, labels = mnist.load_mnist(
data_dir,
num_epochs=num_epochs,
batch_size=batch_size,
flatten_images=True,
use_fake_data=use_fake_data)
# Split minibatch across towers.
examples = tf.split(examples, num_towers)
labels = tf.split(labels, num_towers)
# Build an MLP. Each tower's layers will be added to the LayerCollection.
layer_collection = lc.LayerCollection()
tower_results = []
for tower_id in range(num_towers):
with tf.device("/cpu:%d" % tower_id):
with tf.name_scope("tower%d" % tower_id):
with tf.variable_scope(tf.get_variable_scope(), reuse=(tower_id > 0)):
tf.logging.info("Building tower %d." % tower_id)
tower_results.append(
build_model(examples[tower_id], labels[tower_id], 10,
layer_collection))
losses, accuracies = zip(*tower_results)
# Average across towers.
loss = tf.reduce_mean(losses)
accuracy = tf.reduce_mean(accuracies)
# Fit model.
session_config = tf.ConfigProto(
allow_soft_placement=False, device_count={
"CPU": num_towers
})
return minimize(
loss, accuracy, layer_collection, num_towers,
session_config=session_config)
def train_mnist_estimator(data_dir, num_epochs, use_fake_data=False):
"""Train an MLP on MNIST using tf.estimator.
Args:
data_dir: string. Directory to read MNIST examples from.
num_epochs: int. Number of passes to make over the training set.
use_fake_data: bool. If True, generate a synthetic dataset.
Returns:
accuracy of model on the final minibatch of training data.
"""
# Load a dataset.
def input_fn():
tf.logging.info("Loading MNIST into memory.")
return mnist.load_mnist(
data_dir,
num_epochs=num_epochs,
batch_size=64,
flatten_images=True,
use_fake_data=use_fake_data)
def model_fn(features, labels, mode, params):
"""Model function for MLP trained with K-FAC.
Args:
features: Tensor of shape [batch_size, input_size]. Input features.
labels: Tensor of shape [batch_size]. Target labels for training.
mode: tf.estimator.ModeKey. Must be TRAIN.
params: ignored.
Returns:
EstimatorSpec for training.
Raises:
ValueError: If 'mode' is anything other than TRAIN.
"""
del params
if mode != tf.estimator.ModeKeys.TRAIN:
raise ValueError("Only training is supposed with this API.")
# Build a ConvNet.
layer_collection = lc.LayerCollection()
loss, accuracy = build_model(
features, labels, num_labels=10, layer_collection=layer_collection)
# Train with K-FAC.
global_step = tf.train.get_or_create_global_step()
optimizer = opt.KfacOptimizer(
learning_rate=tf.train.exponential_decay(
0.00002, global_step, 10000, 0.5, staircase=True),
cov_ema_decay=0.95,
damping=0.0001,
layer_collection=layer_collection,
momentum=0.99)
(cov_update_thunks,
inv_update_thunks) = optimizer.make_vars_and_create_op_thunks()
def make_update_op(update_thunks):
update_ops = [thunk() for thunk in update_thunks]
return tf.group(*update_ops)
def make_batch_executed_op(update_thunks, batch_size=1):
return tf.group(*tf.contrib.kfac.utils.batch_execute(
global_step, update_thunks, batch_size=batch_size))
    # Run cov_update_op every step. Run 1 inv_update_op per step.
cov_update_op = make_update_op(cov_update_thunks)
with tf.control_dependencies([cov_update_op]):
# But make sure to execute all the inverse ops on the first step
inverse_op = tf.cond(tf.equal(global_step, 0),
lambda: make_update_op(inv_update_thunks),
lambda: make_batch_executed_op(inv_update_thunks))
with tf.control_dependencies([inverse_op]):
train_op = optimizer.minimize(loss, global_step=global_step)
# Print metrics every 5 sec.
hooks = [
tf.train.LoggingTensorHook(
{
"loss": loss,
"accuracy": accuracy
}, every_n_secs=5),
]
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, train_op=train_op, training_hooks=hooks)
run_config = tf.estimator.RunConfig(
model_dir="/tmp/mnist", save_checkpoints_steps=1, keep_checkpoint_max=100)
# Train until input_fn() is empty with Estimator. This is a prerequisite for
# TPU compatibility.
estimator = tf.estimator.Estimator(model_fn=model_fn, config=run_config)
estimator.train(input_fn=input_fn)
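# A minimal invocation sketch (illustrative only; the data directory path and
# epoch count below are assumptions, not values used elsewhere in this module):
if __name__ == "__main__":
  tf.logging.set_verbosity(tf.logging.INFO)
  # Single-tower training; see train_mnist_multitower for the multi-CPU variant.
  train_mnist(data_dir="/tmp/mnist_data", num_epochs=10)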
|
apache-2.0
|
wlanslovenija/nodewatcher-warehouse
|
nodewatcherwarehouse/warehouse/templatetags/ware_tags.py
|
1
|
1229
|
from django import template
register = template.Library()
from django.core.urlresolvers import reverse
from django.utils.safestring import mark_safe
def qr_code(obj, size=2):
return reverse("wh:qr", kwargs={"string": abs_view(obj), "size":size})
def abs_view(obj):
from django.contrib.sites.models import Site
# TODO Somehow check for https?
return "%s://%s%s" % ("http", Site.objects.get_current().domain, reverse("wh:item-detail", kwargs = {"pk" : obj.id}))
def nice_location(obj):
if obj.node:
return "Node: %s" % obj.node.config.core.general().name
elif obj.location:
return obj.location.name
return ""
def nice_ownership(obj):
if obj.member:
return "%s %s (%s)" % (obj.member.first_name, obj.member.last_name, obj.member.username)
elif obj.person != "" or obj.email != "":
if obj.email:
return mark_safe("<a href='mailto:%s'>%s</a> %s" % (obj.email, obj.person, obj.phone or ""))
else:
return "%s %s" % (obj.person, obj.phone or "")
return ""
register.filter("qr_code", qr_code)
register.filter("abs_view", abs_view)
register.filter("nice_ownership", nice_ownership)
register.filter("nice_location", nice_location)
|
agpl-3.0
|
wildchildyn/autism-website
|
yanni_env/lib/python3.6/site-packages/sqlalchemy/sql/type_api.py
|
23
|
46998
|
# sql/type_api.py
# Copyright (C) 2005-2017 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Base types API.
"""
from .. import exc, util
from . import operators
from .visitors import Visitable, VisitableType
from .base import SchemaEventTarget
# these are back-assigned by sqltypes.
BOOLEANTYPE = None
INTEGERTYPE = None
NULLTYPE = None
STRINGTYPE = None
MATCHTYPE = None
INDEXABLE = None
_resolve_value_to_type = None
class TypeEngine(Visitable):
"""The ultimate base class for all SQL datatypes.
Common subclasses of :class:`.TypeEngine` include
:class:`.String`, :class:`.Integer`, and :class:`.Boolean`.
For an overview of the SQLAlchemy typing system, see
:ref:`types_toplevel`.
.. seealso::
:ref:`types_toplevel`
"""
_sqla_type = True
_isnull = False
class Comparator(operators.ColumnOperators):
"""Base class for custom comparison operations defined at the
type level. See :attr:`.TypeEngine.comparator_factory`.
"""
__slots__ = 'expr', 'type'
default_comparator = None
def __init__(self, expr):
self.expr = expr
self.type = expr.type
@util.dependencies('sqlalchemy.sql.default_comparator')
def operate(self, default_comparator, op, *other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, *(other + o[1:]), **kwargs)
@util.dependencies('sqlalchemy.sql.default_comparator')
def reverse_operate(self, default_comparator, op, other, **kwargs):
o = default_comparator.operator_lookup[op.__name__]
return o[0](self.expr, op, other,
reverse=True, *o[1:], **kwargs)
def _adapt_expression(self, op, other_comparator):
"""evaluate the return type of <self> <op> <othertype>,
and apply any adaptations to the given operator.
This method determines the type of a resulting binary expression
given two source types and an operator. For example, two
:class:`.Column` objects, both of the type :class:`.Integer`, will
produce a :class:`.BinaryExpression` that also has the type
:class:`.Integer` when compared via the addition (``+``) operator.
However, using the addition operator with an :class:`.Integer`
and a :class:`.Date` object will produce a :class:`.Date`, assuming
"days delta" behavior by the database (in reality, most databases
other than PostgreSQL don't accept this particular operation).
The method returns a tuple of the form <operator>, <type>.
The resulting operator and type will be those applied to the
resulting :class:`.BinaryExpression` as the final operator and the
right-hand side of the expression.
    Note that only a subset of operators make use of
:meth:`._adapt_expression`,
including math operators and user-defined operators, but not
boolean comparison or special SQL keywords like MATCH or BETWEEN.
"""
return op, self.type
def __reduce__(self):
return _reconstitute_comparator, (self.expr, )
hashable = True
"""Flag, if False, means values from this type aren't hashable.
Used by the ORM when uniquing result lists.
"""
comparator_factory = Comparator
"""A :class:`.TypeEngine.Comparator` class which will apply
to operations performed by owning :class:`.ColumnElement` objects.
The :attr:`.comparator_factory` attribute is a hook consulted by
the core expression system when column and SQL expression operations
are performed. When a :class:`.TypeEngine.Comparator` class is
associated with this attribute, it allows custom re-definition of
all existing operators, as well as definition of new operators.
Existing operators include those provided by Python operator overloading
such as :meth:`.operators.ColumnOperators.__add__` and
:meth:`.operators.ColumnOperators.__eq__`,
those provided as standard
attributes of :class:`.operators.ColumnOperators` such as
:meth:`.operators.ColumnOperators.like`
and :meth:`.operators.ColumnOperators.in_`.
Rudimentary usage of this hook is allowed through simple subclassing
of existing types, or alternatively by using :class:`.TypeDecorator`.
See the documentation section :ref:`types_operators` for examples.
.. versionadded:: 0.8 The expression system was enhanced to support
customization of operators on a per-type level.
"""
should_evaluate_none = False
"""If True, the Python constant ``None`` is considered to be handled
explicitly by this type.
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows types which have special
behavior for Python None, such as a JSON type, to indicate that
they'd like to handle the None value explicitly.
To set this flag on an existing type, use the
:meth:`.TypeEngine.evaluates_none` method.
.. seealso::
:meth:`.TypeEngine.evaluates_none`
.. versionadded:: 1.1
"""
def evaluates_none(self):
"""Return a copy of this type which has the :attr:`.should_evaluate_none`
flag set to True.
E.g.::
Table(
'some_table', metadata,
Column(
String(50).evaluates_none(),
nullable=True,
server_default='no value')
)
The ORM uses this flag to indicate that a positive value of ``None``
is passed to the column in an INSERT statement, rather than omitting
the column from the INSERT statement which has the effect of firing
off column-level defaults. It also allows for types which have
special behavior associated with the Python None value to indicate
that the value doesn't necessarily translate into SQL NULL; a
prime example of this is a JSON type which may wish to persist the
JSON value ``'null'``.
        In all cases, the actual NULL SQL value can always be
persisted in any column by using
the :obj:`~.expression.null` SQL construct in an INSERT statement
or associated with an ORM-mapped attribute.
.. note::
The "evaulates none" flag does **not** apply to a value
of ``None`` passed to :paramref:`.Column.default` or
:paramref:`.Column.server_default`; in these cases, ``None``
still means "no default".
.. versionadded:: 1.1
.. seealso::
:ref:`session_forcing_null` - in the ORM documentation
:paramref:`.postgresql.JSON.none_as_null` - PostgreSQL JSON
interaction with this flag.
:attr:`.TypeEngine.should_evaluate_none` - class-level flag
"""
typ = self.copy()
typ.should_evaluate_none = True
return typ
def copy(self, **kw):
return self.adapt(self.__class__)
def compare_against_backend(self, dialect, conn_type):
"""Compare this type against the given backend type.
This function is currently not implemented for SQLAlchemy
types, and for all built in types will return ``None``. However,
it can be implemented by a user-defined type
where it can be consumed by schema comparison tools such as
Alembic autogenerate.
        A future release of SQLAlchemy will potentially implement this method
for builtin types as well.
The function should return True if this type is equivalent to the
given type; the type is typically reflected from the database
so should be database specific. The dialect in use is also
passed. It can also return False to assert that the type is
not equivalent.
:param dialect: a :class:`.Dialect` that is involved in the comparison.
:param conn_type: the type object reflected from the backend.
.. versionadded:: 1.0.3
"""
return None
def copy_value(self, value):
return value
def literal_processor(self, dialect):
"""Return a conversion function for processing literal values that are
to be rendered directly without using binds.
This function is used when the compiler makes use of the
"literal_binds" flag, typically used in DDL generation as well
as in certain scenarios where backends don't accept bound parameters.
.. versionadded:: 0.9.0
"""
return None
def bind_processor(self, dialect):
"""Return a conversion function for processing bind values.
Returns a callable which will receive a bind parameter value
as the sole positional argument and will return a value to
send to the DB-API.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
"""
return None
def result_processor(self, dialect, coltype):
"""Return a conversion function for processing result row values.
Returns a callable which will receive a result row column
value as the sole positional argument and will return a value
to return to the user.
If processing is not necessary, the method should return ``None``.
:param dialect: Dialect instance in use.
:param coltype: DBAPI coltype argument received in cursor.description.
"""
return None
def column_expression(self, colexpr):
"""Given a SELECT column expression, return a wrapping SQL expression.
This is typically a SQL function that wraps a column expression
as rendered in the columns clause of a SELECT statement.
It is used for special data types that require
columns to be wrapped in some special database function in order
to coerce the value before being sent back to the application.
It is the SQL analogue of the :meth:`.TypeEngine.result_processor`
method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
See also:
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_column_expression(self):
"""memoized boolean, check if column_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.column_expression.__code__ \
is not TypeEngine.column_expression.__code__
def bind_expression(self, bindvalue):
""""Given a bind value (i.e. a :class:`.BindParameter` instance),
return a SQL expression in its place.
This is typically a SQL function that wraps the existing bound
parameter within the statement. It is used for special data types
that require literals being wrapped in some special database function
in order to coerce an application-level value into a database-specific
format. It is the SQL analogue of the
:meth:`.TypeEngine.bind_processor` method.
The method is evaluated at statement compile time, as opposed
to statement construction time.
Note that this method, when implemented, should always return
the exact same structure, without any conditional logic, as it
may be used in an executemany() call against an arbitrary number
of bound parameter sets.
See also:
:ref:`types_sql_value_processing`
"""
return None
@util.memoized_property
def _has_bind_expression(self):
"""memoized boolean, check if bind_expression is implemented.
Allows the method to be skipped for the vast majority of expression
types that don't use this feature.
"""
return self.__class__.bind_expression.__code__ \
is not TypeEngine.bind_expression.__code__
def compare_values(self, x, y):
"""Compare two values for equality."""
return x == y
def get_dbapi_type(self, dbapi):
"""Return the corresponding type object from the underlying DB-API, if
any.
This can be useful for calling ``setinputsizes()``, for example.
"""
return None
@property
def python_type(self):
"""Return the Python type object expected to be returned
by instances of this type, if known.
Basically, for those types which enforce a return type,
or are known across the board to do such for all common
DBAPIs (like ``int`` for example), will return that type.
If a return type is not defined, raises
``NotImplementedError``.
Note that any type also accommodates NULL in SQL which
means you can also get back ``None`` from any type
in practice.
"""
raise NotImplementedError()
def with_variant(self, type_, dialect_name):
"""Produce a new type object that will utilize the given
type when applied to the dialect of the given name.
e.g.::
from sqlalchemy.types import String
from sqlalchemy.dialects import mysql
s = String()
s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')
The construction of :meth:`.TypeEngine.with_variant` is always
from the "fallback" type to that which is dialect specific.
The returned type is an instance of :class:`.Variant`, which
itself provides a :meth:`.Variant.with_variant`
that can be called repeatedly.
:param type_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
.. versionadded:: 0.7.2
"""
return Variant(self, {dialect_name: to_instance(type_)})
@util.memoized_property
def _type_affinity(self):
"""Return a rudimental 'affinity' value expressing the general class
of type."""
typ = None
for t in self.__class__.__mro__:
if t in (TypeEngine, UserDefinedType):
return typ
elif issubclass(t, (TypeEngine, UserDefinedType)):
typ = t
else:
return self.__class__
def dialect_impl(self, dialect):
"""Return a dialect-specific implementation for this
:class:`.TypeEngine`.
"""
try:
return dialect._type_memos[self]['impl']
except KeyError:
return self._dialect_info(dialect)['impl']
def _cached_literal_processor(self, dialect):
"""Return a dialect-specific literal processor for this type."""
try:
return dialect._type_memos[self]['literal']
except KeyError:
d = self._dialect_info(dialect)
d['literal'] = lp = d['impl'].literal_processor(dialect)
return lp
def _cached_bind_processor(self, dialect):
"""Return a dialect-specific bind processor for this type."""
try:
return dialect._type_memos[self]['bind']
except KeyError:
d = self._dialect_info(dialect)
d['bind'] = bp = d['impl'].bind_processor(dialect)
return bp
def _cached_result_processor(self, dialect, coltype):
"""Return a dialect-specific result processor for this type."""
try:
return dialect._type_memos[self][coltype]
except KeyError:
d = self._dialect_info(dialect)
# key assumption: DBAPI type codes are
# constants. Else this dictionary would
# grow unbounded.
d[coltype] = rp = d['impl'].result_processor(dialect, coltype)
return rp
def _dialect_info(self, dialect):
"""Return a dialect-specific registry which
caches a dialect-specific implementation, bind processing
function, and one or more result processing functions."""
if self in dialect._type_memos:
return dialect._type_memos[self]
else:
impl = self._gen_dialect_impl(dialect)
if impl is self:
impl = self.adapt(type(self))
# this can't be self, else we create a cycle
assert impl is not self
dialect._type_memos[self] = d = {'impl': impl}
return d
def _gen_dialect_impl(self, dialect):
return dialect.type_descriptor(self)
def adapt(self, cls, **kw):
"""Produce an "adapted" form of this type, given an "impl" class
to work with.
This method is used internally to associate generic
types with "implementation" types that are specific to a particular
dialect.
"""
return util.constructor_copy(self, cls, **kw)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Given an operator and value, gives the type a chance
to return a type which the value should be coerced into.
The default behavior here is conservative; if the right-hand
side is already coerced into a SQL type based on its
Python type, it is usually left alone.
End-user functionality extension here should generally be via
:class:`.TypeDecorator`, which provides more liberal behavior in that
it defaults to coercing the other side of the expression into this
type, thus applying special Python conversions above and beyond those
        needed by the DBAPI to both sides. It also provides the public method
:meth:`.TypeDecorator.coerce_compared_value` which is intended for
end-user customization of this behavior.
"""
_coerced_type = _resolve_value_to_type(value)
if _coerced_type is NULLTYPE or _coerced_type._type_affinity \
is self._type_affinity:
return self
else:
return _coerced_type
def _compare_type_affinity(self, other):
return self._type_affinity is other._type_affinity
def compile(self, dialect=None):
"""Produce a string-compiled form of this :class:`.TypeEngine`.
When called with no arguments, uses a "default" dialect
to produce a string result.
:param dialect: a :class:`.Dialect` instance.
"""
# arg, return value is inconsistent with
# ClauseElement.compile()....this is a mistake.
if not dialect:
dialect = self._default_dialect()
return dialect.type_compiler.process(self)
@util.dependencies("sqlalchemy.engine.default")
def _default_dialect(self, default):
if self.__class__.__module__.startswith("sqlalchemy.dialects"):
tokens = self.__class__.__module__.split(".")[0:3]
mod = ".".join(tokens)
return getattr(__import__(mod).dialects, tokens[-1]).dialect()
else:
return default.DefaultDialect()
def __str__(self):
if util.py2k:
return unicode(self.compile()).\
encode('ascii', 'backslashreplace')
else:
return str(self.compile())
def __repr__(self):
return util.generic_repr(self)
class VisitableCheckKWArg(util.EnsureKWArgType, VisitableType):
pass
class UserDefinedType(util.with_metaclass(VisitableCheckKWArg, TypeEngine)):
"""Base for user defined types.
This should be the base of new types. Note that
for most cases, :class:`.TypeDecorator` is probably
more appropriate::
import sqlalchemy.types as types
class MyType(types.UserDefinedType):
def __init__(self, precision = 8):
self.precision = precision
def get_col_spec(self, **kw):
return "MYTYPE(%s)" % self.precision
def bind_processor(self, dialect):
def process(value):
return value
return process
def result_processor(self, dialect, coltype):
def process(value):
return value
return process
Once the type is made, it's immediately usable::
table = Table('foo', meta,
Column('id', Integer, primary_key=True),
Column('data', MyType(16))
)
The ``get_col_spec()`` method will in most cases receive a keyword
argument ``type_expression`` which refers to the owning expression
of the type as being compiled, such as a :class:`.Column` or
:func:`.cast` construct. This keyword is only sent if the method
accepts keyword arguments (e.g. ``**kw``) in its argument signature;
introspection is used to check for this in order to support legacy
forms of this function.
.. versionadded:: 1.0.0 the owning expression is passed to
the ``get_col_spec()`` method via the keyword argument
``type_expression``, if it receives ``**kw`` in its signature.
"""
__visit_name__ = "user_defined"
ensure_kwarg = 'get_col_spec'
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def _adapt_expression(self, op, other_comparator):
if hasattr(self.type, 'adapt_operator'):
util.warn_deprecated(
"UserDefinedType.adapt_operator is deprecated. Create "
"a UserDefinedType.Comparator subclass instead which "
"generates the desired expression constructs, given a "
"particular operator."
)
return self.type.adapt_operator(op), self.type
else:
return op, self.type
comparator_factory = Comparator
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
Default behavior for :class:`.UserDefinedType` is the
same as that of :class:`.TypeDecorator`; by default it returns
``self``, assuming the compared value should be coerced into
the same type as this one. See
:meth:`.TypeDecorator.coerce_compared_value` for more detail.
.. versionchanged:: 0.8 :meth:`.UserDefinedType.coerce_compared_value`
now returns ``self`` by default, rather than falling onto the
more fundamental behavior of
:meth:`.TypeEngine.coerce_compared_value`.
"""
return self
class TypeDecorator(SchemaEventTarget, TypeEngine):
"""Allows the creation of types which add additional functionality
to an existing type.
This method is preferred to direct subclassing of SQLAlchemy's
built-in types as it ensures that all required functionality of
the underlying type is kept in place.
Typical usage::
import sqlalchemy.types as types
class MyType(types.TypeDecorator):
'''Prefixes Unicode values with "PREFIX:" on the way in and
strips it off on the way out.
'''
impl = types.Unicode
def process_bind_param(self, value, dialect):
return "PREFIX:" + value
def process_result_value(self, value, dialect):
return value[7:]
def copy(self, **kw):
return MyType(self.impl.length)
The class-level "impl" attribute is required, and can reference any
TypeEngine class. Alternatively, the load_dialect_impl() method
can be used to provide different type classes based on the dialect
given; in this case, the "impl" variable can reference
``TypeEngine`` as a placeholder.
Types that receive a Python type that isn't similar to the ultimate type
used may want to define the :meth:`TypeDecorator.coerce_compared_value`
method. This is used to give the expression system a hint when coercing
Python objects into bind parameters within expressions. Consider this
expression::
mytable.c.somecol + datetime.date(2009, 5, 15)
Above, if "somecol" is an ``Integer`` variant, it makes sense that
we're doing date arithmetic, where above is usually interpreted
by databases as adding a number of days to the given date.
The expression system does the right thing by not attempting to
coerce the "date()" value into an integer-oriented bind parameter.
However, in the case of ``TypeDecorator``, we are usually changing an
incoming Python type to something new - ``TypeDecorator`` by default will
"coerce" the non-typed side to be the same type as itself. Such as below,
we define an "epoch" type that stores a date value as an integer::
class MyEpochType(types.TypeDecorator):
impl = types.Integer
epoch = datetime.date(1970, 1, 1)
def process_bind_param(self, value, dialect):
return (value - self.epoch).days
def process_result_value(self, value, dialect):
return self.epoch + timedelta(days=value)
Our expression of ``somecol + date`` with the above type will coerce the
"date" on the right side to also be treated as ``MyEpochType``.
This behavior can be overridden via the
:meth:`~TypeDecorator.coerce_compared_value` method, which returns a type
that should be used for the value of the expression. Below we set it such
that an integer value will be treated as an ``Integer``, and any other
value is assumed to be a date and will be treated as a ``MyEpochType``::
def coerce_compared_value(self, op, value):
if isinstance(value, int):
return Integer()
else:
return self
.. warning::
Note that the **behavior of coerce_compared_value is not inherited
by default from that of the base type**.
If the :class:`.TypeDecorator` is augmenting a
type that requires special logic for certain types of operators,
this method **must** be overridden. A key example is when decorating
the :class:`.postgresql.JSON` and :class:`.postgresql.JSONB` types;
the default rules of :meth:`.TypeEngine.coerce_compared_value` should
be used in order to deal with operators like index operations::
class MyJsonType(TypeDecorator):
impl = postgresql.JSON
def coerce_compared_value(self, op, value):
return self.impl.coerce_compared_value(op, value)
Without the above step, index operations such as ``mycol['foo']``
will cause the index value ``'foo'`` to be JSON encoded.
"""
__visit_name__ = "type_decorator"
def __init__(self, *args, **kwargs):
"""Construct a :class:`.TypeDecorator`.
Arguments sent here are passed to the constructor
of the class assigned to the ``impl`` class level attribute,
assuming the ``impl`` is a callable, and the resulting
object is assigned to the ``self.impl`` instance attribute
(thus overriding the class attribute of the same name).
If the class level ``impl`` is not a callable (the unusual case),
it will be assigned to the same instance attribute 'as-is',
ignoring those arguments passed to the constructor.
Subclasses can override this to customize the generation
of ``self.impl`` entirely.
"""
if not hasattr(self.__class__, 'impl'):
raise AssertionError("TypeDecorator implementations "
"require a class-level variable "
"'impl' which refers to the class of "
"type being decorated")
self.impl = to_instance(self.__class__.impl, *args, **kwargs)
coerce_to_is_types = (util.NoneType, )
"""Specify those Python types which should be coerced at the expression
level to "IS <constant>" when compared using ``==`` (and same for
    ``IS NOT`` in conjunction with ``!=``).
For most SQLAlchemy types, this includes ``NoneType``, as well as
``bool``.
:class:`.TypeDecorator` modifies this list to only include ``NoneType``,
as typedecorator implementations that deal with boolean types are common.
Custom :class:`.TypeDecorator` classes can override this attribute to
return an empty tuple, in which case no values will be coerced to
constants.
.. versionadded:: 0.8.2
Added :attr:`.TypeDecorator.coerce_to_is_types` to allow for easier
control of ``__eq__()`` ``__ne__()`` operations.
"""
class Comparator(TypeEngine.Comparator):
__slots__ = ()
def operate(self, op, *other, **kwargs):
kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).operate(
op, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
kwargs['_python_is_types'] = self.expr.type.coerce_to_is_types
return super(TypeDecorator.Comparator, self).reverse_operate(
op, other, **kwargs)
@property
def comparator_factory(self):
if TypeDecorator.Comparator in self.impl.comparator_factory.__mro__:
return self.impl.comparator_factory
else:
return type("TDComparator",
(TypeDecorator.Comparator,
self.impl.comparator_factory),
{})
def _gen_dialect_impl(self, dialect):
"""
#todo
"""
adapted = dialect.type_descriptor(self)
if adapted is not self:
return adapted
# otherwise adapt the impl type, link
# to a copy of this TypeDecorator and return
# that.
typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect)
tt = self.copy()
if not isinstance(tt, self.__class__):
raise AssertionError('Type object %s does not properly '
'implement the copy() method, it must '
'return an object of type %s' %
(self, self.__class__))
tt.impl = typedesc
return tt
@property
def _type_affinity(self):
"""
#todo
"""
return self.impl._type_affinity
def _set_parent(self, column):
"""Support SchemaEventTarget"""
super(TypeDecorator, self)._set_parent(column)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
super(TypeDecorator, self)._set_parent_with_dispatch(parent)
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent_with_dispatch(parent)
def type_engine(self, dialect):
"""Return a dialect-specific :class:`.TypeEngine` instance
for this :class:`.TypeDecorator`.
In most cases this returns a dialect-adapted form of
the :class:`.TypeEngine` type represented by ``self.impl``.
        Makes use of :meth:`dialect_impl` but also traverses
into wrapped :class:`.TypeDecorator` instances.
Behavior can be customized here by overriding
:meth:`load_dialect_impl`.
"""
adapted = dialect.type_descriptor(self)
if not isinstance(adapted, type(self)):
return adapted
elif isinstance(self.impl, TypeDecorator):
return self.impl.type_engine(dialect)
else:
return self.load_dialect_impl(dialect)
def load_dialect_impl(self, dialect):
"""Return a :class:`.TypeEngine` object corresponding to a dialect.
This is an end-user override hook that can be used to provide
differing types depending on the given dialect. It is used
by the :class:`.TypeDecorator` implementation of :meth:`type_engine`
to help determine what type should ultimately be returned
for a given :class:`.TypeDecorator`.
By default returns ``self.impl``.
"""
return self.impl
def __getattr__(self, key):
"""Proxy all other undefined accessors to the underlying
implementation."""
return getattr(self.impl, key)
def process_literal_param(self, value, dialect):
"""Receive a literal parameter value to be rendered inline within
a statement.
This method is used when the compiler renders a
literal value without using binds, typically within DDL
such as in the "server default" of a column or an expression
within a CHECK constraint.
The returned string will be rendered into the output string.
.. versionadded:: 0.9.0
"""
raise NotImplementedError()
def process_bind_param(self, value, dialect):
"""Receive a bound parameter value to be converted.
Subclasses override this method to return the
value that should be passed along to the underlying
:class:`.TypeEngine` object, and from there to the
DBAPI ``execute()`` method.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
This operation should be designed with the reverse operation
in mind, which would be the process_result_value method of
this class.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
"""
raise NotImplementedError()
def process_result_value(self, value, dialect):
"""Receive a result-row column value to be converted.
Subclasses should implement this method to operate on data
fetched from the database.
Subclasses override this method to return the
value that should be passed back to the application,
given a value that is already processed by
the underlying :class:`.TypeEngine` object, originally
from the DBAPI cursor method ``fetchone()`` or similar.
The operation could be anything desired to perform custom
behavior, such as transforming or serializing data.
This could also be used as a hook for validating logic.
:param value: Data to operate upon, of any type expected by
this method in the subclass. Can be ``None``.
:param dialect: the :class:`.Dialect` in use.
This operation should be designed to be reversible by
the "process_bind_param" method of this class.
"""
raise NotImplementedError()
@util.memoized_property
def _has_bind_processor(self):
"""memoized boolean, check if process_bind_param is implemented.
Allows the base process_bind_param to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_bind_param.__code__ \
is not TypeDecorator.process_bind_param.__code__
@util.memoized_property
def _has_literal_processor(self):
"""memoized boolean, check if process_literal_param is implemented.
"""
return self.__class__.process_literal_param.__code__ \
is not TypeDecorator.process_literal_param.__code__
def literal_processor(self, dialect):
"""Provide a literal processing function for the given
:class:`.Dialect`.
Subclasses here will typically override
:meth:`.TypeDecorator.process_literal_param` instead of this method
directly.
By default, this method makes use of
:meth:`.TypeDecorator.process_bind_param` if that method is
implemented, where :meth:`.TypeDecorator.process_literal_param` is
not. The rationale here is that :class:`.TypeDecorator` typically
deals with Python conversions of data that are above the layer of
database presentation. With the value converted by
:meth:`.TypeDecorator.process_bind_param`, the underlying type will
then handle whether it needs to be presented to the DBAPI as a bound
parameter or to the database as an inline SQL value.
.. versionadded:: 0.9.0
"""
if self._has_literal_processor:
process_param = self.process_literal_param
elif self._has_bind_processor:
# the bind processor should normally be OK
# for TypeDecorator since it isn't doing DB-level
# handling, the handling here won't be different for bound vs.
# literals.
process_param = self.process_bind_param
else:
process_param = None
if process_param:
impl_processor = self.impl.literal_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.literal_processor(dialect)
def bind_processor(self, dialect):
"""Provide a bound value processing function for the
given :class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for bound value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_bind_param` here.
User-defined code can override this method directly,
        though it's likely best to use :meth:`process_bind_param` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
This method is the reverse counterpart to the
:meth:`result_processor` method of this class.
"""
if self._has_bind_processor:
process_param = self.process_bind_param
impl_processor = self.impl.bind_processor(dialect)
if impl_processor:
def process(value):
return impl_processor(process_param(value, dialect))
else:
def process(value):
return process_param(value, dialect)
return process
else:
return self.impl.bind_processor(dialect)
@util.memoized_property
def _has_result_processor(self):
"""memoized boolean, check if process_result_value is implemented.
Allows the base process_result_value to raise
NotImplementedError without needing to test an expensive
exception throw.
"""
return self.__class__.process_result_value.__code__ \
is not TypeDecorator.process_result_value.__code__
def result_processor(self, dialect, coltype):
"""Provide a result value processing function for the given
:class:`.Dialect`.
This is the method that fulfills the :class:`.TypeEngine`
contract for result value conversion. :class:`.TypeDecorator`
will wrap a user-defined implementation of
:meth:`process_result_value` here.
User-defined code can override this method directly,
        though it's likely best to use :meth:`process_result_value` so that
the processing provided by ``self.impl`` is maintained.
:param dialect: Dialect instance in use.
:param coltype: A SQLAlchemy data type
This method is the reverse counterpart to the
:meth:`bind_processor` method of this class.
"""
if self._has_result_processor:
process_value = self.process_result_value
impl_processor = self.impl.result_processor(dialect,
coltype)
if impl_processor:
def process(value):
return process_value(impl_processor(value), dialect)
else:
def process(value):
return process_value(value, dialect)
return process
else:
return self.impl.result_processor(dialect, coltype)
def coerce_compared_value(self, op, value):
"""Suggest a type for a 'coerced' Python value in an expression.
By default, returns self. This method is called by
the expression system when an object using this type is
on the left or right side of an expression against a plain Python
object which does not yet have a SQLAlchemy type assigned::
expr = table.c.somecolumn + 35
Where above, if ``somecolumn`` uses this type, this method will
be called with the value ``operator.add``
and ``35``. The return value is whatever SQLAlchemy type should
be used for ``35`` for this particular operation.
"""
return self
def copy(self, **kw):
"""Produce a copy of this :class:`.TypeDecorator` instance.
This is a shallow copy and is provided to fulfill part of
the :class:`.TypeEngine` contract. It usually does not
need to be overridden unless the user-defined :class:`.TypeDecorator`
has local state that should be deep-copied.
"""
instance = self.__class__.__new__(self.__class__)
instance.__dict__.update(self.__dict__)
return instance
def get_dbapi_type(self, dbapi):
"""Return the DBAPI type object represented by this
:class:`.TypeDecorator`.
By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the
underlying "impl".
"""
return self.impl.get_dbapi_type(dbapi)
def compare_values(self, x, y):
"""Given two values, compare them for equality.
By default this calls upon :meth:`.TypeEngine.compare_values`
of the underlying "impl", which in turn usually
uses the Python equals operator ``==``.
This function is used by the ORM to compare
an original-loaded value with an intercepted
"changed" value, to determine if a net change
has occurred.
"""
return self.impl.compare_values(x, y)
def __repr__(self):
return util.generic_repr(self, to_inspect=self.impl)
class Variant(TypeDecorator):
"""A wrapping type that selects among a variety of
implementations based on dialect in use.
The :class:`.Variant` type is typically constructed
using the :meth:`.TypeEngine.with_variant` method.
.. versionadded:: 0.7.2
.. seealso:: :meth:`.TypeEngine.with_variant` for an example of use.
"""
def __init__(self, base, mapping):
"""Construct a new :class:`.Variant`.
:param base: the base 'fallback' type
:param mapping: dictionary of string dialect names to
:class:`.TypeEngine` instances.
"""
self.impl = base
self.mapping = mapping
def coerce_compared_value(self, operator, value):
result = self.impl.coerce_compared_value(operator, value)
if result is self.impl:
return self
else:
return result
def load_dialect_impl(self, dialect):
if dialect.name in self.mapping:
return self.mapping[dialect.name]
else:
return self.impl
def _set_parent(self, column):
"""Support SchemaEventTarget"""
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent(column)
for impl in self.mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent(column)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
if isinstance(self.impl, SchemaEventTarget):
self.impl._set_parent_with_dispatch(parent)
for impl in self.mapping.values():
if isinstance(impl, SchemaEventTarget):
impl._set_parent_with_dispatch(parent)
def with_variant(self, type_, dialect_name):
"""Return a new :class:`.Variant` which adds the given
type + dialect name to the mapping, in addition to the
mapping present in this :class:`.Variant`.
:param type_: a :class:`.TypeEngine` that will be selected
as a variant from the originating type, when a dialect
of the given name is in use.
:param dialect_name: base name of the dialect which uses
this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
"""
if dialect_name in self.mapping:
raise exc.ArgumentError(
"Dialect '%s' is already present in "
"the mapping for this Variant" % dialect_name)
mapping = self.mapping.copy()
mapping[dialect_name] = type_
return Variant(self.impl, mapping)
@property
def comparator_factory(self):
"""express comparison behavior in terms of the base type"""
return self.impl.comparator_factory
def _reconstitute_comparator(expression):
return expression.comparator
def to_instance(typeobj, *arg, **kw):
if typeobj is None:
return NULLTYPE
if util.callable(typeobj):
return typeobj(*arg, **kw)
else:
return typeobj
def adapt_type(typeobj, colspecs):
if isinstance(typeobj, type):
typeobj = typeobj()
for t in typeobj.__class__.__mro__[0:-1]:
try:
impltype = colspecs[t]
break
except KeyError:
pass
else:
# couldn't adapt - so just return the type itself
# (it may be a user-defined type)
return typeobj
# if we adapted the given generic type to a database-specific type,
# but it turns out the originally given "generic" type
# is actually a subclass of our resulting type, then we were already
# given a more specific type than that required; so use that.
if (issubclass(typeobj.__class__, impltype)):
return typeobj
return typeobj.adapt(impltype)
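# Rough illustration of adapt_type() (hypothetical colspecs mapping; real
# dialects supply their own colspecs dictionaries):
#
#   from sqlalchemy import types
#   from sqlalchemy.dialects.mysql import VARCHAR
#   adapt_type(types.String(50), {types.String: VARCHAR})
#   # -> a mysql VARCHAR instance carrying length=50 from the original type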
|
gpl-3.0
|
jbalogh/zamboni
|
apps/amo/urlresolvers.py
|
1
|
6504
|
# -*- coding: utf-8 -*-
import hashlib
import urllib
from threading import local
from urlparse import urlparse, urlsplit, urlunsplit
from django.conf import settings
from django.core import urlresolvers
from django.utils import encoding
from django.utils.translation.trans_real import parse_accept_lang_header
import jinja2
import amo
# Get a pointer to Django's reverse because we're going to hijack it after we
# define our own.
django_reverse = urlresolvers.reverse
# Thread-local storage for URL prefixes. Access with {get,set}_url_prefix.
_local = local()
def set_url_prefix(prefix):
"""Set ``prefix`` for the current thread."""
_local.prefix = prefix
def get_url_prefix():
"""Get the prefix for the current thread, or None."""
return getattr(_local, 'prefix', None)
def clean_url_prefixes():
"""Purge prefix cache."""
if hasattr(_local, 'prefix'):
delattr(_local, 'prefix')
def get_app_redirect(app):
"""Redirect request to another app."""
prefixer = get_url_prefix()
old_app = prefixer.app
prefixer.app = app.short
(_, _, url) = prefixer.split_path(prefixer.request.get_full_path())
new_url = prefixer.fix(url)
prefixer.app = old_app
return new_url
def reverse(viewname, urlconf=None, args=None, kwargs=None, prefix=None,
current_app=None, add_prefix=True):
"""Wraps django's reverse to prepend the correct locale and app."""
prefixer = get_url_prefix()
# Blank out the script prefix since we add that in prefixer.fix().
if prefixer:
prefix = prefix or '/'
url = django_reverse(viewname, urlconf, args, kwargs, prefix, current_app)
if prefixer and add_prefix:
return prefixer.fix(url)
else:
return url
# Replace Django's reverse with our own.
urlresolvers.reverse = reverse
class Prefixer(object):
def __init__(self, request):
self.request = request
split = self.split_path(request.path_info)
self.locale, self.app, self.shortened_path = split
def split_path(self, path_):
"""
Split the requested path into (locale, app, remainder).
locale and app will be empty strings if they're not found.
"""
path = path_.lstrip('/')
# Use partition instead of split since it always returns 3 parts.
first, _, first_rest = path.partition('/')
second, _, rest = first_rest.partition('/')
if first.lower() in settings.LANGUAGES:
if second in amo.APPS:
return first, second, rest
else:
return first, '', first_rest
elif first in amo.APPS:
return '', first, first_rest
else:
if second in amo.APPS:
return '', second, rest
else:
return '', '', path
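    # Illustration of split_path() (hypothetical paths; assumes 'en-us' is
    # present in settings.LANGUAGES and 'firefox' is a key in amo.APPS):
    #   '/en-us/firefox/addons/' -> ('en-us', 'firefox', 'addons/')
    #   '/firefox/addons/'       -> ('', 'firefox', 'addons/')
    #   '/addons/'               -> ('', '', 'addons/')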
def get_app(self):
"""
Return a valid application string using the User Agent to guess. Falls
back to settings.DEFAULT_APP.
"""
ua = self.request.META.get('HTTP_USER_AGENT')
if ua:
for app in amo.APP_DETECT:
if app.user_agent_string in ua:
return app.short
return settings.DEFAULT_APP
def get_language(self):
"""
Return a locale code that we support on the site using the
user's Accept Language header to determine which is best. This
mostly follows the RFCs but read bug 439568 for details.
"""
if 'lang' in self.request.GET:
lang = self.request.GET['lang'].lower()
if lang in settings.LANGUAGE_URL_MAP:
return settings.LANGUAGE_URL_MAP[lang]
accept = self.request.META.get('HTTP_ACCEPT_LANGUAGE', '')
return lang_from_accept_header(accept)
def fix(self, path):
path = path.lstrip('/')
url_parts = [self.request.META['SCRIPT_NAME']]
if path.partition('/')[0] not in settings.SUPPORTED_NONLOCALES:
locale = self.locale if self.locale else self.get_language()
url_parts.append(locale)
if path.partition('/')[0] not in settings.SUPPORTED_NONAPPS:
app = self.app if self.app else self.get_app()
url_parts.append(app)
url_parts.append(path)
return '/'.join(url_parts)
def get_outgoing_url(url):
"""
Bounce a URL off an outgoing URL redirector, such as outgoing.mozilla.org.
"""
if not settings.REDIRECT_URL:
return url
# no double-escaping
if urlparse(url).netloc == urlparse(settings.REDIRECT_URL).netloc:
return url
url = encoding.smart_str(jinja2.utils.Markup(url).unescape())
hash = hashlib.sha1(settings.REDIRECT_SECRET_KEY + url).hexdigest()
# Let '&=' through so query params aren't escaped. We probably shouldn't
# bother to quote the query part at all.
return '/'.join([settings.REDIRECT_URL.rstrip('/'), hash,
urllib.quote(url, safe='/&=')])
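# Rough illustration (hypothetical settings; the redirector domain is an
# assumption):
#   with settings.REDIRECT_URL = 'https://outgoing.example.org/v1/', calling
#   get_outgoing_url('http://addon.example.com/') returns something like
#   'https://outgoing.example.org/v1/<sha1 hexdigest>/http%3A//addon.example.com/'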
def url_fix(s, charset='utf-8'):
"""Sometimes you get an URL by a user that just isn't a real
URL because it contains unsafe characters like ' ' and so on. This
function can fix some of the problems in a similar way browsers
handle data entered by the user:
>>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffsklärung)')
'http://de.wikipedia.org/wiki/Elf%20%28Begriffskl%C3%A4rung%29'
:param charset: The target charset for the URL if the url was
given as unicode string.
Lifted from Werkzeug.
"""
if isinstance(s, unicode):
s = s.encode(charset, 'ignore')
scheme, netloc, path, qs, anchor = urlsplit(s)
path = urllib.quote(path, '/%:')
qs = urllib.quote_plus(qs, ':&=')
return urlunsplit((scheme, netloc, path, qs, anchor))
def lang_from_accept_header(header):
# Map all our lang codes and any prefixes to the locale code.
langs = [(k.lower(), v) for k, v in settings.LANGUAGE_URL_MAP.items()]
# Start with prefixes so any real matches override them.
lang_url_map = dict((k.split('-')[0], v) for k, v in langs)
lang_url_map.update(langs)
# If we have a lang or a prefix of the lang, return the locale code.
for lang, _ in parse_accept_lang_header(header.lower()):
if lang in lang_url_map:
return lang_url_map[lang]
prefix = lang.split('-')[0]
if prefix in lang_url_map:
return lang_url_map[prefix]
return settings.LANGUAGE_CODE
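# Rough illustration (hypothetical mappings; assumes settings.LANGUAGE_URL_MAP
# contains {'pt-br': 'pt-BR', 'en-us': 'en-US'} and settings.LANGUAGE_CODE is
# 'en-US'):
#   lang_from_accept_header('pt-br,en;q=0.5') -> 'pt-BR'
#   lang_from_accept_header('pt')             -> 'pt-BR'   (prefix match)
#   lang_from_accept_header('xx')             -> 'en-US'   (fallback)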
|
bsd-3-clause
|
Geosyntec/dockside
|
setup.py
|
1
|
1419
|
# Setup script for the dockside package
#
# Usage: python setup.py install
#
import os
from setuptools import setup, find_packages
DESCRIPTION = (
"dockside: A python utility to download United States "
"Geological Survey (USGS) National Water Information System (NWIS) data"
)
LONG_DESCRIPTION = DESCRIPTION
NAME = "dockside"
VERSION = "0.1.1"
AUTHOR = "Lucas Nguyen (Geosyntec Consultants)"
AUTHOR_EMAIL = "[email protected]"
URL = "https://github.com/Geosyntec/pynwis"
DOWNLOAD_URL = "https://github.com/Geosyntec/pynwis/archive/master.zip"
LICENSE = "BSD 3-clause"
PLATFORMS = "Python 3.6 and later."
CLASSIFIERS = [
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Intended Audience :: Science/Research",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
]
INSTALL_REQUIRES = ["pandas", "requests"]
PACKAGE_DATA = {}
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
download_url=DOWNLOAD_URL,
license=LICENSE,
package_data=PACKAGE_DATA,
platforms=PLATFORMS,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
|
bsd-3-clause
|
hexlism/xx_net
|
launcher/web_control.py
|
1
|
19797
|
#!/usr/bin/env python
# coding:utf-8
import os, sys
current_path = os.path.dirname(os.path.abspath(__file__))
if __name__ == "__main__":
python_path = os.path.abspath( os.path.join(current_path, os.pardir, 'python27', '1.0'))
noarch_lib = os.path.abspath( os.path.join(python_path, 'lib', 'noarch'))
sys.path.append(noarch_lib)
import re
import socket, ssl
import urlparse
import threading
import urllib2
import time
root_path = os.path.abspath(os.path.join(current_path, os.pardir))
import yaml
import json
from instances import xlog
import module_init
import config
import autorun
import update_from_github
import simple_http_server
from simple_i18n import SimpleI18N
NetWorkIOError = (socket.error, ssl.SSLError, OSError)
i18n_translator = SimpleI18N(config.get(['language'], None))
module_menus = {}
class Http_Handler(simple_http_server.HttpServerHandler):
deploy_proc = None
def load_module_menus(self):
global module_menus
new_module_menus = {}
#config.load()
modules = config.get(['modules'], None)
for module in modules:
values = modules[module]
if module != "launcher" and config.get(["modules", module, "auto_start"], 0) != 1: # skip php_proxy module
continue
#version = values["current_version"]
menu_path = os.path.join(root_path, module, "web_ui", "menu.yaml") # launcher & gae_proxy modules
if not os.path.isfile(menu_path):
continue
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, module, 'lang'))
stream = i18n_translator.render(locale_dir, menu_path)
module_menu = yaml.load(stream)
new_module_menus[module] = module_menu
module_menus = sorted(new_module_menus.iteritems(), key=lambda (k,v): (v['menu_sort_id']))
#for k,v in self.module_menus:
# logging.debug("m:%s id:%d", k, v['menu_sort_id'])
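# Hedged sketch of the menu.yaml layout assumed above (field names are taken
# from the lookups in req_index_handler below, the values are invented):
#   menu_sort_id: 100
#   module_title: "GAE Proxy"
#   sub_menus:
#     1: {title: "Status", url: "status"}
#     2: {title: "Config", url: "config"}
# load_module_menus() sorts modules by menu_sort_id and req_index_handler walks
# sub_menus to build the left-hand navigation.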
def do_POST(self):
refer = self.headers.getheader('Referer')
if refer:
refer_loc = urlparse.urlparse(refer).netloc
host = self.headers.getheader('host')
if refer_loc != host:
xlog.warn("web control ref:%s host:%s", refer_loc, host)
return
#url_path = urlparse.urlparse(self.path).path
url_path_list = self.path.split('/')
if len(url_path_list) >= 3 and url_path_list[1] == "module":
module = url_path_list[2]
if len(url_path_list) >= 4 and url_path_list[3] == "control":
if module not in module_init.proc_handler:
xlog.warn("request %s no module in path", self.path)
self.send_not_found()
return
path = '/' + '/'.join(url_path_list[4:])
controler = module_init.proc_handler[module]["imp"].local.web_control.ControlHandler(self.client_address, self.headers, self.command, path, self.rfile, self.wfile)
controler.do_POST()
return
def do_GET(self):
refer = self.headers.getheader('Referer')
if refer:
refer_loc = urlparse.urlparse(refer).netloc
host = self.headers.getheader('host')
if refer_loc != host:
xlog.warn("web control ref:%s host:%s", refer_loc, host)
return
# check for '..', which could be used to leak files outside web_ui
if re.search(r'(\.{2})', self.path) is not None:
self.wfile.write(b'HTTP/1.1 404\r\n\r\n')
xlog.warn('%s %s %s hacking', self.address_string(), self.command, self.path)
return
url_path = urlparse.urlparse(self.path).path
if url_path == '/':
return self.req_index_handler()
url_path_list = self.path.split('/')
if len(url_path_list) >= 3 and url_path_list[1] == "module":
module = url_path_list[2]
if len(url_path_list) >= 4 and url_path_list[3] == "control":
if module not in module_init.proc_handler:
xlog.warn("request %s no module in path", url_path)
self.send_not_found()
return
if "imp" not in module_init.proc_handler[module]:
xlog.warn("request module:%s start fail", module)
self.send_not_found()
return
path = '/' + '/'.join(url_path_list[4:])
controler = module_init.proc_handler[module]["imp"].local.web_control.ControlHandler(self.client_address, self.headers, self.command, path, self.rfile, self.wfile)
controler.do_GET()
return
else:
relate_path = '/'.join(url_path_list[3:])
file_path = os.path.join(root_path, module, "web_ui", relate_path)
if not os.path.isfile(file_path):
return self.send_not_found()
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, module, 'lang'))
content = i18n_translator.render(locale_dir, file_path)
return self.send_response('text/html', content)
else:
file_path = os.path.join(current_path, 'web_ui' + url_path)
xlog.debug ('launcher web_control %s %s %s ', self.address_string(), self.command, self.path)
if os.path.isfile(file_path):
if file_path.endswith('.js'):
mimetype = 'application/javascript'
elif file_path.endswith('.css'):
mimetype = 'text/css'
elif file_path.endswith('.html'):
mimetype = 'text/html'
elif file_path.endswith('.jpg'):
mimetype = 'image/jpeg'
elif file_path.endswith('.png'):
mimetype = 'image/png'
else:
mimetype = 'text/plain'
self.send_file(file_path, mimetype)
elif url_path == '/config':
self.req_config_handler()
elif url_path == '/download':
self.req_download_handler()
elif url_path == '/init_module':
self.req_init_module_handler()
elif url_path == '/quit':
self.send_response('text/html', '{"status":"success"}')
module_init.stop_all()
os._exit(0)
elif url_path == '/restart':
self.send_response('text/html', '{"status":"success"}')
update_from_github.restart_xxnet()
else:
self.send_not_found()
xlog.info('%s "%s %s HTTP/1.1" 404 -', self.address_string(), self.command, self.path)
def req_index_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
try:
target_module = reqs['module'][0]
target_menu = reqs['menu'][0]
except:
if config.get(['modules', 'gae_proxy', 'auto_start'], 0) == 1:
target_module = 'gae_proxy'
target_menu = 'status'
else:
target_module = 'launcher'
target_menu = 'about'
if len(module_menus) == 0:
self.load_module_menus()
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(current_path, 'lang'))
index_content = i18n_translator.render(locale_dir, os.path.join(current_path, "web_ui", "index.html"))
menu_content = ''
for module,v in module_menus:
#logging.debug("m:%s id:%d", module, v['menu_sort_id'])
title = v["module_title"]
menu_content += '<li class="nav-header">%s</li>\n' % title
for sub_id in v['sub_menus']:
sub_title = v['sub_menus'][sub_id]['title']
sub_url = v['sub_menus'][sub_id]['url']
if target_module == module and target_menu == sub_url:
active = 'class="active"'
else:
active = ''
menu_content += '<li %s><a href="/?module=%s&menu=%s">%s</a></li>\n' % (active, module, sub_url, sub_title)
right_content_file = os.path.join(root_path, target_module, "web_ui", target_menu + ".html")
if os.path.isfile(right_content_file):
# i18n code lines (Both the locale dir & the template dir are module-dependent)
locale_dir = os.path.abspath(os.path.join(root_path, target_module, 'lang'))
right_content = i18n_translator.render(locale_dir, os.path.join(root_path, target_module, "web_ui", target_menu + ".html"))
else:
right_content = ""
data = (index_content.decode('utf-8') % (menu_content, right_content.decode('utf-8') )).encode('utf-8')
self.send_response('text/html', data)
def req_config_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
current_version = update_from_github.current_version()
if reqs['cmd'] == ['get_config']:
config.load()
check_update = config.get(["update", "check_update"], 1)
if check_update == 0:
check_update = "dont-check"
elif check_update == 1:
check_update = "stable"
data = '{ "check_update": "%s", "language": "%s", "popup_webui": %d, "allow_remote_connect": %d, "show_systray": %d, "auto_start": %d, "php_enable": %d, "gae_proxy_enable": %d }' %\
(check_update
, config.get(["language"], i18n_translator.lang)
, config.get(["modules", "launcher", "popup_webui"], 1)
, config.get(["modules", "launcher", "allow_remote_connect"], 0)
, config.get(["modules", "launcher", "show_systray"], 1)
, config.get(["modules", "launcher", "auto_start"], 0)
, config.get(["modules", "php_proxy", "auto_start"], 0)
, config.get(["modules", "gae_proxy", "auto_start"], 0))
elif reqs['cmd'] == ['set_config']:
if 'check_update' in reqs:
check_update = reqs['check_update'][0]
if check_update not in ["dont-check", "stable", "test"]:
data = '{"res":"fail, check_update:%s"}' % check_update
else:
config.set(["update", "check_update"], check_update)
config.save()
data = '{"res":"success"}'
elif 'language' in reqs:
language = reqs['language'][0]
if language not in i18n_translator.get_valid_languages():
data = '{"res":"fail, language:%s"}' % language
else:
config.set(["language"], language)
config.save()
i18n_translator.lang = language
self.load_module_menus()
data = '{"res":"success"}'
elif 'popup_webui' in reqs:
popup_webui = int(reqs['popup_webui'][0])
if popup_webui != 0 and popup_webui != 1:
data = '{"res":"fail, popup_webui:%s"}' % popup_webui
else:
config.set(["modules", "launcher", "popup_webui"], popup_webui)
config.save()
data = '{"res":"success"}'
elif 'allow_remote_connect' in reqs:
allow_remote_connect = int(reqs['allow_remote_connect'][0])
if allow_remote_connect != 0 and allow_remote_connect != 1:
data = '{"res":"fail, allow_remote_connect:%s"}' % allow_remote_connect
else:
config.set(["modules", "launcher", "allow_remote_connect"], allow_remote_connect)
config.save()
data = '{"res":"success"}'
xlog.debug("restart web control.")
stop()
time.sleep(1)
start()
xlog.debug("launcher web control restarted.")
elif 'show_systray' in reqs:
show_systray = int(reqs['show_systray'][0])
if show_systray != 0 and show_systray != 1:
data = '{"res":"fail, show_systray:%s"}' % show_systray
else:
config.set(["modules", "launcher", "show_systray"], show_systray)
config.save()
data = '{"res":"success"}'
elif 'auto_start' in reqs:
auto_start = int(reqs['auto_start'][0])
if auto_start != 0 and auto_start != 1:
data = '{"res":"fail, auto_start:%s"}' % auto_start
else:
if auto_start:
autorun.enable()
else:
autorun.disable()
config.set(["modules", "launcher", "auto_start"], auto_start)
config.save()
data = '{"res":"success"}'
elif 'gae_proxy_enable' in reqs :
gae_proxy_enable = int(reqs['gae_proxy_enable'][0])
if gae_proxy_enable != 0 and gae_proxy_enable != 1:
data = '{"res":"fail, gae_proxy_enable:%s"}' % gae_proxy_enable
else:
config.set(["modules", "gae_proxy", "auto_start"], gae_proxy_enable)
config.save()
if gae_proxy_enable:
module_init.start("gae_proxy")
else:
module_init.stop("gae_proxy")
self.load_module_menus()
data = '{"res":"success"}'
elif 'php_enable' in reqs :
php_enable = int(reqs['php_enable'][0])
if php_enable != 0 and php_enable != 1:
data = '{"res":"fail, php_enable:%s"}' % php_enable
else:
config.set(["modules", "php_proxy", "auto_start"], php_enable)
config.save()
if php_enable:
module_init.start("php_proxy")
else:
module_init.stop("php_proxy")
self.load_module_menus()
data = '{"res":"success"}'
else:
data = '{"res":"fail"}'
elif reqs['cmd'] == ['get_new_version']:
versions = update_from_github.get_github_versions()
data = '{"res":"success", "test_version":"%s", "stable_version":"%s", "current_version":"%s"}' % (versions[0][1], versions[1][1], current_version)
xlog.info("%s", data)
elif reqs['cmd'] == ['update_version']:
version = reqs['version'][0]
try:
update_from_github.update_version(version)
data = '{"res":"success"}'
except Exception as e:
xlog.info("update_test_version fail:%r", e)
data = '{"res":"fail", "error":"%s"}' % e
self.send_response('text/html', data)
def req_download_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
if reqs['cmd'] == ['get_progress']:
data = json.dumps(update_from_github.download_progress)
self.send_response('text/html', data)
def req_init_module_handler(self):
req = urlparse.urlparse(self.path).query
reqs = urlparse.parse_qs(req, keep_blank_values=True)
data = ''
try:
module = reqs['module'][0]
config.load()
if reqs['cmd'] == ['start']:
result = module_init.start(module)
data = '{ "module": "%s", "cmd": "start", "result": "%s" }' % (module, result)
elif reqs['cmd'] == ['stop']:
result = module_init.stop(module)
data = '{ "module": "%s", "cmd": "stop", "result": "%s" }' % (module, result)
elif reqs['cmd'] == ['restart']:
result_stop = module_init.stop(module)
result_start = module_init.start(module)
data = '{ "module": "%s", "cmd": "restart", "stop_result": "%s", "start_result": "%s" }' % (module, result_stop, result_start)
except Exception as e:
xlog.exception("init_module except:%s", e)
self.send_response("text/html", data)
process = 0
server = 0
def start():
global process, server
# should use config.yaml to decide which ip to bind to
allow_remote = config.get(["modules", "launcher", "allow_remote_connect"], 0)
host_port = config.get(["modules", "launcher", "control_port"], 8085)
if allow_remote:
host_addr = "0.0.0.0"
else:
host_addr = "127.0.0.1"
xlog.info("begin to start web control")
server = simple_http_server.HTTPServer((host_addr, host_port), Http_Handler)
process = threading.Thread(target=server.serve_forever)
process.setDaemon(True)
process.start()
xlog.info("launcher web control started.")
def stop():
global process, server
if process == 0:
return
xlog.info("begin to exit web control")
server.shutdown()
server.server_close()
process.join()
xlog.info("launcher web control exited.")
process = 0
def http_request(url, method="GET"):
proxy_handler = urllib2.ProxyHandler({})
opener = urllib2.build_opener(proxy_handler)
try:
req = opener.open(url, timeout=30)
return req
except Exception as e:
#logging.exception("web_control http_request:%s fail:%s", url, e)
return False
def confirm_xxnet_exit():
"""suppose xxnet is running, try to close it
"""
is_xxnet_exit = False
xlog.debug("start confirm_xxnet_exit")
for i in range(30):
# gae_proxy(default port:8087)
if http_request("http://127.0.0.1:8087/quit") == False:
xlog.debug("good, xxnet:8087 cleared!")
is_xxnet_exit = True
break
else:
xlog.debug("<%d>: try to terminate xxnet:8087" % i)
time.sleep(1)
for i in range(30):
# web_control(default port:8085)
host_port = config.get(["modules", "launcher", "control_port"], 8085)
req_url = "http://127.0.0.1:{port}/quit".format(port=host_port)
if http_request(req_url) == False:
xlog.debug("good, xxnet:%s clear!" % host_port)
is_xxnet_exit = True
break
else:
xlog.debug("<%d>: try to terminate xxnet:%s" % (i, host_port))
time.sleep(1)
xlog.debug("finished confirm_xxnet_exit")
return is_xxnet_exit
def confirm_module_ready(port):
if port == 0:
xlog.error("confirm_module_ready with port 0")
time.sleep(1)
return False
for i in range(200):
req = http_request("http://127.0.0.1:%d/is_ready" % port)
if req == False:
time.sleep(1)
continue
content = req.read(1024)
req.close()
#logging.debug("cert_import_ready return:%s", content)
if content == "True":
return True
else:
time.sleep(1)
return False
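# Hedged usage note (the port is an assumed example): confirm_module_ready(8087)
# polls http://127.0.0.1:8087/is_ready roughly once a second for up to 200 tries
# and only returns True once the module answers with the literal string "True";
# a port of 0 or a timeout yields False.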
if __name__ == "__main__":
#confirm_xxnet_exit()
http_request("http://getbootstrap.com/dist/js/bootstrap.min.js")
|
bsd-2-clause
|
slashk/goldstone-server
|
goldstone/drfes/filters.py
|
2
|
4350
|
"""DRFES Filters."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework.filters import BaseFilterBackend
class ElasticFilter(BaseFilterBackend):
"""A basic query filter for ES query specification.
Everything will be treated as query enhancements (rather than filters) from
an ES perspective. Conditionals other than AND are not currently
supported.
"""
@staticmethod
def _add_query(param, value, view, queryset, operation='match'):
"""Return a query, preferring the raw field if available.
:param param: the field name in ES
:param value: the field value
:param view: the calling view
:param queryset: the base queryset
:param operation: the query operation
:return: the updated Search object
:rtype Search
"""
if view.Meta.model.field_has_raw(param):
param += ".raw"
return queryset.query(operation, **{param: value})
@staticmethod
def _coerce_value(value):
"""Attempt to coerce a query parameter's value to a more accurate type.
:param value: The value for a query parameter
:type value: str
:return: The original value, possibly coerced by AST
"""
import ast
try:
return ast.literal_eval(value)
except (ValueError, SyntaxError):
return value
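# Hedged examples of the coercion above (plain ast.literal_eval behaviour):
#   ElasticFilter._coerce_value('2')            -> 2
#   ElasticFilter._coerce_value('[1, 2, 3]')    -> [1, 2, 3]
#   ElasticFilter._coerce_value('{"a": 1}')     -> {'a': 1}
#   ElasticFilter._coerce_value('warning')      -> 'warning'  (not a literal, returned as-is)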
def filter_queryset(self, request, queryset, view):
"""Return the queryset enhanced with additional specificity, as
determined by the request's query parameters.
The returned queryset is effectively an AND of the conditions.
For "<term> = <value>" parameters, the value is lowercased if term is a
kind of regular expression parameter. E.g., _all__regexp. Note,
lowercasing isn't done for "'regexp': <dict>" parameters.
:param request: The HTTP request
:type request: Request
:param queryset: The base queryset
:type queryset: Search
:param view: The view
:type view: callable
:return: The base queryset enhanced with additional queries
:rtype: Search
"""
from django.db.models.constants import LOOKUP_SEP
reserved_params = view.reserved_params + \
[view.pagination_class.page_query_param,
view.pagination_class.page_size_query_param]
for param in request.query_params:
# We don't want these in our queryset.
if param in reserved_params:
continue
value = self._coerce_value(request.query_params.get(param))
split_param = param.split(LOOKUP_SEP)
if len(split_param) == 1:
# This is a field = value term.
if split_param[0] in ["regexp", "terms"]:
# The terms and regexp "fields" have a value of a dict of
# field:value terms.
queryset = queryset.query(split_param[0], **value)
else:
# This is a standard match query.
queryset = self._add_query(param, value, view, queryset)
else:
# First term is the field, second term is the query operation.
param = split_param[0]
operation = split_param[1]
# Lowercase the value if it's a regular expression.
if operation == "regexp":
value = value.lower() # pylint: disable=E1101
queryset = self._add_query(param,
value,
view,
queryset,
operation)
return queryset
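# Hedged usage sketch (the request below is hypothetical): given query parameters
# such as
#   ?name=web&level__terms=["error","warning"]&_all__regexp=NOVA.*
# the loop above roughly builds
#   queryset.query('match', name='web')
#           .query('terms', **{'level': ['error', 'warning']})
#           .query('regexp', **{'_all': 'nova.*'})   # regexp values are lowercased
# with '.raw' appended to any field the model reports as having a raw variant.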
|
apache-2.0
|
vitmod/dvbapp
|
skin.py
|
1
|
32930
|
from Tools.Profile import profile
profile("LOAD:ElementTree")
import xml.etree.cElementTree
import os
profile("LOAD:enigma_skin")
from enigma import eSize, ePoint, eRect, gFont, eWindow, eLabel, ePixmap, eWindowStyleManager, \
addFont, gRGB, eWindowStyleSkinned, getDesktop
from Components.config import ConfigSubsection, ConfigText, config
from Components.Converter.Converter import Converter
from Components.Sources.Source import Source, ObsoleteSource
from Tools.Directories import resolveFilename, SCOPE_SKIN, SCOPE_FONTS, SCOPE_CURRENT_SKIN, SCOPE_CONFIG, fileExists, SCOPE_SKIN_IMAGE
from Tools.Import import my_import
from Tools.LoadPixmap import LoadPixmap
from Components.RcModel import rc_model
colorNames = {}
# Predefined fonts, typically used in built-in screens and for components like
# the movie list and so on.
fonts = {
"Body": ("Regular", 18, 22, 16),
"ChoiceList": ("Regular", 20, 24, 18),
}
parameters = {}
def dump(x, i=0):
print " " * i + str(x)
try:
for n in x.childNodes:
dump(n, i + 1)
except:
None
class SkinError(Exception):
def __init__(self, message):
self.msg = message
def __str__(self):
return "{%s}: %s. Please contact the skin's author!" % (config.skin.primary_skin.value, self.msg)
dom_skins = [ ]
def addSkin(name, scope = SCOPE_SKIN):
# read the skin
filename = resolveFilename(scope, name)
if fileExists(filename):
mpath = os.path.dirname(filename) + "/"
try:
dom_skins.append((mpath, xml.etree.cElementTree.parse(filename).getroot()))
except:
print "[SKIN ERROR] error in %s" % filename
return False
else:
return True
return False
# get our own skin_user_skinname.xml file, if it exists
def skin_user_skinname():
name = "skin_user_" + config.skin.primary_skin.value[:config.skin.primary_skin.value.rfind('/')] + ".xml"
filename = resolveFilename(SCOPE_CONFIG, name)
if fileExists(filename):
return name
return None
# we do our best to always select the "right" value
# skins are loaded in order of priority: skin with
# highest priority is loaded last, usually the user-provided
# skin.
# currently, loadSingleSkinData (colors, bordersets etc.)
# are applied one-after-each, in order of ascending priority.
# the dom_skin will keep all screens in descending priority,
# so the first screen found will be used.
# example: loadSkin("nemesis_greenline/skin.xml")
config.skin = ConfigSubsection()
DEFAULT_SKIN = "PLi-HD/skin.xml"
# on SD hardware, PLi-HD will not be available
if not fileExists(resolveFilename(SCOPE_SKIN, DEFAULT_SKIN)):
# in that case, fallback to Magic (which is an SD skin)
DEFAULT_SKIN = "Magic/skin.xml"
config.skin.primary_skin = ConfigText(default=DEFAULT_SKIN)
profile("LoadSkin")
res = None
name = skin_user_skinname()
if name:
res = addSkin(name, SCOPE_CONFIG)
if not name or not res:
addSkin('skin_user.xml', SCOPE_CONFIG)
# some boxes lie about their dimensions
addSkin('skin_box.xml')
# add optional discrete second infobar
addSkin('skin_second_infobar.xml')
# Only one of these is present, compliments of AM_CONDITIONAL
DEFAULT_DISPLAY_SKIN = "skin_display.xml"
config.skin.display_skin = ConfigText(default=DEFAULT_DISPLAY_SKIN)
display_skin_id = 1
try:
print "loading display skin ", config.skin.display_skin.value
if not addSkin(os.path.join('display', config.skin.display_skin.value)):
raise SkinError, "display skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_DISPLAY_SKIN
print "defaulting to standard display skin...", skin
config.skin.display_skin.value = skin
addSkin(os.path.join('display', skin))
del skin
if addSkin(os.path.join('display', 'skin_display96.xml')):
# Color OLED
display_skin_id = 2
addSkin('skin_text.xml')
addSkin('skin_subtitles.xml')
try:
if not addSkin(config.skin.primary_skin.value):
raise SkinError, "primary skin not found"
except Exception, err:
print "SKIN ERROR:", err
skin = DEFAULT_SKIN
if config.skin.primary_skin.value == skin:
skin = 'skin.xml'
print "defaulting to standard skin...", skin
config.skin.primary_skin.value = skin
addSkin(skin)
del skin
addSkin('skin_default.xml')
profile("LoadSkinDefaultDone")
def parseCoordinate(s, e, size=0, font=None):
s = s.strip()
if s == "center":
val = (e - size)/2
elif s == '*':
return None
else:
if s[0] == 'e':
val = e
s = s[1:]
elif s[0] == 'c':
val = e/2
s = s[1:]
else:
val = 0
if s:
if s[-1] == '%':
val += e * int(s[:-1]) / 100
elif s[-1] == 'w':
val += fonts[font][3] * int(s[:-1])
elif s[-1] == 'h':
val += fonts[font][2] * int(s[:-1])
else:
val += int(s)
if val < 0:
val = 0
return val
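# Hedged examples of the coordinate grammar handled above (e is the parent
# extent, size the widget extent, font only matters for the w/h suffixes):
#   parseCoordinate("center", 720, 100) -> 310    # (e - size) / 2
#   parseCoordinate("e-50", 720)        -> 670    # offset from the end
#   parseCoordinate("c+10", 720)        -> 370    # offset from the centre
#   parseCoordinate("50%", 720)         -> 360    # percentage of e
#   parseCoordinate("*", 720)           -> None   # leave unset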
def getParentSize(object, desktop):
size = eSize()
if object:
parent = object.getParent()
# For some widgets (e.g. ScrollLabel) the skin attributes are applied to
# a child widget, instead of to the widget itself. In that case, the parent
# we have here is not the real parent, but it is the main widget.
# We have to go one level higher to get the actual parent.
# We can detect this because the 'parent' will not have a size yet
# (the main widget's size will be calculated internally, as soon as the child
# widget has parsed the skin attributes)
if parent and parent.size().isEmpty():
parent = parent.getParent()
if parent:
size = parent.size()
elif desktop:
#widget has no parent, use desktop size instead for relative coordinates
size = desktop.size()
return size
def parsePosition(s, scale, object = None, desktop = None, size = None):
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width(), size and size.width())
yval = parseCoordinate(y, parentsize.height(), size and size.height())
return ePoint(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseSize(s, scale, object = None, desktop = None):
x, y = s.split(',')
parentsize = eSize()
if object and (x[0] in ('c', 'e') or y[0] in ('c', 'e')):
parentsize = getParentSize(object, desktop)
xval = parseCoordinate(x, parentsize.width())
yval = parseCoordinate(y, parentsize.height())
return eSize(xval * scale[0][0] / scale[0][1], yval * scale[1][0] / scale[1][1])
def parseFont(s, scale):
try:
f = fonts[s]
name = f[0]
size = f[1]
except:
name, size = s.split(';')
return gFont(name, int(size) * scale[0][0] / scale[0][1])
def parseColor(s):
if s[0] != '#':
try:
return colorNames[s]
except:
raise SkinError("color '%s' must be #aarrggbb or valid named color" % (s))
return gRGB(int(s[1:], 0x10))
def collectAttributes(skinAttributes, node, context, skin_path_prefix=None, ignore=(), filenames=frozenset(("pixmap", "pointer", "seek_pointer", "backgroundPixmap", "selectionPixmap", "sliderPixmap", "scrollbarbackgroundPixmap"))):
# walk all attributes
size = None
pos = None
font = None
for attrib, value in node.items():
if attrib not in ignore:
if attrib in filenames:
value = resolveFilename(SCOPE_CURRENT_SKIN, value, path_prefix=skin_path_prefix)
# Bit of a hack this, really. When a window has a flag (e.g. wfNoBorder)
# it needs to be set at least before the size is set, in order for the
# window dimensions to be calculated correctly in all situations.
# If wfNoBorder is applied after the size has been set, the window will fail to clear the title area.
# Similar situation for a scrollbar in a listbox; when the scrollbar setting is applied after
# the size, a scrollbar will not be shown until the selection moves for the first time
if attrib == 'size':
size = value.encode("utf-8")
elif attrib == 'position':
pos = value.encode("utf-8")
elif attrib == 'font':
font = value.encode("utf-8")
skinAttributes.append((attrib, font))
else:
skinAttributes.append((attrib, value.encode("utf-8")))
if pos is not None:
pos, size = context.parse(pos, size, font)
skinAttributes.append(('position', pos))
if size is not None:
skinAttributes.append(('size', size))
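# Hedged illustration of the ordering described above (the element is invented,
# the attribute names come from the skin format): for
#   <screen position="center,center" size="720,576" flags="wfNoBorder" title="Demo" />
# collectAttributes appends ('flags', ...) and ('title', ...) while walking the
# element, and only afterwards ('position', ...) and ('size', ...), so wfNoBorder
# is already in effect when the window geometry is finally applied.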
def morphRcImagePath(value):
if rc_model.rcIsDefault() is False:
if value == '/usr/share/enigma2/skin_default/rc.png' or value == '/usr/share/enigma2/skin_default/rcold.png':
value = rc_model.getRcLocation() + 'rc.png'
elif value == '/usr/share/enigma2/skin_default/rc0.png' or value == '/usr/share/enigma2/skin_default/rc1.png' or value == '/usr/share/enigma2/skin_default/rc2.png':
value = rc_model.getRcLocation() + 'rc.png'
return value
def loadPixmap(path, desktop):
option = path.find("#")
if option != -1:
path = path[:option]
ptr = LoadPixmap(morphRcImagePath(path), desktop)
if ptr is None:
raise SkinError("pixmap file %s not found!" % (path))
return ptr
class AttributeParser:
def __init__(self, guiObject, desktop, scale=((1,1),(1,1))):
self.guiObject = guiObject
self.desktop = desktop
self.scaleTuple = scale
def applyOne(self, attrib, value):
try:
getattr(self, attrib)(value)
except AttributeError:
print "[Skin] Attribute not implemented:", attrib, "value:", value
except SkinError, ex:
print "[Skin] Error:", ex
except:
print "[Skin] Error:", attrib
def applyAll(self, attrs):
for attrib, value in attrs:
self.applyOne(attrib, value)
def conditional(self, value):
pass
def position(self, value):
if isinstance(value, tuple):
self.guiObject.move(ePoint(*value))
else:
self.guiObject.move(parsePosition(value, self.scaleTuple, self.guiObject, self.desktop, self.guiObject.csize()))
def size(self, value):
if isinstance(value, tuple):
self.guiObject.resize(eSize(*value))
else:
self.guiObject.resize(parseSize(value, self.scaleTuple, self.guiObject, self.desktop))
def animationPaused(self, value):
pass
def animationMode(self, value):
self.guiObject.setAnimationMode(
{ "disable": 0x00,
"off": 0x00,
"offshow": 0x10,
"offhide": 0x01,
"onshow": 0x01,
"onhide": 0x10,
}[value])
def title(self, value):
self.guiObject.setTitle(_(value))
def text(self, value):
self.guiObject.setText(_(value))
def font(self, value):
self.guiObject.setFont(parseFont(value, self.scaleTuple))
def zPosition(self, value):
self.guiObject.setZPosition(int(value))
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setPixmap(ptr)
def backgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setBackgroundPicture(ptr)
def selectionPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSelectionPicture(ptr)
def sliderPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setSliderPicture(ptr)
def scrollbarbackgroundPixmap(self, value):
ptr = loadPixmap(value, self.desktop)
self.guiObject.setScrollbarBackgroundPicture(ptr)
def alphatest(self, value):
self.guiObject.setAlphatest(
{ "on": 1,
"off": 0,
"blend": 2,
}[value])
def scale(self, value):
self.guiObject.setScale(1)
def orientation(self, value): # used by eSlider
try:
self.guiObject.setOrientation(*
{ "orVertical": (self.guiObject.orVertical, False),
"orTopToBottom": (self.guiObject.orVertical, False),
"orBottomToTop": (self.guiObject.orVertical, True),
"orHorizontal": (self.guiObject.orHorizontal, False),
"orLeftToRight": (self.guiObject.orHorizontal, False),
"orRightToLeft": (self.guiObject.orHorizontal, True),
}[value])
except KeyError:
print "oprientation must be either orVertical or orHorizontal!"
def valign(self, value):
try:
self.guiObject.setVAlign(
{ "top": self.guiObject.alignTop,
"center": self.guiObject.alignCenter,
"bottom": self.guiObject.alignBottom
}[value])
except KeyError:
print "valign must be either top, center or bottom!"
def halign(self, value):
try:
self.guiObject.setHAlign(
{ "left": self.guiObject.alignLeft,
"center": self.guiObject.alignCenter,
"right": self.guiObject.alignRight,
"block": self.guiObject.alignBlock
}[value])
except KeyError:
print "halign must be either left, center, right or block!"
def textOffset(self, value):
x, y = value.split(',')
self.guiObject.setTextOffset(ePoint(int(x) * self.scaleTuple[0][0] / self.scaleTuple[0][1], int(y) * self.scaleTuple[1][0] / self.scaleTuple[1][1]))
def flags(self, value):
flags = value.split(',')
for f in flags:
try:
fv = eWindow.__dict__[f]
self.guiObject.setFlag(fv)
except KeyError:
print "illegal flag %s!" % f
def backgroundColor(self, value):
self.guiObject.setBackgroundColor(parseColor(value))
def backgroundColorSelected(self, value):
self.guiObject.setBackgroundColorSelected(parseColor(value))
def foregroundColor(self, value):
self.guiObject.setForegroundColor(parseColor(value))
def foregroundColorSelected(self, value):
self.guiObject.setForegroundColorSelected(parseColor(value))
def shadowColor(self, value):
self.guiObject.setShadowColor(parseColor(value))
def selectionDisabled(self, value):
self.guiObject.setSelectionEnable(0)
def transparent(self, value):
self.guiObject.setTransparent(int(value))
def borderColor(self, value):
self.guiObject.setBorderColor(parseColor(value))
def borderWidth(self, value):
self.guiObject.setBorderWidth(int(value))
def scrollbarMode(self, value):
self.guiObject.setScrollbarMode(getattr(self.guiObject, value))
# { "showOnDemand": self.guiObject.showOnDemand,
# "showAlways": self.guiObject.showAlways,
# "showNever": self.guiObject.showNever,
# "showLeft": self.guiObject.showLeft
# }[value])
def enableWrapAround(self, value):
self.guiObject.setWrapAround(True)
def itemHeight(self, value):
self.guiObject.setItemHeight(int(value))
def pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scaleTuple)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(0, ptr, pos)
def seek_pointer(self, value):
(name, pos) = value.split(':')
pos = parsePosition(pos, self.scaleTuple)
ptr = loadPixmap(name, self.desktop)
self.guiObject.setPointer(1, ptr, pos)
def shadowOffset(self, value):
self.guiObject.setShadowOffset(parsePosition(value, self.scaleTuple))
def noWrap(self, value):
self.guiObject.setNoWrap(1)
def applySingleAttribute(guiObject, desktop, attrib, value, scale = ((1,1),(1,1))):
# Someone still using applySingleAttribute?
AttributeParser(guiObject, desktop, scale).applyOne(attrib, value)
def applyAllAttributes(guiObject, desktop, attributes, scale):
AttributeParser(guiObject, desktop, scale).applyAll(attributes)
def loadSingleSkinData(desktop, skin, path_prefix):
"""loads skin data like colors, windowstyle etc."""
assert skin.tag == "skin", "root element in skin must be 'skin'!"
for c in skin.findall("output"):
id = c.attrib.get('id')
if id:
id = int(id)
else:
id = 0
if id == 0: # framebuffer
for res in c.findall("resolution"):
get_attr = res.attrib.get
xres = get_attr("xres")
if xres:
xres = int(xres)
else:
xres = 720
yres = get_attr("yres")
if yres:
yres = int(yres)
else:
yres = 576
bpp = get_attr("bpp")
if bpp:
bpp = int(bpp)
else:
bpp = 32
#print "Resolution:", xres,yres,bpp
from enigma import gMainDC
gMainDC.getInstance().setResolution(xres, yres)
desktop.resize(eSize(xres, yres))
if bpp != 32:
# load palette (not yet implemented)
pass
for skininclude in skin.findall("include"):
filename = skininclude.attrib.get("filename")
if filename:
skinfile = resolveFilename(SCOPE_CURRENT_SKIN, filename, path_prefix=path_prefix)
if not fileExists(skinfile):
skinfile = resolveFilename(SCOPE_SKIN_IMAGE, filename, path_prefix=path_prefix)
if fileExists(skinfile):
print "[SKIN] loading include:", skinfile
loadSkin(skinfile)
for c in skin.findall("colors"):
for color in c.findall("color"):
get_attr = color.attrib.get
name = get_attr("name")
color = get_attr("value")
if name and color:
colorNames[name] = parseColor(color)
#print "Color:", name, color
else:
raise SkinError("need color and name, got %s %s" % (name, color))
for c in skin.findall("fonts"):
for font in c.findall("font"):
get_attr = font.attrib.get
filename = get_attr("filename", "<NONAME>")
name = get_attr("name", "Regular")
scale = get_attr("scale")
if scale:
scale = int(scale)
else:
scale = 100
is_replacement = get_attr("replacement") and True or False
render = get_attr("render")
if render:
render = int(render)
else:
render = 0
resolved_font = resolveFilename(SCOPE_FONTS, filename, path_prefix=path_prefix)
if not fileExists(resolved_font): #when font is not available look at current skin path
skin_path = resolveFilename(SCOPE_CURRENT_SKIN, filename)
if fileExists(skin_path):
resolved_font = skin_path
addFont(resolved_font, name, scale, is_replacement, render)
#print "Font: ", resolved_font, name, scale, is_replacement
for alias in c.findall("alias"):
get = alias.attrib.get
try:
name = get("name")
font = get("font")
size = int(get("size"))
height = int(get("height", size)) # to be calculated some day
width = int(get("width", size))
global fonts
fonts[name] = (font, size, height, width)
except Exception, ex:
print "[SKIN] bad font alias", ex
for c in skin.findall("parameters"):
for parameter in c.findall("parameter"):
get = parameter.attrib.get
try:
name = get("name")
value = get("value")
parameters[name] = map(int, value.split(","))
except Exception, ex:
print "[SKIN] bad parameter", ex
for c in skin.findall("subtitles"):
from enigma import eWidget, eSubtitleWidget
scale = ((1,1),(1,1))
for substyle in c.findall("sub"):
get_attr = substyle.attrib.get
font = parseFont(get_attr("font"), scale)
col = get_attr("foregroundColor")
if col:
foregroundColor = parseColor(col)
haveColor = 1
else:
foregroundColor = gRGB(0xFFFFFF)
haveColor = 0
col = get_attr("borderColor")
if col:
borderColor = parseColor(col)
else:
borderColor = gRGB(0)
borderwidth = get_attr("borderWidth")
if borderwidth is None:
# default: use a subtitle border
borderWidth = 3
else:
borderWidth = int(borderwidth)
face = eSubtitleWidget.__dict__[get_attr("name")]
eSubtitleWidget.setFontStyle(face, font, haveColor, foregroundColor, borderColor, borderWidth)
for windowstyle in skin.findall("windowstyle"):
style = eWindowStyleSkinned()
style_id = windowstyle.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
# defaults
font = gFont("Regular", 20)
offset = eSize(20, 5)
for title in windowstyle.findall("title"):
get_attr = title.attrib.get
offset = parseSize(get_attr("offset"), ((1,1),(1,1)))
font = parseFont(get_attr("font"), ((1,1),(1,1)))
style.setTitleFont(font);
style.setTitleOffset(offset)
#print " ", font, offset
for borderset in windowstyle.findall("borderset"):
bsName = str(borderset.attrib.get("name"))
for pixmap in borderset.findall("pixmap"):
get_attr = pixmap.attrib.get
bpName = get_attr("pos")
filename = get_attr("filename")
if filename and bpName:
png = loadPixmap(resolveFilename(SCOPE_CURRENT_SKIN, filename, path_prefix=path_prefix), desktop)
style.setPixmap(eWindowStyleSkinned.__dict__[bsName], eWindowStyleSkinned.__dict__[bpName], png)
#print " borderset:", bpName, filename
for color in windowstyle.findall("color"):
get_attr = color.attrib.get
colorType = get_attr("name")
color = parseColor(get_attr("color"))
try:
style.setColor(eWindowStyleSkinned.__dict__["col" + colorType], color)
except:
raise SkinError("Unknown color %s" % (colorType))
#pass
#print " color:", type, color
x = eWindowStyleManager.getInstance()
x.setStyle(style_id, style)
for margin in skin.findall("margin"):
style_id = margin.attrib.get("id")
if style_id:
style_id = int(style_id)
else:
style_id = 0
r = eRect(0,0,0,0)
v = margin.attrib.get("left")
if v:
r.setLeft(int(v))
v = margin.attrib.get("top")
if v:
r.setTop(int(v))
v = margin.attrib.get("right")
if v:
r.setRight(int(v))
v = margin.attrib.get("bottom")
if v:
r.setBottom(int(v))
# the "desktop" parameter is hardcoded to the UI screen, so we must ask
# for the one that this actually applies to.
getDesktop(style_id).setMargins(r)
dom_screens = {}
def loadSkin(name, scope = SCOPE_SKIN):
# Now a utility for plugins to add skin data to the screens
global dom_screens, display_skin_id
filename = resolveFilename(scope, name)
if fileExists(filename):
path = os.path.dirname(filename) + "/"
for elem in xml.etree.cElementTree.parse(filename).getroot():
if elem.tag == 'screen':
name = elem.attrib.get('name', None)
if name:
sid = elem.attrib.get('id', None)
if sid and (sid != display_skin_id):
# not for this display
elem.clear()
continue
if name in dom_screens:
print "loadSkin: Screen already defined elsewhere:", name
elem.clear()
else:
dom_screens[name] = (elem, path)
else:
elem.clear()
else:
elem.clear()
def loadSkinData(desktop):
# Kinda hackish, but this is called once by mytest.py
global dom_skins
skins = dom_skins[:]
skins.reverse()
for (path, dom_skin) in skins:
loadSingleSkinData(desktop, dom_skin, path)
for elem in dom_skin:
if elem.tag == 'screen':
name = elem.attrib.get('name', None)
if name:
sid = elem.attrib.get('id', None)
if sid and (sid != display_skin_id):
# not for this display
elem.clear()
continue
if name in dom_screens:
# Kill old versions, save memory
dom_screens[name][0].clear()
dom_screens[name] = (elem, path)
else:
# without name, it's useless!
elem.clear()
else:
# non-screen element, no need for it any longer
elem.clear()
# no longer needed, we know where the screens are now.
del dom_skins
class additionalWidget:
pass
# Class that makes a tuple look like something else. Some plugins just assume
# that size is a string and try to parse it. This class makes that work.
class SizeTuple(tuple):
def split(self, *args):
return (str(self[0]), str(self[1]))
def strip(self, *args):
return '%s,%s' % self
def __str__(self):
return '%s,%s' % self
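# Hedged illustration of why SizeTuple mimics a "w,h" string (values assumed):
#   s = SizeTuple((200, 100))
#   str(s)      -> '200,100'
#   s.split()   -> ('200', '100')   # plugins that parse size strings keep working
#   s[0], s[1]  -> 200, 100         # while normal tuple access still works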
class SkinContext:
def __init__(self, parent=None, pos=None, size=None, font=None):
if parent is not None:
if pos is not None:
pos, size = parent.parse(pos, size, font)
self.x, self.y = pos
self.w, self.h = size
else:
self.x = None
self.y = None
self.w = None
self.h = None
def __str__(self):
return "Context (%s,%s)+(%s,%s) " % (self.x, self.y, self.w, self.h)
def parse(self, pos, size, font):
if pos == "fill":
pos = (self.x, self.y)
size = (self.w, self.h)
self.w = 0
self.h = 0
else:
w,h = size.split(',')
w = parseCoordinate(w, self.w, 0, font)
h = parseCoordinate(h, self.h, 0, font)
if pos == "bottom":
pos = (self.x, self.y + self.h - h)
size = (self.w, h)
self.h -= h
elif pos == "top":
pos = (self.x, self.y)
size = (self.w, h)
self.h -= h
self.y += h
elif pos == "left":
pos = (self.x, self.y)
size = (w, self.h)
self.x += w
self.w -= w
elif pos == "right":
pos = (self.x + self.w - w, self.y)
size = (w, self.h)
self.w -= w
else:
size = (w, h)
pos = pos.split(',')
pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
return (SizeTuple(pos), SizeTuple(size))
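# Hedged worked example of the carving logic above (numbers are assumed):
# starting from a context covering (x, y, w, h) = (0, 0, 720, 576),
#   parse("top", "720,60", None)   -> pos (0, 0),  size (720, 60), context left: (0, 60, 720, 516)
#   parse("left", "200,516", None) -> pos (0, 60), size (200, 516), context left: (200, 60, 520, 516)
#   parse("fill", ...)             -> hands out whatever space is still unclaimed.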
class SkinContextStack(SkinContext):
# A context that stacks things instead of aligning them
def parse(self, pos, size, font):
if pos == "fill":
pos = (self.x, self.y)
size = (self.w, self.h)
else:
w,h = size.split(',')
w = parseCoordinate(w, self.w, 0, font)
h = parseCoordinate(h, self.h, 0, font)
if pos == "bottom":
pos = (self.x, self.y + self.h - h)
size = (self.w, h)
elif pos == "top":
pos = (self.x, self.y)
size = (self.w, h)
elif pos == "left":
pos = (self.x, self.y)
size = (w, self.h)
elif pos == "right":
pos = (self.x + self.w - w, self.y)
size = (w, self.h)
else:
size = (w, h)
pos = pos.split(',')
pos = (self.x + parseCoordinate(pos[0], self.w, size[0], font), self.y + parseCoordinate(pos[1], self.h, size[1], font))
return (SizeTuple(pos), SizeTuple(size))
def readSkin(screen, skin, names, desktop):
if not isinstance(names, list):
names = [names]
# try all skins, the first existing one has priority
global dom_screens
for n in names:
myscreen, path = dom_screens.get(n, (None,None))
if myscreen is not None:
# use this name for debug output
name = n
break
else:
name = "<embedded-in-'%s'>" % screen.__class__.__name__
# otherwise try embedded skin
if myscreen is None:
myscreen = getattr(screen, "parsedSkin", None)
# try uncompiled embedded skin
if myscreen is None and getattr(screen, "skin", None):
skin = screen.skin
print "[SKIN] Parsing embedded skin", name
if (isinstance(skin, tuple)):
for s in skin:
candidate = xml.etree.cElementTree.fromstring(s)
if candidate.tag == 'screen':
sid = candidate.attrib.get('id', None)
if (not sid) or (int(sid) == display_skin_id):
myscreen = candidate
break
else:
print "[SKIN] Hey, no suitable screen!"
else:
myscreen = xml.etree.cElementTree.fromstring(skin)
if myscreen:
screen.parsedSkin = myscreen
if myscreen is None:
print "[SKIN] No skin to read..."
myscreen = screen.parsedSkin = xml.etree.cElementTree.fromstring("<screen></screen>")
screen.skinAttributes = [ ]
skin_path_prefix = getattr(screen, "skin_path", path)
context = SkinContextStack()
s = desktop.bounds()
context.x = s.left()
context.y = s.top()
context.w = s.width()
context.h = s.height()
del s
collectAttributes(screen.skinAttributes, myscreen, context, skin_path_prefix, ignore=("name",))
context = SkinContext(context, myscreen.attrib.get('position'), myscreen.attrib.get('size'))
screen.additionalWidgets = [ ]
screen.renderer = [ ]
visited_components = set()
# now walk all widgets and stuff
def process_none(widget, context):
pass
def process_widget(widget, context):
get_attr = widget.attrib.get
# ok, we either have 1:1-mapped widgets ('old style'), or 1:n-mapped
# widgets (source->renderer).
wname = get_attr('name')
wsource = get_attr('source')
if wname is None and wsource is None:
print "widget has no name and no source!"
return
if wname:
#print "Widget name=", wname
visited_components.add(wname)
# get corresponding 'gui' object
try:
attributes = screen[wname].skinAttributes = [ ]
except:
raise SkinError("component with name '" + wname + "' was not found in skin of screen '" + name + "'!")
# assert screen[wname] is not Source
collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('name',))
elif wsource:
# get corresponding source
#print "Widget source=", wsource
while True: # until we found a non-obsolete source
# parse our current "wsource", which might specify a "related screen" before the dot,
# for example to reference a parent, global or session-global screen.
scr = screen
# resolve all path components
path = wsource.split('.')
while len(path) > 1:
scr = screen.getRelatedScreen(path[0])
if scr is None:
#print wsource
#print name
raise SkinError("specified related screen '" + wsource + "' was not found in screen '" + name + "'!")
path = path[1:]
# resolve the source.
source = scr.get(path[0])
if isinstance(source, ObsoleteSource):
# however, if we found an "obsolete source", issue warning, and resolve the real source.
print "WARNING: SKIN '%s' USES OBSOLETE SOURCE '%s', USE '%s' INSTEAD!" % (name, wsource, source.new_source)
print "OBSOLETE SOURCE WILL BE REMOVED %s, PLEASE UPDATE!" % (source.removal_date)
if source.description:
print source.description
wsource = source.new_source
else:
# otherwise, use that source.
break
if source is None:
raise SkinError("source '" + wsource + "' was not found in screen '" + name + "'!")
wrender = get_attr('render')
if not wrender:
raise SkinError("you must define a renderer with render= for source '%s'" % (wsource))
for converter in widget.findall("convert"):
ctype = converter.get('type')
assert ctype, "'convert'-tag needs a 'type'-attribute"
#print "Converter:", ctype
try:
parms = converter.text.strip()
except:
parms = ""
#print "Params:", parms
converter_class = my_import('.'.join(("Components", "Converter", ctype))).__dict__.get(ctype)
c = None
for i in source.downstream_elements:
if isinstance(i, converter_class) and i.converter_arguments == parms:
c = i
if c is None:
c = converter_class(parms)
c.connect(source)
source = c
renderer_class = my_import('.'.join(("Components", "Renderer", wrender))).__dict__.get(wrender)
renderer = renderer_class() # instantiate renderer
renderer.connect(source) # connect to source
attributes = renderer.skinAttributes = [ ]
collectAttributes(attributes, widget, context, skin_path_prefix, ignore=('render', 'source'))
screen.renderer.append(renderer)
def process_applet(widget, context):
try:
codeText = widget.text.strip()
widgetType = widget.attrib.get('type')
code = compile(codeText, "skin applet", "exec")
except Exception, ex:
raise SkinError("applet failed to compile: " + str(ex))
if widgetType == "onLayoutFinish":
screen.onLayoutFinish.append(code)
else:
raise SkinError("applet type '%s' unknown!" % widgetType)
def process_elabel(widget, context):
w = additionalWidget()
w.widget = eLabel
w.skinAttributes = [ ]
collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
screen.additionalWidgets.append(w)
def process_epixmap(widget, context):
w = additionalWidget()
w.widget = ePixmap
w.skinAttributes = [ ]
collectAttributes(w.skinAttributes, widget, context, skin_path_prefix, ignore=('name',))
screen.additionalWidgets.append(w)
def process_screen(widget, context):
for w in widget.getchildren():
conditional = w.attrib.get('conditional')
if conditional and not [i for i in conditional.split(",") if i in screen.keys()]:
continue
p = processors.get(w.tag, process_none)
try:
p(w, context)
except SkinError, e:
print "[Skin] SKIN ERROR in screen '%s' widget '%s':" % (name, w.tag), e
def process_panel(widget, context):
n = widget.attrib.get('name')
if n:
try:
s = dom_screens[n]
except KeyError:
print "[SKIN] Unable to find screen '%s' referred in screen '%s'" % (n, name)
else:
process_screen(s[0], context)
layout = widget.attrib.get('layout')
if layout == 'stack':
cc = SkinContextStack
else:
cc = SkinContext
try:
c = cc(context, widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'))
except Exception, ex:
raise SkinError("Failed to create skincontext (%s,%s,%s) in %s: %s" % (widget.attrib.get('position'), widget.attrib.get('size'), widget.attrib.get('font'), context, ex) )
process_screen(widget, c)
processors = {
None: process_none,
"widget": process_widget,
"applet": process_applet,
"eLabel": process_elabel,
"ePixmap": process_epixmap,
"panel": process_panel
}
try:
context.x = 0 # reset offsets, all components are relative to screen
context.y = 0 # coordinates.
process_screen(myscreen, context)
except Exception, e:
print "[Skin] SKIN ERROR in %s:" % name, e
from Components.GUIComponent import GUIComponent
nonvisited_components = [x for x in set(screen.keys()) - visited_components if isinstance(x, GUIComponent)]
assert not nonvisited_components, "the following components in %s don't have a skin entry: %s" % (name, ', '.join(nonvisited_components))
# This may look pointless, but it unbinds 'screen' from the nested scope. A better
# solution is to avoid the nested scope above and use the context object to pass
# things around.
screen = None
visited_components = None
|
gpl-2.0
|
Jusedawg/SickRage
|
lib/feedparser/mixin.py
|
22
|
32271
|
# Shared code that is common to the strict and loose feed parsers
# Copyright 2010-2015 Kurt McKee <[email protected]>
# Copyright 2002-2008 Mark Pilgrim
# All rights reserved.
#
# This file is a part of feedparser.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 'AS IS'
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import, unicode_literals
import copy
import re
from xml.sax.saxutils import escape as _xmlescape
# base64 support for Atom feeds that contain embedded binary data
try:
import base64, binascii
except ImportError:
base64 = binascii = None
else:
# Python 3.1 deprecates decodestring in favor of decodebytes
_base64decode = getattr(base64, 'decodebytes', base64.decodestring)
try:
from html.entities import name2codepoint, entitydefs
except ImportError:
from htmlentitydefs import name2codepoint, entitydefs
from .html import _cp1252
from .namespaces import _base, cc, dc, georss, itunes, mediarss, psc
from .sanitizer import _sanitizeHTML, _HTMLSanitizer
from .util import FeedParserDict
from .urls import _urljoin, _makeSafeAbsoluteURI, _resolveRelativeURIs
bytes_ = type(b'')
try:
chr = unichr
except NameError:
pass
class _FeedParserMixin(
_base.Namespace,
cc.Namespace,
dc.Namespace,
georss.Namespace,
itunes.Namespace,
mediarss.Namespace,
psc.Namespace,
):
namespaces = {
'': '',
'http://backend.userland.com/rss': '',
'http://blogs.law.harvard.edu/tech/rss': '',
'http://purl.org/rss/1.0/': '',
'http://my.netscape.com/rdf/simple/0.9/': '',
'http://example.com/newformat#': '',
'http://example.com/necho': '',
'http://purl.org/echo/': '',
'uri/of/echo/namespace#': '',
'http://purl.org/pie/': '',
'http://purl.org/atom/ns#': '',
'http://www.w3.org/2005/Atom': '',
'http://purl.org/rss/1.0/modules/rss091#': '',
'http://webns.net/mvcb/': 'admin',
'http://purl.org/rss/1.0/modules/aggregation/': 'ag',
'http://purl.org/rss/1.0/modules/annotate/': 'annotate',
'http://media.tangent.org/rss/1.0/': 'audio',
'http://backend.userland.com/blogChannelModule': 'blogChannel',
'http://creativecommons.org/ns#license': 'cc',
'http://web.resource.org/cc/': 'cc',
'http://cyber.law.harvard.edu/rss/creativeCommonsRssModule.html': 'creativeCommons',
'http://backend.userland.com/creativeCommonsRssModule': 'creativeCommons',
'http://purl.org/rss/1.0/modules/company': 'co',
'http://purl.org/rss/1.0/modules/content/': 'content',
'http://my.theinfo.org/changed/1.0/rss/': 'cp',
'http://purl.org/dc/elements/1.1/': 'dc',
'http://purl.org/dc/terms/': 'dcterms',
'http://purl.org/rss/1.0/modules/email/': 'email',
'http://purl.org/rss/1.0/modules/event/': 'ev',
'http://rssnamespace.org/feedburner/ext/1.0': 'feedburner',
'http://freshmeat.net/rss/fm/': 'fm',
'http://xmlns.com/foaf/0.1/': 'foaf',
'http://www.w3.org/2003/01/geo/wgs84_pos#': 'geo',
'http://www.georss.org/georss': 'georss',
'http://www.opengis.net/gml': 'gml',
'http://postneo.com/icbm/': 'icbm',
'http://purl.org/rss/1.0/modules/image/': 'image',
'http://www.itunes.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://example.com/DTDs/PodCast-1.0.dtd': 'itunes',
'http://purl.org/rss/1.0/modules/link/': 'l',
'http://search.yahoo.com/mrss': 'media',
# Version 1.1.2 of the Media RSS spec added the trailing slash on the namespace
'http://search.yahoo.com/mrss/': 'media',
'http://madskills.com/public/xml/rss/module/pingback/': 'pingback',
'http://prismstandard.org/namespaces/1.2/basic/': 'prism',
'http://www.w3.org/1999/02/22-rdf-syntax-ns#': 'rdf',
'http://www.w3.org/2000/01/rdf-schema#': 'rdfs',
'http://purl.org/rss/1.0/modules/reference/': 'ref',
'http://purl.org/rss/1.0/modules/richequiv/': 'reqv',
'http://purl.org/rss/1.0/modules/search/': 'search',
'http://purl.org/rss/1.0/modules/slash/': 'slash',
'http://schemas.xmlsoap.org/soap/envelope/': 'soap',
'http://purl.org/rss/1.0/modules/servicestatus/': 'ss',
'http://hacks.benhammersley.com/rss/streaming/': 'str',
'http://purl.org/rss/1.0/modules/subscription/': 'sub',
'http://purl.org/rss/1.0/modules/syndication/': 'sy',
'http://schemas.pocketsoap.com/rss/myDescModule/': 'szf',
'http://purl.org/rss/1.0/modules/taxonomy/': 'taxo',
'http://purl.org/rss/1.0/modules/threading/': 'thr',
'http://purl.org/rss/1.0/modules/textinput/': 'ti',
'http://madskills.com/public/xml/rss/module/trackback/': 'trackback',
'http://wellformedweb.org/commentAPI/': 'wfw',
'http://purl.org/rss/1.0/modules/wiki/': 'wiki',
'http://www.w3.org/1999/xhtml': 'xhtml',
'http://www.w3.org/1999/xlink': 'xlink',
'http://www.w3.org/XML/1998/namespace': 'xml',
'http://podlove.org/simple-chapters': 'psc',
}
_matchnamespaces = {}
can_be_relative_uri = set(['link', 'id', 'wfw_comment', 'wfw_commentrss', 'docs', 'url', 'href', 'comments', 'icon', 'logo'])
can_contain_relative_uris = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
can_contain_dangerous_markup = set(['content', 'title', 'summary', 'info', 'tagline', 'subtitle', 'copyright', 'rights', 'description'])
html_types = ['text/html', 'application/xhtml+xml']
def __init__(self):
if not self._matchnamespaces:
for k, v in self.namespaces.items():
self._matchnamespaces[k.lower()] = v
self.feeddata = FeedParserDict() # feed-level data
self.entries = [] # list of entry-level data
self.version = '' # feed type/version, see SUPPORTED_VERSIONS
self.namespacesInUse = {} # dictionary of namespaces defined by the feed
# the following are used internally to track state;
# this is really out of control and should be refactored
self.infeed = 0
self.inentry = 0
self.incontent = 0
self.intextinput = 0
self.inimage = 0
self.inauthor = 0
self.incontributor = 0
self.inpublisher = 0
self.insource = 0
self.sourcedata = FeedParserDict()
self.contentparams = FeedParserDict()
self._summaryKey = None
self.namespacemap = {}
self.elementstack = []
self.basestack = []
self.langstack = []
self.svgOK = 0
self.title_depth = -1
self.depth = 0
if self.lang:
self.feeddata['language'] = self.lang.replace('_','-')
# A map of the following form:
# {
# object_that_value_is_set_on: {
# property_name: depth_of_node_property_was_extracted_from,
# other_property: depth_of_node_property_was_extracted_from,
# },
# }
self.property_depth_map = {}
super(_FeedParserMixin, self).__init__()
def unknown_starttag(self, tag, attrs):
# increment depth counter
self.depth += 1
# normalize attrs
attrs = [self._normalize_attributes(attr) for attr in attrs]
# track xml:base and xml:lang
attrsD = dict(attrs)
baseuri = attrsD.get('xml:base', attrsD.get('base')) or self.baseuri
if isinstance(baseuri, bytes_):
baseuri = baseuri.decode(self.encoding, 'ignore')
# ensure that self.baseuri is always an absolute URI that
# uses a whitelisted URI scheme (e.g. not `javascript:`)
if self.baseuri:
self.baseuri = _makeSafeAbsoluteURI(self.baseuri, baseuri) or self.baseuri
else:
self.baseuri = _urljoin(self.baseuri, baseuri)
lang = attrsD.get('xml:lang', attrsD.get('lang'))
if lang == '':
# xml:lang could be explicitly set to '', we need to capture that
lang = None
elif lang is None:
# if no xml:lang is specified, use parent lang
lang = self.lang
if lang:
if tag in ('feed', 'rss', 'rdf:RDF'):
self.feeddata['language'] = lang.replace('_','-')
self.lang = lang
self.basestack.append(self.baseuri)
self.langstack.append(lang)
# track namespaces
for prefix, uri in attrs:
if prefix.startswith('xmlns:'):
self.trackNamespace(prefix[6:], uri)
elif prefix == 'xmlns':
self.trackNamespace(None, uri)
# track inline content
if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
# element declared itself as escaped markup, but it isn't really
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
if tag.find(':') != -1:
prefix, tag = tag.split(':', 1)
namespace = self.namespacesInUse.get(prefix, '')
if tag=='math' and namespace=='http://www.w3.org/1998/Math/MathML':
attrs.append(('xmlns',namespace))
if tag=='svg' and namespace=='http://www.w3.org/2000/svg':
attrs.append(('xmlns',namespace))
if tag == 'svg':
self.svgOK += 1
return self.handle_data('<%s%s>' % (tag, self.strattrs(attrs)), escape=0)
# match namespaces
if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
# special hack for better tracking of empty textinput/image elements in illformed feeds
if (not prefix) and tag not in ('title', 'link', 'description', 'name'):
self.intextinput = 0
if (not prefix) and tag not in ('title', 'link', 'description', 'url', 'href', 'width', 'height'):
self.inimage = 0
# call special handler (if defined) or default handler
methodname = '_start_' + prefix + suffix
try:
method = getattr(self, methodname)
return method(attrsD)
except AttributeError:
# Since there's no handler or something has gone wrong we explicitly add the element and its attributes
unknown_tag = prefix + suffix
if len(attrsD) == 0:
# No attributes so merge it into the enclosing dictionary
return self.push(unknown_tag, 1)
else:
# Has attributes so create it in its own dictionary
context = self._getContext()
context[unknown_tag] = attrsD
def unknown_endtag(self, tag):
# match namespaces
if tag.find(':') != -1:
prefix, suffix = tag.split(':', 1)
else:
prefix, suffix = '', tag
prefix = self.namespacemap.get(prefix, prefix)
if prefix:
prefix = prefix + '_'
if suffix == 'svg' and self.svgOK:
self.svgOK -= 1
# call special handler (if defined) or default handler
methodname = '_end_' + prefix + suffix
try:
if self.svgOK:
raise AttributeError()
method = getattr(self, methodname)
method()
except AttributeError:
self.pop(prefix + suffix)
# track inline content
if self.incontent and not self.contentparams.get('type', 'xml').endswith('xml'):
# element declared itself as escaped markup, but it isn't really
if tag in ('xhtml:div', 'div'):
return # typepad does this 10/2007
self.contentparams['type'] = 'application/xhtml+xml'
if self.incontent and self.contentparams.get('type') == 'application/xhtml+xml':
tag = tag.split(':')[-1]
self.handle_data('</%s>' % tag, escape=0)
# track xml:base and xml:lang going out of scope
if self.basestack:
self.basestack.pop()
if self.basestack and self.basestack[-1]:
self.baseuri = self.basestack[-1]
if self.langstack:
self.langstack.pop()
if self.langstack: # and (self.langstack[-1] is not None):
self.lang = self.langstack[-1]
self.depth -= 1
def handle_charref(self, ref):
# called for each character reference, e.g. for '&#160;', ref will be '160'
if not self.elementstack:
return
ref = ref.lower()
if ref in ('34', '38', '39', '60', '62', 'x22', 'x26', 'x27', 'x3c', 'x3e'):
text = '&#%s;' % ref
else:
if ref[0] == 'x':
c = int(ref[1:], 16)
else:
c = int(ref)
text = chr(c).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_entityref(self, ref):
# called for each entity reference, e.g. for '&copy;', ref will be 'copy'
if not self.elementstack:
return
if ref in ('lt', 'gt', 'quot', 'amp', 'apos'):
text = '&%s;' % ref
elif ref in self.entities:
text = self.entities[ref]
if text.startswith('&#') and text.endswith(';'):
return self.handle_entityref(text)
else:
try:
name2codepoint[ref]
except KeyError:
text = '&%s;' % ref
else:
text = chr(name2codepoint[ref]).encode('utf-8')
self.elementstack[-1][2].append(text)
def handle_data(self, text, escape=1):
# called for each block of plain text, i.e. outside of any tag and
# not containing any character or entity references
if not self.elementstack:
return
if escape and self.contentparams.get('type') == 'application/xhtml+xml':
text = _xmlescape(text)
self.elementstack[-1][2].append(text)
def handle_comment(self, text):
# called for each comment, e.g. <!-- insert message here -->
pass
def handle_pi(self, text):
# called for each processing instruction, e.g. <?instruction>
pass
def handle_decl(self, text):
pass
def parse_declaration(self, i):
# override internal declaration handler to handle CDATA blocks
if self.rawdata[i:i+9] == '<![CDATA[':
k = self.rawdata.find(']]>', i)
if k == -1:
# CDATA block began but didn't finish
k = len(self.rawdata)
return k
self.handle_data(_xmlescape(self.rawdata[i+9:k]), 0)
return k+3
else:
k = self.rawdata.find('>', i)
if k >= 0:
return k+1
else:
# We have an incomplete CDATA block.
return k
def mapContentType(self, contentType):
contentType = contentType.lower()
if contentType == 'text' or contentType == 'plain':
contentType = 'text/plain'
elif contentType == 'html':
contentType = 'text/html'
elif contentType == 'xhtml':
contentType = 'application/xhtml+xml'
return contentType
def trackNamespace(self, prefix, uri):
loweruri = uri.lower()
if not self.version:
if (prefix, loweruri) == (None, 'http://my.netscape.com/rdf/simple/0.9/'):
self.version = 'rss090'
elif loweruri == 'http://purl.org/rss/1.0/':
self.version = 'rss10'
elif loweruri == 'http://www.w3.org/2005/atom':
self.version = 'atom10'
if loweruri.find('backend.userland.com/rss') != -1:
# match any backend.userland.com namespace
uri = 'http://backend.userland.com/rss'
loweruri = uri
if loweruri in self._matchnamespaces:
self.namespacemap[prefix] = self._matchnamespaces[loweruri]
self.namespacesInUse[self._matchnamespaces[loweruri]] = uri
else:
self.namespacesInUse[prefix or ''] = uri
def resolveURI(self, uri):
return _urljoin(self.baseuri or '', uri)
def decodeEntities(self, element, data):
return data
def strattrs(self, attrs):
return ''.join([' %s="%s"' % (t[0],_xmlescape(t[1],{'"':'&quot;'})) for t in attrs])
def push(self, element, expectingText):
self.elementstack.append([element, expectingText, []])
def pop(self, element, stripWhitespace=1):
if not self.elementstack:
return
if self.elementstack[-1][0] != element:
return
element, expectingText, pieces = self.elementstack.pop()
if self.version == 'atom10' and self.contentparams.get('type', 'text') == 'application/xhtml+xml':
# remove enclosing child element, but only if it is a <div> and
# only if all the remaining content is nested underneath it.
# This means that the divs would be retained in the following:
# <div>foo</div><div>bar</div>
while pieces and len(pieces)>1 and not pieces[-1].strip():
del pieces[-1]
while pieces and len(pieces)>1 and not pieces[0].strip():
del pieces[0]
if pieces and (pieces[0] == '<div>' or pieces[0].startswith('<div ')) and pieces[-1]=='</div>':
depth = 0
for piece in pieces[:-1]:
if piece.startswith('</'):
depth -= 1
if depth == 0:
break
elif piece.startswith('<') and not piece.endswith('/>'):
depth += 1
else:
pieces = pieces[1:-1]
# Ensure each piece is a str for Python 3
for (i, v) in enumerate(pieces):
if isinstance(v, bytes_):
pieces[i] = v.decode('utf-8')
output = ''.join(pieces)
if stripWhitespace:
output = output.strip()
if not expectingText:
return output
# decode base64 content
if base64 and self.contentparams.get('base64', 0):
try:
output = _base64decode(output)
except binascii.Error:
pass
except binascii.Incomplete:
pass
except TypeError:
# In Python 3, base64 takes and outputs bytes, not str
# This may not be the most correct way to accomplish this
output = _base64decode(output.encode('utf-8')).decode('utf-8')
# resolve relative URIs
if (element in self.can_be_relative_uri) and output:
# do not resolve guid elements with isPermalink="false"
if not element == 'id' or self.guidislink:
output = self.resolveURI(output)
# decode entities within embedded markup
if not self.contentparams.get('base64', 0):
output = self.decodeEntities(element, output)
# some feed formats require consumers to guess
# whether the content is html or plain text
if not self.version.startswith('atom') and self.contentparams.get('type') == 'text/plain':
if self.lookslikehtml(output):
self.contentparams['type'] = 'text/html'
# remove temporary cruft from contentparams
try:
del self.contentparams['mode']
except KeyError:
pass
try:
del self.contentparams['base64']
except KeyError:
pass
is_htmlish = self.mapContentType(self.contentparams.get('type', 'text/html')) in self.html_types
# resolve relative URIs within embedded markup
if is_htmlish and RESOLVE_RELATIVE_URIS:
if element in self.can_contain_relative_uris:
output = _resolveRelativeURIs(output, self.baseuri, self.encoding, self.contentparams.get('type', 'text/html'))
# sanitize embedded markup
if is_htmlish and SANITIZE_HTML:
if element in self.can_contain_dangerous_markup:
output = _sanitizeHTML(output, self.encoding, self.contentparams.get('type', 'text/html'))
if self.encoding and isinstance(output, bytes_):
output = output.decode(self.encoding, 'ignore')
# address common error where people take data that is already
# utf-8, presume that it is iso-8859-1, and re-encode it.
if self.encoding in ('utf-8', 'utf-8_INVALID_PYTHON_3') and not isinstance(output, bytes_):
try:
output = output.encode('iso-8859-1').decode('utf-8')
except (UnicodeEncodeError, UnicodeDecodeError):
pass
# map win-1252 extensions to the proper code points
if not isinstance(output, bytes_):
output = output.translate(_cp1252)
# categories/tags/keywords/whatever are handled in _end_category or _end_tags or _end_itunes_keywords
if element in ('category', 'tags', 'itunes_keywords'):
return output
if element == 'title' and -1 < self.title_depth <= self.depth:
return output
# store output in appropriate place(s)
if self.inentry and not self.insource:
if element == 'content':
self.entries[-1].setdefault(element, [])
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element].append(contentparams)
elif element == 'link':
if not self.inimage:
# query variables in urls in link elements are improperly
# converted from `?a=1&b=2` to `?a=1&b;=2` as if they're
# unhandled character references. fix this special case.
output = output.replace('&amp;', '&')
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
self.entries[-1][element] = output
if output:
self.entries[-1]['links'][-1]['href'] = output
else:
if element == 'description':
element = 'summary'
old_value_depth = self.property_depth_map.setdefault(self.entries[-1], {}).get(element)
if old_value_depth is None or self.depth <= old_value_depth:
self.property_depth_map[self.entries[-1]][element] = self.depth
self.entries[-1][element] = output
if self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
self.entries[-1][element + '_detail'] = contentparams
elif (self.infeed or self.insource):# and (not self.intextinput) and (not self.inimage):
context = self._getContext()
if element == 'description':
element = 'subtitle'
context[element] = output
if element == 'link':
# fix query variables; see above for the explanation
output = re.sub("&([A-Za-z0-9_]+);", "&\g<1>", output)
context[element] = output
context['links'][-1]['href'] = output
elif self.incontent:
contentparams = copy.deepcopy(self.contentparams)
contentparams['value'] = output
context[element + '_detail'] = contentparams
return output
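# A quick illustration of the double-encoding repair in pop() above (hypothetical
# input): a UTF-8 string such as 'café' that was wrongly treated as ISO-8859-1
# and re-encoded arrives here as 'cafÃ©'; 'cafÃ©'.encode('iso-8859-1').decode('utf-8')
# recovers 'café', while text that was never double-encoded raises a Unicode
# error and is left untouched by the surrounding try/except.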
def pushContent(self, tag, attrsD, defaultContentType, expectingText):
self.incontent += 1
if self.lang:
self.lang=self.lang.replace('_','-')
self.contentparams = FeedParserDict({
'type': self.mapContentType(attrsD.get('type', defaultContentType)),
'language': self.lang,
'base': self.baseuri})
self.contentparams['base64'] = self._isBase64(attrsD, self.contentparams)
self.push(tag, expectingText)
def popContent(self, tag):
value = self.pop(tag)
self.incontent -= 1
self.contentparams.clear()
return value
# a number of elements in a number of RSS variants are nominally plain
# text, but this is routinely ignored. This is an attempt to detect
# the most common cases. As false positives often result in silent
# data loss, this function errs on the conservative side.
@staticmethod
def lookslikehtml(s):
# must have a close tag or an entity reference to qualify
if not (re.search(r'</(\w+)>', s) or re.search(r'&#?\w+;', s)):
return
# all tags must be in a restricted subset of valid HTML tags
if any((t for t in re.findall(r'</?(\w+)', s) if t.lower() not in _HTMLSanitizer.acceptable_elements)):
return
# all entities must have been defined as valid HTML entities
if any((e for e in re.findall(r'&(\w+);', s) if e not in entitydefs)):
return
return 1
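# A quick illustration of the lookslikehtml() heuristic (hypothetical inputs):
# '<p>hello</p>' and 'AT&amp;T' qualify (a whitelisted close tag, a known HTML
# entity), whereas plain 'hello world' or an unknown entity such as '&bogus;'
# fall through and return None.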
def _mapToStandardPrefix(self, name):
colonpos = name.find(':')
if colonpos != -1:
prefix = name[:colonpos]
suffix = name[colonpos+1:]
prefix = self.namespacemap.get(prefix, prefix)
name = prefix + ':' + suffix
return name
def _getAttribute(self, attrsD, name):
return attrsD.get(self._mapToStandardPrefix(name))
def _isBase64(self, attrsD, contentparams):
if attrsD.get('mode', '') == 'base64':
return 1
if self.contentparams['type'].startswith('text/'):
return 0
if self.contentparams['type'].endswith('+xml'):
return 0
if self.contentparams['type'].endswith('/xml'):
return 0
return 1
def _itsAnHrefDamnIt(self, attrsD):
href = attrsD.get('url', attrsD.get('uri', attrsD.get('href', None)))
if href:
try:
del attrsD['url']
except KeyError:
pass
try:
del attrsD['uri']
except KeyError:
pass
attrsD['href'] = href
return attrsD
def _save(self, key, value, overwrite=False):
context = self._getContext()
if overwrite:
context[key] = value
else:
context.setdefault(key, value)
def _getContext(self):
if self.insource:
context = self.sourcedata
elif self.inimage and 'image' in self.feeddata:
context = self.feeddata['image']
elif self.intextinput:
context = self.feeddata['textinput']
elif self.inentry:
context = self.entries[-1]
else:
context = self.feeddata
return context
def _save_author(self, key, value, prefix='author'):
context = self._getContext()
context.setdefault(prefix + '_detail', FeedParserDict())
context[prefix + '_detail'][key] = value
self._sync_author_detail()
context.setdefault('authors', [FeedParserDict()])
context['authors'][-1][key] = value
def _save_contributor(self, key, value):
context = self._getContext()
context.setdefault('contributors', [FeedParserDict()])
context['contributors'][-1][key] = value
def _sync_author_detail(self, key='author'):
context = self._getContext()
detail = context.get('%ss' % key, [FeedParserDict()])[-1]
if detail:
name = detail.get('name')
email = detail.get('email')
if name and email:
context[key] = '%s (%s)' % (name, email)
elif name:
context[key] = name
elif email:
context[key] = email
else:
author, email = context.get(key), None
if not author:
return
emailmatch = re.search(r'''(([a-zA-Z0-9\_\-\.\+]+)@((\[[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.)|(([a-zA-Z0-9\-]+\.)+))([a-zA-Z]{2,4}|[0-9]{1,3})(\]?))(\?subject=\S+)?''', author)
if emailmatch:
email = emailmatch.group(0)
# probably a better way to do the following, but it passes all the tests
author = author.replace(email, '')
author = author.replace('()', '')
author = author.replace('<>', '')
author = author.replace('&lt;&gt;', '')
author = author.strip()
if author and (author[0] == '('):
author = author[1:]
if author and (author[-1] == ')'):
author = author[:-1]
author = author.strip()
if author or email:
context.setdefault('%s_detail' % key, detail)
if author:
detail['name'] = author
if email:
detail['email'] = email
def _addTag(self, term, scheme, label):
context = self._getContext()
tags = context.setdefault('tags', [])
if (not term) and (not scheme) and (not label):
return
value = FeedParserDict(term=term, scheme=scheme, label=label)
if value not in tags:
tags.append(value)
def _start_tags(self, attrsD):
# This is a completely-made up element. Its semantics are determined
# only by a single feed that precipitated bug report 392 on Google Code.
# In short, this is junk code.
self.push('tags', 1)
def _end_tags(self):
for term in self.pop('tags').split(','):
self._addTag(term.strip(), None, None)
|
gpl-3.0
|
Allow2CEO/browser-ios
|
brave/node_modules/ad-block/vendor/depot_tools/third_party/gsutil/gslib/addlhelp/projects.py
|
51
|
4876
|
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gslib.help_provider import HELP_NAME
from gslib.help_provider import HELP_NAME_ALIASES
from gslib.help_provider import HELP_ONE_LINE_SUMMARY
from gslib.help_provider import HelpProvider
from gslib.help_provider import HELP_TEXT
from gslib.help_provider import HelpType
from gslib.help_provider import HELP_TYPE
_detailed_help_text = ("""
<B>OVERVIEW</B>
This section discusses how to work with projects in Google Cloud Storage.
For more information about using the Google APIs Console to administer
project memberships (which are automatically included in ACLs for buckets
you create) see https://code.google.com/apis/console#:storage:access.
<B>PROJECT MEMBERS AND PERMISSIONS</B>
There are three groups of users associated with each project:
- Project Owners are allowed to list, create, and delete buckets,
and can also perform administrative tasks like adding and removing team
members and changing billing. The project owners group is the owner
of all buckets within a project, regardless of who may be the original
bucket creator.
- Project Editors are allowed to list, create, and delete buckets.
- All Project Team Members are allowed to list buckets within a project.
These projects make it easy to set up a bucket and start uploading objects
with access control appropriate for a project at your company, as the three
group memberships can be configured by your administrative staff. Control
over projects and their associated memberships is provided by the Google
APIs Console (https://code.google.com/apis/console).
<B>HOW PROJECT MEMBERSHIP IS REFLECTED IN BUCKET ACLS</B>
When you create a bucket without specifying an ACL the bucket is given a
"project-private" ACL, which grants the permissions described in the previous
section. Here's an example of such an ACL:
<AccessControlList>
<Owner>
<ID>
00b4903a9740e42c29800f53bd5a9a62a2f96eb3f64a4313a115df3f3a776bf7
</ID>
</Owner>
<Entries>
<Entry>
<Scope type="GroupById">
<ID>
00b4903a9740e42c29800f53bd5a9a62a2f96eb3f64a4313a115df3f3a776bf7
</ID>
</Scope>
<Permission>
FULL_CONTROL
</Permission>
</Entry>
<Entry>
<Scope type="GroupById">
<ID>
00b4903a977fd817e9da167bc81306489181a110456bb635f466d71cf90a0d51
</ID>
</Scope>
<Permission>
FULL_CONTROL
</Permission>
</Entry>
<Entry>
<Scope type="GroupById">
<ID>
00b4903a974898cc8fc309f2f2835308ba3d3df1b889d3fc7e33e187d52d8e71
</ID>
</Scope>
<Permission>
READ
</Permission>
</Entry>
</Entries>
</AccessControlList>
The three "GroupById" scopes are the canonical IDs for the Project Owners,
Project Editors, and All Project Team Members groups.
You can edit the bucket ACL if you want to (see "gsutil help setacl"),
but for many cases you'll never need to, and instead can change group
membership via the APIs console.
<B>IDENTIFYING PROJECTS WHEN CREATING AND LISTING BUCKETS</B>
When you create a bucket or list your buckets, you need to provide the
project ID under which you want to create or list (using the gsutil mb -p option or
the gsutil ls -p option, respectively). The project's name shown in the
Google APIs Console is a user-friendly name that you can choose; this is
not the project ID required by the gsutil mb and ls commands. To find the
project ID, go to the Storage Access pane in the Google APIs Console. Your
project ID is listed under Identifying your project.
""")
class CommandOptions(HelpProvider):
"""Additional help about Access Control Lists."""
help_spec = {
# Name of command or auxiliary help info for which this help applies.
HELP_NAME : 'projects',
# List of help name aliases.
HELP_NAME_ALIASES : ['apis console', 'console', 'dev console', 'project',
'proj', 'project-id'],
# Type of help:
HELP_TYPE : HelpType.ADDITIONAL_HELP,
# One line summary of this help.
HELP_ONE_LINE_SUMMARY : 'Working with projects',
# The full help text.
HELP_TEXT : _detailed_help_text,
}
|
mpl-2.0
|
PostCenter/botlang
|
botlang/environment/primitives/strings.py
|
1
|
1298
|
import re
from urllib.parse import quote
from unidecode import unidecode
def simplify_text(text):
return unidecode(text)\
.lower()\
.replace("'", '')\
.replace('&', '')
def email_censor(value):
if '@' not in value:
return value
start, end = value.split('@')
return "{}@{}".format(
word_censor(start),
word_censor(end)
)
def word_censor(value):
if len(value) <= 1:
return value
half = int(len(value) / 2)
censored = value[0:half] + '*' * (len(value) - half)
return censored
def pattern_match(pattern, message):
if re.match(pattern, message):
return True
return False
def divide_text(max_chars, text):
if len(text) <= max_chars:
return [text]
texts = []
for p in re.split('\n', text):
stripped_p = p.strip()
if len(stripped_p) > 0:
texts.append(stripped_p)
return texts
STRING_OPS = {
'split': str.split,
'join': str.join,
'plain': simplify_text,
'uppercase': str.upper,
'lowercase': str.lower,
'capitalize': str.capitalize,
'replace': str.replace,
'trim': str.strip,
'match?': pattern_match,
'divide-text': divide_text,
'url-quote': quote,
'email-censor': email_censor,
}
|
mit
|
linktlh/Toontown-journey
|
toontown/coghq/LawOfficeManagerAI.py
|
4
|
2131
|
import random
import DistributedLawOfficeAI
import DistributedStageAI
from direct.directnotify import DirectNotifyGlobal
from direct.showbase import DirectObject
from toontown.coghq import StageLayout
from toontown.toonbase import ToontownGlobals
StageId2Layouts = {
ToontownGlobals.LawbotStageIntA: (0, 1, 2),
ToontownGlobals.LawbotStageIntB: (3, 4, 5),
ToontownGlobals.LawbotStageIntC: (6, 7, 8),
ToontownGlobals.LawbotStageIntD: (9, 10, 11)
}
class LawOfficeManagerAI(DirectObject.DirectObject):
notify = DirectNotifyGlobal.directNotify.newCategory('LawOfficeManagerAI')
lawOfficeId = None
def __init__(self, air):
DirectObject.DirectObject.__init__(self)
self.air = air
def getDoId(self):
return 0
def createLawOffice(self, StageId, entranceId, players):
for avId in players:
if bboard.has('StageId-%s' % avId):
StageId = bboard.get('StageId-%s' % avId)
break
floor = 0
layoutIndex = None
for avId in players:
if bboard.has('stageRoom-%s' % avId):
roomId = bboard.get('stageRoom-%s' % avId)
for lt in StageId2Layouts[StageId]:
for i in xrange(StageLayout.getNumFloors(lt)):
layout = StageLayout.StageLayout(StageId, i, stageLayout = lt)
if roomId in layout.getRoomIds():
layoutIndex = lt
floor = i
else:
StageRoomSpecs = StageRoomSpecs
roomName = StageRoomSpecs.CashbotStageRoomId2RoomName[roomId]
LawOfficeManagerAI.notify.warning('room %s (%s) not found in any floor of Stage %s' % (roomId, roomName, StageId))
StageZone = self.air.allocateZone()
if layoutIndex is None:
layoutIndex = random.choice(StageId2Layouts[StageId])
Stage = DistributedStageAI.DistributedStageAI(self.air, StageId, StageZone, floor, players, layoutIndex)
Stage.generateWithRequired(StageZone)
return StageZone
|
apache-2.0
|
davidharrigan/django
|
tests/utils_tests/test_datastructures.py
|
262
|
4154
|
"""
Tests for stuff in django.utils.datastructures.
"""
import copy
from django.test import SimpleTestCase
from django.utils import six
from django.utils.datastructures import (
DictWrapper, ImmutableList, MultiValueDict, MultiValueDictKeyError,
OrderedSet,
)
class OrderedSetTests(SimpleTestCase):
def test_bool(self):
# Refs #23664
s = OrderedSet()
self.assertFalse(s)
s.add(1)
self.assertTrue(s)
def test_len(self):
s = OrderedSet()
self.assertEqual(len(s), 0)
s.add(1)
s.add(2)
s.add(2)
self.assertEqual(len(s), 2)
class MultiValueDictTests(SimpleTestCase):
def test_multivaluedict(self):
d = MultiValueDict({'name': ['Adrian', 'Simon'],
'position': ['Developer']})
self.assertEqual(d['name'], 'Simon')
self.assertEqual(d.get('name'), 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
self.assertEqual(
sorted(six.iteritems(d)),
[('name', 'Simon'), ('position', 'Developer')]
)
self.assertEqual(
sorted(six.iterlists(d)),
[('name', ['Adrian', 'Simon']), ('position', ['Developer'])]
)
six.assertRaisesRegex(self, MultiValueDictKeyError, 'lastname',
d.__getitem__, 'lastname')
self.assertEqual(d.get('lastname'), None)
self.assertEqual(d.get('lastname', 'nonexistent'), 'nonexistent')
self.assertEqual(d.getlist('lastname'), [])
self.assertEqual(d.getlist('doesnotexist', ['Adrian', 'Simon']),
['Adrian', 'Simon'])
d.setlist('lastname', ['Holovaty', 'Willison'])
self.assertEqual(d.getlist('lastname'), ['Holovaty', 'Willison'])
self.assertEqual(sorted(six.itervalues(d)),
['Developer', 'Simon', 'Willison'])
def test_appendlist(self):
d = MultiValueDict()
d.appendlist('name', 'Adrian')
d.appendlist('name', 'Simon')
self.assertEqual(d.getlist('name'), ['Adrian', 'Simon'])
def test_copy(self):
for copy_func in [copy.copy, lambda d: d.copy()]:
d1 = MultiValueDict({
"developers": ["Carl", "Fred"]
})
self.assertEqual(d1["developers"], "Fred")
d2 = copy_func(d1)
d2.update({"developers": "Groucho"})
self.assertEqual(d2["developers"], "Groucho")
self.assertEqual(d1["developers"], "Fred")
d1 = MultiValueDict({
"key": [[]]
})
self.assertEqual(d1["key"], [])
d2 = copy_func(d1)
d2["key"].append("Penguin")
self.assertEqual(d1["key"], ["Penguin"])
self.assertEqual(d2["key"], ["Penguin"])
def test_dict_translation(self):
mvd = MultiValueDict({
'devs': ['Bob', 'Joe'],
'pm': ['Rory'],
})
d = mvd.dict()
self.assertEqual(sorted(six.iterkeys(d)), sorted(six.iterkeys(mvd)))
for key in six.iterkeys(mvd):
self.assertEqual(d[key], mvd[key])
self.assertEqual({}, MultiValueDict().dict())
class ImmutableListTests(SimpleTestCase):
def test_sort(self):
d = ImmutableList(range(10))
# AttributeError: ImmutableList object is immutable.
self.assertRaisesMessage(AttributeError,
'ImmutableList object is immutable.', d.sort)
self.assertEqual(repr(d), '(0, 1, 2, 3, 4, 5, 6, 7, 8, 9)')
def test_custom_warning(self):
d = ImmutableList(range(10), warning="Object is immutable!")
self.assertEqual(d[1], 1)
# AttributeError: Object is immutable!
self.assertRaisesMessage(AttributeError,
'Object is immutable!', d.__setitem__, 1, 'test')
class DictWrapperTests(SimpleTestCase):
def test_dictwrapper(self):
f = lambda x: "*%s" % x
d = DictWrapper({'a': 'a'}, f, 'xx_')
self.assertEqual(
"Normal: %(a)s. Modified: %(xx_a)s" % d,
'Normal: a. Modified: *a'
)
|
bsd-3-clause
|
xinwu/bosi-1
|
etc/t6/python_template/dpid.py
|
4
|
2232
|
#!/usr/bin/python
# Print the MAC address of the first non-loopback NIC.
# Based on http://programmaticallyspeaking.com/getting-network-interfaces-in-python.html.
# Based on getifaddrs.py from pydlnadms [http://code.google.com/p/pydlnadms/].
from socket import AF_INET, AF_INET6, AF_PACKET, inet_ntop
from ctypes import (
Structure, Union, POINTER,
pointer, get_errno, cast,
c_ushort, c_byte, c_void_p, c_char_p, c_uint, c_int, c_uint16, c_uint32
)
import ctypes.util
import ctypes
IFF_LOOPBACK = 8
class struct_sockaddr(Structure):
_fields_ = [
('sa_family', c_ushort),
('sa_data', c_byte * 14),]
class struct_sockaddr_ll(Structure):
_fields_ = [
('sll_family', c_ushort),
('sll_protocol', c_uint16),
('sll_ifindex', c_int),
('sll_hatype', c_ushort),
('sll_pkttype', c_byte),
('sll_halen', c_byte),
('sll_addr', c_byte * 8)]
class union_ifa_ifu(Union):
_fields_ = [
('ifu_broadaddr', POINTER(struct_sockaddr)),
('ifu_dstaddr', POINTER(struct_sockaddr)),]
class struct_ifaddrs(Structure):
pass
struct_ifaddrs._fields_ = [
('ifa_next', POINTER(struct_ifaddrs)),
('ifa_name', c_char_p),
('ifa_flags', c_uint),
('ifa_addr', POINTER(struct_sockaddr)),
('ifa_netmask', POINTER(struct_sockaddr)),
('ifa_ifu', union_ifa_ifu),
('ifa_data', c_void_p),]
libc = ctypes.CDLL(ctypes.util.find_library('c'))
def ifap_iter(ifap):
ifa = ifap.contents
while True:
yield ifa
if not ifa.ifa_next:
break
ifa = ifa.ifa_next.contents
def get_mac():
ifap = POINTER(struct_ifaddrs)()
result = libc.getifaddrs(pointer(ifap))
if result != 0:
raise OSError(get_errno())
del result
try:
for ifa in ifap_iter(ifap):
if ifa.ifa_flags & IFF_LOOPBACK:
continue
sa = cast(ifa.ifa_addr, POINTER(struct_sockaddr_ll)).contents
if sa.sll_family != AF_PACKET:
continue
mac = ':'.join("%02x" % (x & 0xff) for x in sa.sll_addr[:sa.sll_halen])
return mac
finally:
libc.freeifaddrs(ifap)
if __name__ == '__main__':
print get_mac()
|
apache-2.0
|
saurabh6790/OFF-RISLIB
|
webnotes/install_lib/setup_public_folder.py
|
34
|
1160
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
# make public folders
from __future__ import unicode_literals
import os
import webnotes
def make(site=None):
"""make public folder symlinks if missing"""
from webnotes.utils import get_site_base_path, get_base_path, get_path
webnotes.init(site=site)
site_path = get_site_base_path() if site else get_base_path()
# setup standard folders
for param in (("public_path", "public"), ("backup_path", "public/backups"), ("files_path", "public/files")):
path = os.path.join(site_path, webnotes.conf.get(param[0], param[1]))
if not os.path.exists(path):
os.mkdir(path)
# setup js and css folders
if not site:
for folder in ("js", "css"):
path = get_path(webnotes.conf.get("public_path", "public"), folder)
if not os.path.exists(path):
os.mkdir(path)
os.chdir(webnotes.conf.get("public_path", "public"))
symlinks = [
["app", "../app/public"],
["lib", "../lib/public"],
]
for link in symlinks:
if not os.path.exists(link[0]) and os.path.exists(link[1]):
os.symlink(link[1], link[0])
os.chdir("..")
|
mit
|
KeepSafe/aiohttp
|
examples/background_tasks.py
|
3
|
1813
|
#!/usr/bin/env python3
"""Example of aiohttp.web.Application.on_startup signal handler"""
import asyncio
import aioredis
from aiohttp import web
async def websocket_handler(request):
ws = web.WebSocketResponse()
await ws.prepare(request)
request.app['websockets'].append(ws)
try:
async for msg in ws:
print(msg)
await asyncio.sleep(1)
finally:
request.app['websockets'].remove(ws)
return ws
async def on_shutdown(app):
for ws in app['websockets']:
await ws.close(code=999, message='Server shutdown')
async def listen_to_redis(app):
try:
sub = await aioredis.create_redis(('localhost', 6379), loop=app.loop)
ch, *_ = await sub.subscribe('news')
async for msg in ch.iter(encoding='utf-8'):
# Forward message to all connected websockets:
for ws in app['websockets']:
await ws.send_str('{}: {}'.format(ch.name, msg))
print("message in {}: {}".format(ch.name, msg))
except asyncio.CancelledError:
pass
finally:
print('Cancel Redis listener: close connection...')
await sub.unsubscribe(ch.name)
await sub.quit()
print('Redis connection closed.')
async def start_background_tasks(app):
app['redis_listener'] = app.loop.create_task(listen_to_redis(app))
async def cleanup_background_tasks(app):
print('cleanup background tasks...')
app['redis_listener'].cancel()
await app['redis_listener']
def init():
app = web.Application()
app['websockets'] = []
app.router.add_get('/news', websocket_handler)
app.on_startup.append(start_background_tasks)
app.on_cleanup.append(cleanup_background_tasks)
app.on_shutdown.append(on_shutdown)
return app
web.run_app(init())
|
apache-2.0
|
wersoo/omim
|
3party/freetype/src/tools/docmaker/docbeauty.py
|
877
|
2642
|
#!/usr/bin/env python
#
# DocBeauty (c) 2003, 2004, 2008 David Turner <[email protected]>
#
# This program is used to beautify the documentation comments used
# in the FreeType 2 public headers.
#
from sources import *
from content import *
from utils import *
import utils
import sys, os, time, string, getopt
content_processor = ContentProcessor()
def beautify_block( block ):
if block.content:
content_processor.reset()
markups = content_processor.process_content( block.content )
text = []
first = 1
for markup in markups:
text.extend( markup.beautify( first ) )
first = 0
# now beautify the documentation "borders" themselves
lines = [" /*************************************************************************"]
for l in text:
lines.append( " *" + l )
lines.append( " */" )
block.lines = lines
def usage():
print "\nDocBeauty 0.1 Usage information\n"
print " docbeauty [options] file1 [file2 ...]\n"
print "using the following options:\n"
print " -h : print this page"
print " -b : backup original files with the 'orig' extension"
print ""
print " --backup : same as -b"
def main( argv ):
"""main program loop"""
global output_dir
try:
opts, args = getopt.getopt( sys.argv[1:], \
"hb", \
["help", "backup"] )
except getopt.GetoptError:
usage()
sys.exit( 2 )
if args == []:
usage()
sys.exit( 1 )
# process options
#
output_dir = None
do_backup = None
for opt in opts:
if opt[0] in ( "-h", "--help" ):
usage()
sys.exit( 0 )
if opt[0] in ( "-b", "--backup" ):
do_backup = 1
# create context and processor
source_processor = SourceProcessor()
# retrieve the list of files to process
file_list = make_file_list( args )
for filename in file_list:
source_processor.parse_file( filename )
for block in source_processor.blocks:
beautify_block( block )
new_name = filename + ".new"
ok = None
try:
file = open( new_name, "wt" )
for block in source_processor.blocks:
for line in block.lines:
file.write( line )
file.write( "\n" )
file.close()
except:
ok = 0
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
|
apache-2.0
|
southpawtech/TACTIC-DEV
|
src/pyasm/prod/prod_test.py
|
6
|
1795
|
#!/usr/bin/python
###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
from pyasm.search import *
from pyasm.prod.biz import *
import unittest
class ProdTest(unittest.TestCase):
def setUp(my):
db = DbContainer.get("prod")
db.start()
def tearDown(my):
db = DbContainer.get("prod")
db.rollback()
#db.commit()
def create_production(my):
'''function to create a test production'''
# create the sequence
seq_code = "000TST"
seq_desc = "Test Sequence"
sequence = Sequence.create(seq_code,seq_desc)
# create a shot
shot_code = "0000"
shot_desc = "Test Shot"
shot = Shot.create(sequence, shot_code, shot_desc)
# create some assets
cow_code = "chr998"
cow_name = "cow"
cow_asset_type = "chr"
cow_desc = "It's a cow!"
cow = Asset.create( cow_code, cow_name, cow_asset_type, cow_desc )
pig_code = "chr999"
pig_name = "pig"
pig_asset_type = "chr"
pig_desc = "It's a pig!"
pig = Asset.create( pig_code, pig_name, pig_asset_type, pig_desc )
# add these assets to the shot
shot.add_asset(cow,"cow_left")
shot.add_asset(cow,"cow_right")
shot.add_asset(pig,"piggy")
instances = shot.get_all_instances()
my.assertEquals(3, len(instances))
def test_prod(my):
my.create_production()
if __name__ == "__main__":
unittest.main()
|
epl-1.0
|
YACOWS/opps
|
opps/articles/admin.py
|
3
|
3965
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .models import Post, PostRelated, Album, Link
from .forms import PostAdminForm, AlbumAdminForm, LinkAdminForm
from opps.containers.admin import (
ContainerAdmin, ContainerImageInline, ContainerRelatedInline)
from opps.core.admin import apply_opps_rules, HaystackModelAdmin
from opps.core.permissions.admin import AdminViewPermission
@apply_opps_rules('articles')
class PostRelatedInline(admin.TabularInline):
model = PostRelated
fk_name = 'post'
raw_id_fields = ['related']
actions = None
ordering = ('order',)
extra = 1
classes = ('collapse',)
verbose_name = _(u'Related post')
verbose_name_plural = _(u'Related posts')
@apply_opps_rules('articles')
class PostAdmin(HaystackModelAdmin, ContainerAdmin, AdminViewPermission):
form = PostAdminForm
inlines = [ContainerImageInline, ContainerRelatedInline]
search_fields = ['title', 'headline', 'slug', 'channel_name']
raw_id_fields = ['main_image', 'channel',
'mirror_channel', 'albums']
ordering = ('-date_available',)
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'title', 'slug', 'get_http_absolute_url',
'short_url')}),
(_(u'Content'), {
'fields': ('hat', 'short_title', 'headline', 'content',
('main_image', 'main_image_caption',
'image_thumb'), 'source', 'tags')}),
(_(u'Custom'), {
'fields': ('json',)}),
(_(u'Relationships'), {
'fields': ('channel', 'mirror_channel', 'albums',)}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available',
'show_on_root_channel', 'in_containerboxes')}),
)
@apply_opps_rules('articles')
class AlbumAdmin(HaystackModelAdmin, ContainerAdmin, AdminViewPermission):
form = AlbumAdminForm
inlines = [ContainerImageInline, ContainerRelatedInline]
list_display = ['title', 'channel', 'images_count',
'date_available', 'published', 'preview_url']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'title', 'slug', 'get_http_absolute_url',
'short_url',)}),
(_(u'Content'), {
'fields': ('hat', 'short_title', 'headline',
('main_image', 'main_image_caption',
'image_thumb'), 'tags')}),
(_(u'Custom'), {
'fields': ('json',)}),
(_(u'Relationships'), {
'fields': ('channel', 'mirror_channel')}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available',
'show_on_root_channel')}),
)
@apply_opps_rules('articles')
class LinkAdmin(ContainerAdmin, AdminViewPermission):
form = LinkAdminForm
raw_id_fields = ['container', 'channel',
'mirror_channel', 'main_image']
fieldsets = (
(_(u'Identification'), {
'fields': ('site', 'title', 'slug', 'get_http_absolute_url',
'short_url',)}),
(_(u'Content'), {
'fields': ('hat', 'short_title', 'headline', 'url', 'container',
('main_image', 'main_image_caption',
'image_thumb'), 'tags')}),
(_(u'Custom'), {
'fields': ('json',)}),
(_(u'Relationships'), {
'fields': ('channel', 'mirror_channel')}),
(_(u'Publication'), {
'classes': ('extrapretty'),
'fields': ('published', 'date_available',
'show_on_root_channel')}),
)
admin.site.register(Post, PostAdmin)
admin.site.register(Album, AlbumAdmin)
admin.site.register(Link, LinkAdmin)
|
mit
|
jacquerie/inspire-next
|
inspirehep/modules/arxiv/utils.py
|
6
|
1264
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
def etree_to_dict(tree):
"""Translate etree into dictionary.
:param tree: etree dictionary object
:type tree: <http://lxml.de/api/lxml.etree-module.html>
"""
d = {tree.tag.split('}')[1]: map(
etree_to_dict, tree.iterchildren()
) or tree.text}
return d
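# A minimal sketch of the expected shape (hypothetical input; Python 2 semantics,
# where map() returns a list): for an lxml element tagged
# '{http://www.w3.org/2005/Atom}entry' whose only child is a
# '{http://www.w3.org/2005/Atom}title' element containing 'Example',
# etree_to_dict() returns {'entry': [{'title': 'Example'}]}; a childless
# element maps to its text instead.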
|
gpl-3.0
|
danbob123/profitpy
|
profit/lib/widgets/plotitemdialog.py
|
18
|
27336
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase <[email protected]>
# Distributed under the terms of the GNU General Public License v2
##
#
# This module defines the PlotItemDialog class for edit and display
# of plot item pens and plot curves.
#
##
from PyQt4.QtCore import QVariant, Qt, pyqtSignature
from PyQt4.QtGui import QBrush, QColor, QColorDialog, QDialog, QIcon
from PyQt4.QtGui import QPainter, QPen, QPixmap
from PyQt4.Qwt5 import QwtPlot, QwtPlotCurve, QwtPlotMarker, QwtSymbol
from profit.lib import Settings
from profit.lib.gui import colorIcon, complementColor
from profit.lib.widgets.ui_plotitemdialog import Ui_PlotItemDialog
penStyles = [
(Qt.SolidLine, 'Solid'),
(Qt.DashLine, 'Dash'),
(Qt.DotLine, 'Dot'),
(Qt.DashDotLine, 'Dash Dot'),
(Qt.DashDotDotLine, 'Dash Dot Dot'),
]
lineStyles = [
(QwtPlotCurve.NoCurve, 'No Line'),
(QwtPlotCurve.Lines, 'Line'),
(QwtPlotCurve.Sticks, 'Sticks'),
(QwtPlotCurve.Steps, 'Steps'),
(QwtPlotCurve.Dots, 'Dots'),
]
symbolStyles = [
(QwtSymbol.NoSymbol, 'No Symbol'),
(QwtSymbol.Ellipse, 'Ellipse'),
(QwtSymbol.Rect, 'Rectangle'),
(QwtSymbol.Diamond, 'Diamond'),
(QwtSymbol.Triangle, 'Triangle'),
(QwtSymbol.DTriangle, 'Triangle Down'),
(QwtSymbol.UTriangle, 'Triangle Up'),
(QwtSymbol.LTriangle, 'Triangle Left'),
(QwtSymbol.RTriangle, 'Triangle Right'),
(QwtSymbol.Cross, 'Cross'),
(QwtSymbol.XCross, 'Cross Diagonal'),
(QwtSymbol.HLine, 'Line Horizontal'),
(QwtSymbol.VLine, 'Line Vertical'),
(QwtSymbol.Star1, 'Star 1'),
(QwtSymbol.Star2, 'Star 2'),
(QwtSymbol.Hexagon, 'Hexagon'),
]
brushStyles = [
(Qt.NoBrush, 'None'),
(Qt.SolidPattern, 'Solid'),
(Qt.Dense1Pattern, 'Extremely Dense'),
(Qt.Dense2Pattern, 'Very Dense'),
(Qt.Dense3Pattern, 'Somewhat Dense'),
(Qt.Dense4Pattern, 'Half Dense'),
(Qt.Dense5Pattern, 'Somewhat Sparse'),
(Qt.Dense6Pattern, 'Very Sparse'),
(Qt.Dense7Pattern, 'Extremely Sparse'),
(Qt.HorPattern, 'Horizontal Lines'),
(Qt.VerPattern, 'Vertical Lines'),
(Qt.CrossPattern, 'Crossing Horizontal and Vertical Lines'),
(Qt.BDiagPattern, 'Backward Diagonal Lines'),
(Qt.FDiagPattern, 'Forward Diagonal Lines'),
(Qt.DiagCrossPattern, 'Crossing Diagonal Lines'),
]
class PenStylePixmap(QPixmap):
""" Pixmap type for creating pen style icons.
"""
def __init__(self):
""" Constructor.
"""
QPixmap.__init__(self, 32, 18)
self.fill(QColor(Qt.white))
def paintStyle(self, painter, style):
""" Draws an example of specified pen style.
@param painter QPainter instance
@param style QPen style
@return None
"""
painter.begin(self)
pen = QPen(style)
pen.setWidth(2)
painter.setPen(pen)
ymid = self.height() / 2
painter.drawLine(0, ymid, self.width(), ymid)
painter.end()
class BrushStylePixmap(QPixmap):
""" Pixmap type for creating brush style icons.
"""
def __init__(self):
""" Constructor.
"""
QPixmap.__init__(self, 32, 18)
self.fill(QColor(Qt.black))
def paintStyle(self, painter, style):
""" Draws an example of specified brush style.
@param painter QPainter instance
@param style QBrush style
@return None
"""
white = QColor(Qt.white)
brush = QBrush(style)
brush.setColor(white)
pen = QPen(white)
pen.setWidth(2)
painter.begin(self)
painter.setBrush(brush)
painter.setPen(pen)
painter.drawRect(0, 0, self.width(), self.height())
painter.end()
class LineStylePixmap(QPixmap):
""" Pixmap type for creating plot curve style icons.
"""
def __init__(self):
""" Constructor.
"""
QPixmap.__init__(self, 18, 18)
self.fill(QColor(Qt.white))
def paintStyle(self, painter, style):
""" Draws an example of specified curve style.
@param painter QPainter instance; ignored
@param style QwtCurve style
@return None
"""
plot = SamplePlot()
plot.curve.setStyle(style)
plot.resize(self.size())
plot.replot()
plot.print_(self)
class SymbolStylePixmap(QPixmap):
""" Pixmap type for creating plot symbol style icons.
"""
def __init__(self):
""" Constructor.
"""
QPixmap.__init__(self, 18, 18)
self.fill(QColor(Qt.white))
def paintStyle(self, painter, style):
""" Draws an example of specified plot symbol style.
@param painter QPainter instance
@param style QwtSymbol style
@return None
"""
brush = QBrush(QColor(Qt.white))
pen = QPen(QColor(Qt.black))
size = self.size()
symbol = QwtSymbol(style, brush, pen, size)
painter.begin(self)
rect = self.rect()
rect.adjust(2, 2, -2, -2)
symbol.draw(painter, rect)
painter.end()
class SamplePlot(QwtPlot):
""" Plot type with simplified appearance and builtin data.
"""
y = [0, 1, 0.5, 1.5]
x = range(len(y))
def __init__(self):
""" Constructor.
"""
QwtPlot.__init__(self)
self.setupPlot(self)
@classmethod
def setupPlot(cls, plot):
""" Configure a plot widget.
The Qt Designer tool doesn't provide a way to promote QwtPlot
widgets to custom widgets, so this class method
is provided to operate on instances created in a ui file.
@param plot QwtPlot instance
@return None
"""
plot.enableAxis(plot.yLeft, False)
plot.enableAxis(plot.xBottom, False)
plot.setCanvasBackground(QColor(Qt.white))
canvas = plot.canvas()
canvas.setFrameStyle(canvas.NoFrame)
plot.curve = QwtPlotCurve()
plot.curve.attach(plot)
plot.curve.setData(cls.x, cls.y)
pen = QPen(Qt.black)
pen.setWidth(0)
plot.curve.setPen(pen)
def comboCurrentData(combo, cast):
""" Locates current item data in a combobox and returns a cast for it.
@param combo QComboBox instance
@param cast type cast for item data
@return combobox current item data
"""
data = combo.itemData(combo.currentIndex()).toInt()[0]
return cast(data)
def fillStyleFunction(pixmapType, stylesDefault):
""" Creates function for filling combo with style names, icons, and data.
@param pixmapType class for rendering style icons
@param stylesDefault default style parameter value for returned function
@return function for populating a style combobox
"""
def fillFunction(combo, current, styles=stylesDefault):
""" Populates combobox with style names, icons, and data.
@param combo QComboBox instance
@param current style to set as current item
@return None
"""
if combo.count():
index = combo.findData(QVariant(current))
combo.setCurrentIndex(index)
else:
painter = QPainter()
for index, (style, name) in enumerate(styles):
pixmap = pixmapType()
pixmap.paintStyle(painter, style)
combo.addItem(QIcon(pixmap), name, QVariant(style))
if style == current:
combo.setCurrentIndex(index)
combo.setIconSize(pixmap.size())
return fillFunction
##
# Function for populating a pen style combobox.
fillPenStyles = fillStyleFunction(PenStylePixmap, penStyles)
##
# Function for populating a brush style combobox.
fillBrushStyles = fillStyleFunction(BrushStylePixmap, brushStyles)
##
# Function for populating a curve line style combobox.
fillLineStyles = fillStyleFunction(LineStylePixmap, lineStyles)
##
# Function for populating a symbol style combobox.
fillSymbolStyles = fillStyleFunction(SymbolStylePixmap, symbolStyles)
class PlotItemDialog(QDialog, Ui_PlotItemDialog):
""" Dialog for editing plot item pens and plot curves.
Clients can use the 'applyToCurve' method after the dialog is
shown to update the curve to match the dialog.
"""
def __init__(self, item, marker=None, parent=None):
""" Constructor.
@param item QwtMarker, QwtPlotCurve or QPen instance
@param parent ancestor of this widget
"""
QDialog.__init__(self, parent)
self.setupUi(self)
if isinstance(item, QwtPlotCurve):
if isinstance(marker, QwtPlotMarker):
self.setupFromCurve(item)
self.setupFromMarker(marker)
self.sectionList.takeItem(1)
self.sectionStack.removeWidget(self.curvePage)
self.setWindowTitle('Edit Plot Marker')
else:
self.setupFromCurve(item)
self.sectionList.takeItem(3)
self.sectionStack.removeWidget(self.linePage)
self.linePage = None
self.setWindowTitle('Edit Plot Curve')
elif isinstance(item, QPen):
self.sectionList.takeItem(3)
self.sectionList.takeItem(2)
self.sectionList.takeItem(1)
self.setupFromPen(item)
self.setWindowTitle('Edit Plot Item Pen')
else:
raise TypeError('item not curve or pen')
def setupFromMarker(self, marker):
""" Configures this dialog for marker display and edit.
@param marker QwtPlotMarker instance
@return None
"""
self.markerSample = sample = QwtPlotMarker()
sample.setLineStyle(marker.lineStyle())
sample.setLinePen(marker.linePen())
sample.setSymbol(marker.symbol())
curve = self.plotSample.curve
sample.setAxis(curve.xAxis(), curve.yAxis())
try:
data = curve.data()
size = data.size()
x, y = data.x(int(size/2)), data.y(int(size/2))
except (Exception, ), ex:
x = y = 1
sample.setValue(x, y)
self.setupPenPage(sample.linePen())
self.setupMarkerPage(sample)
self.setupSymbolPage(sample)
sample.attach(self.plotSample)
def setupFromPen(self, pen):
""" Configures this dialog for only pen display and edit.
@param pen QPen instance
@return None
"""
self.plotSampleGroup.setHidden(True)
self.setupPenPage(pen)
def setupFromCurve(self, curve):
""" Configures this dialog for curve display and edit.
@param curve QwtPlotCurve instance
@return None
"""
self.setupPenPage(curve.pen())
self.setupCurvePage(curve)
self.setupSymbolPage(curve)
SamplePlot.setupPlot(self.plotSample)
self.plotSample.setCanvasBackground(curve.plot().canvasBackground())
data = curve.data()
if data.size():
self.plotSample.curve.setData(data)
self.applyToCurve(self.plotSample.curve)
self.penSampleGroup.setVisible(False)
def applyToCurve(self, curve):
""" Applies values in this dialog to specified curve.
@param curve QwtPlotCurve instance
@return None
"""
curve.setPen(QPen(self.selectedPen))
linestyle = comboCurrentData(self.lineStyle, curve.CurveStyle)
curve.setStyle(linestyle)
curve.setBaseline(self.areaFillBaseline.value())
brush = QBrush()
if self.areaFill.isChecked():
style = comboCurrentData(self.areaFillStyle, Qt.BrushStyle)
brush.setStyle(style)
brush.setColor(self.areaFillColor.color)
curve.setBrush(brush)
if linestyle == QwtPlotCurve.Steps:
curve.setCurveAttribute(curve.Inverted,
self.curveAttributeInverted.checkState()==Qt.Checked)
elif linestyle == QwtPlotCurve.Lines:
curve.setCurveAttribute(curve.Fitted,
self.curveAttributeFitted.checkState()==Qt.Checked)
curve.setPaintAttribute(curve.PaintFiltered,
self.paintAttributeFiltered.checkState()==Qt.Checked)
curve.setPaintAttribute(curve.ClipPolygons,
self.paintAttributeClipPolygons.checkState()==Qt.Checked)
symbol = QwtSymbol()
style = comboCurrentData(self.symbolStyle, symbol.Style)
symbol.setStyle(style)
symbol.setSize(self.symbolWidth.value(), self.symbolHeight.value())
pen = QPen()
pen.setStyle(comboCurrentData(self.symbolPenStyle, Qt.PenStyle))
pen.setColor(self.symbolPenColor.color)
pen.setWidth(self.symbolPenWidth.value())
symbol.setPen(pen)
brush = QBrush()
if self.symbolFill.isChecked():
style = comboCurrentData(self.symbolFillStyle, Qt.BrushStyle)
brush.setStyle(style)
brush.setColor(self.symbolFillColor.color)
symbol.setBrush(brush)
curve.setSymbol(symbol)
def applyToMarker(self, marker):
""" Applies values in this dialog to specified marker.
@param marker QwtPlotMarker instance
@return None
"""
marker.setLinePen(QPen(self.selectedPen))
symbol = QwtSymbol()
style = comboCurrentData(self.symbolStyle, symbol.Style)
symbol.setStyle(style)
symbol.setSize(
self.symbolWidth.value(), self.symbolHeight.value())
pen = QPen()
pen.setStyle(comboCurrentData(self.symbolPenStyle, Qt.PenStyle))
pen.setColor(self.symbolPenColor.color)
pen.setWidth(self.symbolPenWidth.value())
symbol.setPen(pen)
brush = QBrush()
if self.symbolFill.isChecked():
style = comboCurrentData(self.symbolFillStyle, Qt.BrushStyle)
brush.setStyle(style)
brush.setColor(self.symbolFillColor.color)
symbol.setBrush(brush)
marker.setSymbol(symbol)
if self.noLine.isChecked():
style = marker.NoLine
elif self.horizontalLine.isChecked():
style = marker.HLine
elif self.crossLine.isChecked():
style = marker.Cross
else:
style = marker.VLine
marker.setLineStyle(style)
def setupMarkerPage(self, marker):
""" Configures the marker line display and edit page.
@param marker QwtPlotMarker instance
@return None
"""
style = marker.lineStyle()
if style == marker.NoLine:
self.noLine.setChecked(True)
elif style == marker.HLine:
self.horizontalLine.setChecked(True)
elif style == marker.VLine:
self.verticalLine.setChecked(True)
elif style == marker.Cross:
self.crossLine.setChecked(True)
def setupPenPage(self, pen):
""" Configures the pen display and edit page.
@param pen QPen instance
@return None
"""
self.selectedPen = QPen(pen or QPen())
fillPenStyles(self.penStyle, pen.style())
self.penColor.color = color = pen.color()
self.penColor.setIcon(colorIcon(color))
self.penWidth.setValue(pen.width())
self.penSample.installEventFilter(self)
def setupCurvePage(self, curve):
""" Configures the curve display and edit page.
@param curve QwtPlotCurve instance
@return None
"""
brush = curve.brush()
current = brush.style()
fillLineStyles(self.lineStyle, curve.style())
fillBrushStyles(self.areaFillStyle, current)
self.areaFill.setChecked(current != Qt.NoBrush)
self.areaFillColor.color = color = curve.brush().color()
self.areaFillColor.setIcon(colorIcon(color))
self.areaFillBaseline.setValue(curve.baseline())
self.curveAttributeInverted.setChecked(
curve.testCurveAttribute(curve.Inverted))
self.curveAttributeFitted.setChecked(
curve.testCurveAttribute(curve.Fitted))
self.paintAttributeFiltered.setChecked(
curve.testPaintAttribute(curve.PaintFiltered))
self.paintAttributeClipPolygons.setChecked(
curve.testPaintAttribute(curve.ClipPolygons))
def setupSymbolPage(self, item):
""" Configures the symbol display and edit page.
@param curve QwtPlotCurve instance
@return None
"""
symbol = item.symbol()
brush = symbol.brush()
pen = symbol.pen()
fillSymbolStyles(self.symbolStyle, symbol.style())
fillBrushStyles(self.symbolFillStyle, brush.style())
self.symbolFillColor.color = color = brush.color()
self.symbolFillColor.setIcon(colorIcon(color))
self.symbolFill.setChecked(brush != Qt.NoBrush)
fillPenStyles(self.symbolPenStyle, pen.style())
self.symbolPenColor.color = color = pen.color()
self.symbolPenColor.setIcon(colorIcon(color))
self.symbolPenWidth.setValue(pen.width())
size = symbol.size()
w = size.width()
h = size.height()
self.symbolWidth.setValue(w)
self.symbolHeight.setValue(h)
self.symbolSyncSize.setChecked(w==h)
havesymbol = symbol.style() != QwtSymbol.NoSymbol
self.symbolFill.setEnabled(havesymbol)
self.symbolSizeGroup.setEnabled(havesymbol)
self.symbolOutlineGroup.setEnabled(havesymbol)
def updatePlotSample(self):
""" Messages the plot sample to replot if it's visible.
@return None
"""
if self.plotSampleGroup.isVisible():
if self.linePage:
self.applyToMarker(self.markerSample)
else:
self.applyToCurve(self.plotSample.curve)
self.plotSample.replot()
def selectColor(self, widget):
""" Displays color selection dialog for a widget.
@param widget object with 'color' attribute and 'setIcon' method
@return new color if dialog accepted, otherwise None
"""
color = QColorDialog.getColor(widget.color, self)
if color.isValid():
widget.color = color
widget.setIcon(colorIcon(color))
return color
def eventFilter(self, watched, event):
""" Filters framework events for another object.
In this implementation, we trap paint events sent to the pen
sample label widget and draw its contents based on the
currently configured pen options.
@param watched QObject instance
@param event QEvent instance
@return True if event handled, False otherwise
"""
if watched == self.penSample:
if event.type() == event.Paint:
watched.paintEvent(event)
rect = watched.rect()
painter = QPainter()
painter.begin(watched)
comp = complementColor(self.selectedPen.color())
painter.fillRect(rect, QBrush(comp))
x1 = y1 = y2 = rect.height()/2
x2 = rect.width() - y1
painter.setPen(self.selectedPen)
painter.drawLine(x1, y1, x2, y2)
painter.end()
return True
else:
return False
else:
return QDialog.eventFilter(self, watched, event)
## pen page signal handlers
@pyqtSignature('int')
def on_penStyle_activated(self, index):
""" Signal handler for pen style combobox item activation.
@param index row number of selected item.
@return None
"""
value, okay = self.penStyle.itemData(index).toInt()
if okay:
self.selectedPen.setStyle(Qt.PenStyle(value))
self.penSample.update()
self.updatePlotSample()
@pyqtSignature('')
def on_penColor_clicked(self):
""" Signal handler for pen color button clicks.
@return None
"""
color = self.selectColor(self.penColor)
if color:
self.selectedPen.setColor(color)
self.penSample.update()
self.updatePlotSample()
@pyqtSignature('int')
def on_penWidth_valueChanged(self, value):
""" Signal handler for pen width spinbox changes.
@param value new value for spinbox
@return None
"""
self.selectedPen.setWidth(value)
self.penSample.update()
self.updatePlotSample()
## curve page signal handlers
@pyqtSignature('int')
def on_lineStyle_currentIndexChanged(self, index):
""" Signal handler for curve line style combobox item activation.
@param index row number of selected item.
@return None
"""
value, okay = self.lineStyle.itemData(index).toInt()
if okay:
self.curveAttributeInverted.setEnabled(value==QwtPlotCurve.Steps)
self.curveAttributeFitted.setEnabled(value==QwtPlotCurve.Lines)
hascurve = value != QwtPlotCurve.NoCurve
self.areaFill.setEnabled(hascurve)
self.curveAttributesGroup.setEnabled(hascurve)
self.updatePlotSample()
@pyqtSignature('bool')
def on_areaFill_toggled(self, checked):
""" Signal handler for area fill group checkbox toggle.
@param checked new value for checkbox
@return None
"""
self.updatePlotSample()
@pyqtSignature('int')
def on_areaFillStyle_currentIndexChanged(self, index):
""" Signal handler for area fill brush style combobox item activation.
@param index row number of selected item.
@return None
"""
self.updatePlotSample()
@pyqtSignature('')
def on_areaFillColor_clicked(self):
""" Signal handler for area fill color button clicks.
@return None
"""
self.selectColor(self.areaFillColor)
self.updatePlotSample()
@pyqtSignature('double')
def on_areaFillBaseline_valueChanged(self, value):
""" Signal handler for curve baseline spinbox changes.
@param value new value for spinbox
@return None
"""
self.updatePlotSample()
@pyqtSignature('int')
def on_curveAttributeInverted_stateChanged(self, state):
""" Signal handler for inverted curve checkbox state changes.
@return None
"""
self.updatePlotSample()
@pyqtSignature('int')
def on_curveAttributeFitted_stateChanged(self, state):
""" Signal handler for fitted curve checkbox state changes.
@return None
"""
self.updatePlotSample()
@pyqtSignature('int')
def on_paintAttributeFiltered_stateChanged(self, state):
""" Signal handler for paint filtered checkbox state changes.
@return None
"""
self.updatePlotSample()
@pyqtSignature('int')
def on_paintAttributeClipPolygons_stateChanged(self, state):
""" Signal handler for paint clip polys checkbox state changes.
@return None
"""
self.updatePlotSample()
## symbol page signal handlers
@pyqtSignature('int')
def on_symbolStyle_currentIndexChanged(self, index):
""" Signal handler for symbol style combobox item activation.
@param index row number of selected item.
@return None
"""
value, okay = self.symbolStyle.itemData(index).toInt()
if okay:
havesymbol = value != QwtSymbol.NoSymbol
self.symbolFill.setEnabled(havesymbol)
self.symbolSizeGroup.setEnabled(havesymbol)
self.symbolOutlineGroup.setEnabled(havesymbol)
self.updatePlotSample()
@pyqtSignature('bool')
def on_symbolFill_toggled(self, checked):
""" Signal handler for symbol fill group checkbox toggle.
@param checked new value for checkbox
@return None
"""
self.updatePlotSample()
@pyqtSignature('int')
def on_symbolFillStyle_currentIndexChanged(self, index):
""" Signal handler for symbol brush style combobox item activation.
@param index row number of selected item.
@return None
"""
self.updatePlotSample()
@pyqtSignature('')
def on_symbolFillColor_clicked(self):
""" Signal handler for symbol fill color button clicks.
@return None
"""
self.selectColor(self.symbolFillColor)
self.updatePlotSample()
@pyqtSignature('int')
def on_symbolWidth_valueChanged(self, value):
""" Signal handler for symbol width spinbox changes.
@param value new value for spinbox
@return None
"""
if self.symbolSyncSize.checkState() == Qt.Checked:
self.symbolHeight.setValue(value)
self.updatePlotSample()
@pyqtSignature('int')
def on_symbolHeight_valueChanged(self, value):
""" Signal handler for symbol height spinbox changes.
@param value new value for spinbox
@return None
"""
if self.symbolSyncSize.checkState() == Qt.Checked:
self.symbolWidth.setValue(value)
self.updatePlotSample()
@pyqtSignature('int')
def on_symbolSyncSize_stateChanged(self, state):
""" Signal handler for sync symbol w/h checkbox state changes.
@return None
"""
if state == Qt.Checked:
value = max(self.symbolWidth.value(), self.symbolHeight.value())
self.symbolWidth.setValue(value)
self.symbolHeight.setValue(value)
@pyqtSignature('int')
def on_symbolPenStyle_currentIndexChanged(self, index):
""" Signal handler for symbol pen style combobox item activation.
@param index row number of selected item.
@return None
"""
self.updatePlotSample()
@pyqtSignature('')
def on_symbolPenColor_clicked(self):
""" Signal handler for symbol outline color button clicks.
@return None
"""
self.selectColor(self.symbolPenColor)
self.updatePlotSample()
@pyqtSignature('int')
def on_symbolPenWidth_valueChanged(self, value):
""" Signal handler for symbol pen width spinbox changes.
@param value new value for spinbox
@return None
"""
self.updatePlotSample()
@pyqtSignature('bool')
def on_noLine_toggled(self, checked):
""" Signal handler for no marker line radio.
@param checked ignored
@return None
"""
self.updatePlotSample()
@pyqtSignature('bool')
def on_horizontalLine_toggled(self, checked):
""" Signal handler for horizontal marker line radio.
@param checked ignored
@return None
"""
self.updatePlotSample()
@pyqtSignature('bool')
def on_verticalLine_toggled(self, checked):
""" Signal handler for vertical marker line radio.
@param checked ignored
@return None
"""
self.updatePlotSample()
@pyqtSignature('bool')
def on_crossLine_toggled(self, checked):
""" Signal handler for cross marker line radio.
@param checked ignored
@return None
"""
self.updatePlotSample()
|
gpl-2.0
|
cselis86/edx-platform
|
lms/djangoapps/courseware/migrations/0005_auto__add_offlinecomputedgrade__add_unique_offlinecomputedgrade_user_c.py
|
194
|
8410
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'OfflineComputedGrade'
db.create_table('courseware_offlinecomputedgrade', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)),
('updated', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, db_index=True, blank=True)),
('gradeset', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
))
db.send_create_signal('courseware', ['OfflineComputedGrade'])
# Adding unique constraint on 'OfflineComputedGrade', fields ['user', 'course_id']
db.create_unique('courseware_offlinecomputedgrade', ['user_id', 'course_id'])
# Adding model 'OfflineComputedGradeLog'
db.create_table('courseware_offlinecomputedgradelog', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('course_id', self.gf('django.db.models.fields.CharField')(max_length=255, db_index=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, db_index=True, blank=True)),
('seconds', self.gf('django.db.models.fields.IntegerField')(default=0)),
('nstudents', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal('courseware', ['OfflineComputedGradeLog'])
def backwards(self, orm):
# Removing unique constraint on 'OfflineComputedGrade', fields ['user', 'course_id']
db.delete_unique('courseware_offlinecomputedgrade', ['user_id', 'course_id'])
# Deleting model 'OfflineComputedGrade'
db.delete_table('courseware_offlinecomputedgrade')
# Deleting model 'OfflineComputedGradeLog'
db.delete_table('courseware_offlinecomputedgradelog')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'courseware.offlinecomputedgrade': {
'Meta': {'unique_together': "(('user', 'course_id'),)", 'object_name': 'OfflineComputedGrade'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'gradeset': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'courseware.offlinecomputedgradelog': {
'Meta': {'object_name': 'OfflineComputedGradeLog'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nstudents': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'seconds': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'courseware.studentmodule': {
'Meta': {'unique_together': "(('student', 'module_state_key', 'course_id'),)", 'object_name': 'StudentModule'},
'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'done': ('django.db.models.fields.CharField', [], {'default': "'na'", 'max_length': '8', 'db_index': 'True'}),
'grade': ('django.db.models.fields.FloatField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'max_grade': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'db_index': 'True', 'blank': 'True'}),
'module_state_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_column': "'module_id'", 'db_index': 'True'}),
'module_type': ('django.db.models.fields.CharField', [], {'default': "'problem'", 'max_length': '32', 'db_index': 'True'}),
'state': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'student': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
}
}
complete_apps = ['courseware']
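# Illustrative usage (not part of the generated migration): with South
# installed, this schema change would typically be applied with something like
#   ./manage.py migrate courseware 0005
# and rolled back by migrating back to the previous migration in the series.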
|
agpl-3.0
|
maxsands1503/e3chal
|
node_modules/node-gyp/gyp/pylib/gyp/flock_tool.py
|
1835
|
1748
|
#!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
class FlockTool(object):
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
if sys.platform.startswith('aix'):
# Python on AIX is compiled with LARGEFILE support, which changes the
# struct size.
op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
else:
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
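# Illustrative invocation (assumed, mirroring how the Makefile generator calls
# this helper as gyp-flock-tool): the first argument selects the Exec* method,
# so emulating
#   flock <lockfile> <command...>
# would look roughly like
#   python flock_tool.py flock /tmp/link.lock ld -o out main.o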
|
gpl-2.0
|
BinPy/BinPy
|
BinPy/examples/source/ic/Series_4000/IC4025.py
|
5
|
1254
|
# -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <headingcell level=2>
# Usage of IC 4025
# <codecell>
from __future__ import print_function
from BinPy import *
# <codecell>
# Usage of IC 4025:
ic = IC_4025()
print(ic.__doc__)
# <codecell>
# The Pin configuration is:
inp = {1: 1, 2: 1, 3: 0, 4: 0, 5: 0, 7: 0, 8: 1, 11: 0, 12: 1, 13: 1, 14: 1}
# Pin initinalization
# Powering up the IC - using -- ic.setIC({14: 1, 7: 0})
ic.setIC({14: 1, 7: 0})
# Setting the inputs of the ic
ic.setIC(inp)
# Draw the IC with the current configuration
ic.drawIC()
# <codecell>
# Run the IC with the current configuration using -- print ic.run() --
# Note that ic.run() returns a dict of pin configurations similar to the input dict
print (ic.run())
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# <codecell>
# Setting the outputs to the current IC configuration using --
# ic.setIC(ic.run()) --
ic.setIC(ic.run())
# Draw the final configuration
ic.drawIC()
# Run the IC
print (ic.run())
# <codecell>
# Connector Outputs
c = Connector()
# Set the output connector to a particular pin of the ic
ic.setOutput(9, c)
print(c)
|
bsd-3-clause
|
PriceChild/ansible
|
lib/ansible/modules/network/f5/bigip_irule.py
|
51
|
11120
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2016 F5 Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_irule
short_description: Manage iRules across different modules on a BIG-IP.
description:
- Manage iRules across different modules on a BIG-IP.
version_added: "2.2"
options:
content:
description:
- When used instead of 'src', sets the contents of an iRule directly to
the specified value. This is for simple values, but can be used with
lookup plugins for anything complex or with formatting. Either one
of C(src) or C(content) must be provided.
module:
description:
- The BIG-IP module to add the iRule to.
required: true
choices:
- ltm
- gtm
partition:
description:
- The partition to create the iRule on.
required: false
default: Common
name:
description:
- The name of the iRule.
required: true
src:
description:
- The iRule file to interpret and upload to the BIG-IP. Either one
of C(src) or C(content) must be provided.
required: true
state:
description:
- Whether the iRule should exist or not.
required: false
default: present
choices:
- present
- absent
notes:
- Requires the f5-sdk Python package on the host. This is as easy as
pip install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Add the iRule contained in templated irule.tcl to the LTM module
bigip_irule:
content: "{{ lookup('template', 'irule-template.tcl') }}"
module: "ltm"
name: "MyiRule"
password: "secret"
server: "lb.mydomain.com"
state: "present"
user: "admin"
delegate_to: localhost
- name: Add the iRule contained in static file irule.tcl to the LTM module
bigip_irule:
module: "ltm"
name: "MyiRule"
password: "secret"
server: "lb.mydomain.com"
src: "irule-static.tcl"
state: "present"
user: "admin"
delegate_to: localhost
'''
RETURN = '''
module:
description: The module that the iRule was added to
returned: changed and success
type: string
sample: "gtm"
src:
description: The filename that included the iRule source
returned: changed and success, when provided
type: string
sample: "/opt/src/irules/example1.tcl"
name:
description: The name of the iRule that was managed
returned: changed and success
type: string
sample: "my-irule"
content:
description: The content of the iRule that was managed
returned: changed and success
type: string
sample: "when LB_FAILED { set wipHost [LB::server addr] }"
partition:
description: The partition in which the iRule was managed
returned: changed and success
type: string
sample: "Common"
'''
try:
from f5.bigip import ManagementRoot
from icontrol.session import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
MODULES = ['gtm', 'ltm']
class BigIpiRule(object):
def __init__(self, *args, **kwargs):
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
if kwargs['state'] != 'absent':
if not kwargs['content'] and not kwargs['src']:
raise F5ModuleError(
"Either 'content' or 'src' must be provided"
)
source = kwargs['src']
if source:
with open(source) as f:
kwargs['content'] = f.read()
# The params that change in the module
self.cparams = dict()
# Stores the params that are sent to the module
self.params = kwargs
self.api = ManagementRoot(kwargs['server'],
kwargs['user'],
kwargs['password'],
port=kwargs['server_port'])
def flush(self):
result = dict()
state = self.params['state']
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
result.update(**self.cparams)
result.update(dict(changed=changed))
return result
def read(self):
"""Read information and transform it
The values that are returned by BIG-IP in the f5-sdk can have encoding
attached to them as well as be completely missing in some cases.
Therefore, this method will transform the data from the BIG-IP into a
format that is more easily consumable by the rest of the class and the
parameters that are supported by the module.
"""
p = dict()
name = self.params['name']
partition = self.params['partition']
module = self.params['module']
if module == 'ltm':
r = self.api.tm.ltm.rules.rule.load(
name=name,
partition=partition
)
elif module == 'gtm':
r = self.api.tm.gtm.rules.rule.load(
name=name,
partition=partition
)
if hasattr(r, 'apiAnonymous'):
p['content'] = str(r.apiAnonymous.strip())
p['name'] = name
return p
def delete(self):
params = dict()
check_mode = self.params['check_mode']
module = self.params['module']
params['name'] = self.params['name']
params['partition'] = self.params['partition']
self.cparams = camel_dict_to_snake_dict(params)
if check_mode:
return True
if module == 'ltm':
r = self.api.tm.ltm.rules.rule.load(**params)
r.delete()
elif module == 'gtm':
r = self.api.tm.gtm.rules.rule.load(**params)
r.delete()
if self.exists():
raise F5ModuleError("Failed to delete the iRule")
return True
def exists(self):
name = self.params['name']
partition = self.params['partition']
module = self.params['module']
if module == 'ltm':
return self.api.tm.ltm.rules.rule.exists(
name=name,
partition=partition
)
elif module == 'gtm':
return self.api.tm.gtm.rules.rule.exists(
name=name,
partition=partition
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def update(self):
params = dict()
current = self.read()
changed = False
check_mode = self.params['check_mode']
content = self.params['content']
name = self.params['name']
partition = self.params['partition']
module = self.params['module']
if content is not None:
content = content.strip()
if 'content' in current:
if content != current['content']:
params['apiAnonymous'] = content
else:
params['apiAnonymous'] = content
if params:
changed = True
params['name'] = name
params['partition'] = partition
self.cparams = camel_dict_to_snake_dict(params)
if 'api_anonymous' in self.cparams:
self.cparams['content'] = self.cparams.pop('api_anonymous')
if self.params['src']:
self.cparams['src'] = self.params['src']
if check_mode:
return changed
else:
return changed
if module == 'ltm':
d = self.api.tm.ltm.rules.rule.load(
name=name,
partition=partition
)
d.update(**params)
d.refresh()
elif module == 'gtm':
d = self.api.tm.gtm.rules.rule.load(
name=name,
partition=partition
)
d.update(**params)
d.refresh()
return True
def create(self):
params = dict()
check_mode = self.params['check_mode']
content = self.params['content']
name = self.params['name']
partition = self.params['partition']
module = self.params['module']
if check_mode:
return True
if content is not None:
params['apiAnonymous'] = content.strip()
params['name'] = name
params['partition'] = partition
self.cparams = camel_dict_to_snake_dict(params)
if 'api_anonymous' in self.cparams:
self.cparams['content'] = self.cparams.pop('api_anonymous')
if self.params['src']:
self.cparams['src'] = self.params['src']
if check_mode:
return True
if module == 'ltm':
d = self.api.tm.ltm.rules.rule
d.create(**params)
elif module == 'gtm':
d = self.api.tm.gtm.rules.rule
d.create(**params)
if not self.exists():
raise F5ModuleError("Failed to create the iRule")
return True
def absent(self):
changed = False
if self.exists():
changed = self.delete()
return changed
def main():
argument_spec = f5_argument_spec()
meta_args = dict(
content=dict(required=False, default=None),
src=dict(required=False, default=None),
name=dict(required=True),
module=dict(required=True, choices=MODULES)
)
argument_spec.update(meta_args)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[
['content', 'src']
]
)
try:
obj = BigIpiRule(check_mode=module.check_mode, **module.params)
result = obj.flush()
module.exit_json(**result)
except F5ModuleError as e:
module.fail_json(msg=str(e))
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
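# Hypothetical ad-hoc usage sketch (not from the module docs), assuming a
# reachable BIG-IP and the f5-sdk installed locally:
#   ansible localhost -m bigip_irule -a \
#     "server=lb.mydomain.com user=admin password=secret \
#      name=MyiRule module=ltm src=irule-static.tcl state=present"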
|
gpl-3.0
|
bop/bauhaus
|
lib/python2.7/site-packages/distribute-0.6.24-py2.7.egg/setuptools/command/setopt.py
|
167
|
5053
|
import distutils, os
from setuptools import Command
from distutils.util import convert_path
from distutils import log
from distutils.errors import *
__all__ = ['config_file', 'edit_config', 'option_base', 'setopt']
def config_file(kind="local"):
"""Get the filename of the distutils, local, global, or per-user config
`kind` must be one of "local", "global", or "user"
"""
if kind=='local':
return 'setup.cfg'
if kind=='global':
return os.path.join(
os.path.dirname(distutils.__file__),'distutils.cfg'
)
if kind=='user':
dot = os.name=='posix' and '.' or ''
return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot))
raise ValueError(
"config_file() type must be 'local', 'global', or 'user'", kind
)
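# Example return values (illustrative; the exact paths depend on the platform):
#   config_file('local')  -> 'setup.cfg'
#   config_file('user')   -> '~/.pydistutils.cfg', expanded, on POSIX systems
#   config_file('global') -> '<distutils package dir>/distutils.cfg'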
def edit_config(filename, settings, dry_run=False):
"""Edit a configuration file to include `settings`
`settings` is a dictionary of dictionaries or ``None`` values, keyed by
command/section name. A ``None`` value means to delete the entire section,
while a dictionary lists settings to be changed or deleted in that section.
A setting of ``None`` means to delete that setting.
"""
from ConfigParser import RawConfigParser
log.debug("Reading configuration from %s", filename)
opts = RawConfigParser()
opts.read([filename])
for section, options in settings.items():
if options is None:
log.info("Deleting section [%s] from %s", section, filename)
opts.remove_section(section)
else:
if not opts.has_section(section):
log.debug("Adding new section [%s] to %s", section, filename)
opts.add_section(section)
for option,value in options.items():
if value is None:
log.debug("Deleting %s.%s from %s",
section, option, filename
)
opts.remove_option(section,option)
if not opts.options(section):
log.info("Deleting empty [%s] section from %s",
section, filename)
opts.remove_section(section)
else:
log.debug(
"Setting %s.%s to %r in %s",
section, option, value, filename
)
opts.set(section,option,value)
log.info("Writing %s", filename)
if not dry_run:
f = open(filename,'w'); opts.write(f); f.close()
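# Illustrative sketch of the `settings` structure described above (section and
# option names are made up for the example):
#   edit_config('setup.cfg', {
#       'bdist_rpm': None,                       # delete the whole section
#       'easy_install': {
#           'index_url': 'https://pypi.example.org/simple',  # set an option
#           'find_links': None,                  # delete just this option
#       },
#   })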
class option_base(Command):
"""Abstract base class for commands that mess with config files"""
user_options = [
('global-config', 'g',
"save options to the site-wide distutils.cfg file"),
('user-config', 'u',
"save options to the current user's pydistutils.cfg file"),
('filename=', 'f',
"configuration file to use (default=setup.cfg)"),
]
boolean_options = [
'global-config', 'user-config',
]
def initialize_options(self):
self.global_config = None
self.user_config = None
self.filename = None
def finalize_options(self):
filenames = []
if self.global_config:
filenames.append(config_file('global'))
if self.user_config:
filenames.append(config_file('user'))
if self.filename is not None:
filenames.append(self.filename)
if not filenames:
filenames.append(config_file('local'))
if len(filenames)>1:
raise DistutilsOptionError(
"Must specify only one configuration file option",
filenames
)
self.filename, = filenames
class setopt(option_base):
"""Save command-line options to a file"""
description = "set an option in setup.cfg or another config file"
user_options = [
('command=', 'c', 'command to set an option for'),
('option=', 'o', 'option to set'),
('set-value=', 's', 'value of the option'),
('remove', 'r', 'remove (unset) the value'),
] + option_base.user_options
boolean_options = option_base.boolean_options + ['remove']
def initialize_options(self):
option_base.initialize_options(self)
self.command = None
self.option = None
self.set_value = None
self.remove = None
def finalize_options(self):
option_base.finalize_options(self)
if self.command is None or self.option is None:
raise DistutilsOptionError("Must specify --command *and* --option")
if self.set_value is None and not self.remove:
raise DistutilsOptionError("Must specify --set-value or --remove")
def run(self):
edit_config(
self.filename, {
self.command: {self.option.replace('-','_'):self.set_value}
},
self.dry_run
)
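# Typical command-line usage sketch (assuming the command is exposed through a
# project's setup.py, as setuptools does):
#   python setup.py setopt --command easy_install --option index-url \
#       --set-value https://pypi.example.org/simple
#   python setup.py setopt -c easy_install -o index-url --remove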
|
mit
|
jolyonb/edx-platform
|
lms/djangoapps/edxnotes/helpers.py
|
1
|
15847
|
"""
Helper methods related to EdxNotes.
"""
from __future__ import absolute_import
import json
import logging
from datetime import datetime
from json import JSONEncoder
from uuid import uuid4
import requests
import six
from six.moves.urllib.parse import urlencode, urlparse, parse_qs # pylint: disable=import-error
from dateutil.parser import parse as dateutil_parse
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.urls import reverse
from django.utils.translation import ugettext as _
from oauth2_provider.models import Application
from opaque_keys.edx.keys import UsageKey
from requests.exceptions import RequestException
from courseware.access import has_access
from courseware.courses import get_current_child
from edxnotes.exceptions import EdxNotesParseError, EdxNotesServiceUnavailable
from edxnotes.plugins import EdxNotesTab
from lms.lib.utils import get_parent_unit
from openedx.core.djangoapps.oauth_dispatch.jwt import create_jwt_for_user
from openedx.core.djangolib.markup import Text
from student.models import anonymous_id_for_user
from util.date_utils import get_default_time_display
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError
log = logging.getLogger(__name__)
# OAuth2 Client name for edxnotes
CLIENT_NAME = "edx-notes"
DEFAULT_PAGE = 1
DEFAULT_PAGE_SIZE = 25
class NoteJSONEncoder(JSONEncoder):
"""
Custom JSON encoder that encodes datetime objects to appropriate time strings.
"""
# pylint: disable=method-hidden
def default(self, obj):
if isinstance(obj, datetime):
return get_default_time_display(obj)
return json.JSONEncoder.default(self, obj)
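# Illustrative usage sketch: passing this encoder to json.dumps serialises any
# datetime values with get_default_time_display, e.g.
#   json.dumps({"updated": datetime.now()}, cls=NoteJSONEncoder)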
def get_edxnotes_id_token(user):
"""
Returns generated ID Token for edxnotes.
"""
try:
notes_application = Application.objects.get(name=CLIENT_NAME)
except Application.DoesNotExist:
raise ImproperlyConfigured(
u'OAuth2 Client with name [{}] does not exist.'.format(CLIENT_NAME)
)
return create_jwt_for_user(
user, secret=notes_application.client_secret, aud=notes_application.client_id
)
def get_token_url(course_id):
"""
Returns token url for the course.
"""
return reverse("get_token", kwargs={
"course_id": six.text_type(course_id),
})
def send_request(user, course_id, page, page_size, path="", text=None):
"""
Sends a request to notes api with appropriate parameters and headers.
Arguments:
user: Current logged in user
course_id: Course id
page: requested or default page number
page_size: requested or default page size
path: `search` or `annotations`. This is used to calculate notes api endpoint.
text: text to search.
Returns:
Response received from notes api
"""
url = get_internal_endpoint(path)
params = {
"user": anonymous_id_for_user(user, None),
"course_id": six.text_type(course_id).encode("utf-8"),
"page": page,
"page_size": page_size,
}
if text:
params.update({
"text": text,
"highlight": True
})
try:
response = requests.get(
url,
headers={
"x-annotator-auth-token": get_edxnotes_id_token(user)
},
params=params,
timeout=(settings.EDXNOTES_CONNECT_TIMEOUT, settings.EDXNOTES_READ_TIMEOUT)
)
except RequestException:
log.error(u"Failed to connect to edx-notes-api: url=%s, params=%s", url, str(params))
raise EdxNotesServiceUnavailable(_("EdxNotes Service is unavailable. Please try again in a few minutes."))
return response
def delete_all_notes_for_user(user):
"""
Helper method to delete all notes for a user, as part of GDPR compliance.
:param user: The user object associated with the deleted notes
:return: response (requests) object
Raises:
EdxNotesServiceUnavailable - when notes api is not found/misconfigured.
"""
url = get_internal_endpoint('retire_annotations')
headers = {
"x-annotator-auth-token": get_edxnotes_id_token(user),
}
data = {
"user": anonymous_id_for_user(user, None)
}
try:
response = requests.post(
url=url,
headers=headers,
data=data,
timeout=(settings.EDXNOTES_CONNECT_TIMEOUT, settings.EDXNOTES_READ_TIMEOUT)
)
except RequestException:
log.error(u"Failed to connect to edx-notes-api: url=%s, params=%s", url, str(headers))
raise EdxNotesServiceUnavailable(_("EdxNotes Service is unavailable. Please try again in a few minutes."))
return response
def preprocess_collection(user, course, collection):
"""
Prepare `collection(notes_list)` provided by edx-notes-api
for rendering in a template:
add information about ancestor blocks,
convert "updated" to date
Raises:
ItemNotFoundError - when appropriate module is not found.
"""
# pylint: disable=too-many-statements
store = modulestore()
filtered_collection = list()
cache = {}
include_path_info = ('course_structure' not in settings.NOTES_DISABLED_TABS)
with store.bulk_operations(course.id):
for model in collection:
update = {
u"updated": dateutil_parse(model["updated"]),
}
model.update(update)
usage_id = model["usage_id"]
if usage_id in cache:
model.update(cache[usage_id])
filtered_collection.append(model)
continue
usage_key = UsageKey.from_string(usage_id)
# Add a course run if necessary.
usage_key = usage_key.replace(course_key=store.fill_in_run(usage_key.course_key))
try:
item = store.get_item(usage_key)
except ItemNotFoundError:
log.debug(u"Module not found: %s", usage_key)
continue
if not has_access(user, "load", item, course_key=course.id):
log.debug(u"User %s does not have an access to %s", user, item)
continue
unit = get_parent_unit(item)
if unit is None:
log.debug(u"Unit not found: %s", usage_key)
continue
if include_path_info:
section = unit.get_parent()
if not section:
log.debug(u"Section not found: %s", usage_key)
continue
if section in cache:
usage_context = cache[section]
usage_context.update({
"unit": get_module_context(course, unit),
})
model.update(usage_context)
cache[usage_id] = cache[unit] = usage_context
filtered_collection.append(model)
continue
chapter = section.get_parent()
if not chapter:
log.debug(u"Chapter not found: %s", usage_key)
continue
if chapter in cache:
usage_context = cache[chapter]
usage_context.update({
"unit": get_module_context(course, unit),
"section": get_module_context(course, section),
})
model.update(usage_context)
cache[usage_id] = cache[unit] = cache[section] = usage_context
filtered_collection.append(model)
continue
usage_context = {
"unit": get_module_context(course, unit),
"section": get_module_context(course, section) if include_path_info else {},
"chapter": get_module_context(course, chapter) if include_path_info else {},
}
model.update(usage_context)
if include_path_info:
cache[section] = cache[chapter] = usage_context
cache[usage_id] = cache[unit] = usage_context
filtered_collection.append(model)
return filtered_collection
def get_module_context(course, item):
"""
Returns display_name and url for the parent module.
"""
item_dict = {
'location': six.text_type(item.location),
'display_name': Text(item.display_name_with_default),
}
if item.category == 'chapter' and item.get_parent():
# course is a locator w/o branch and version
# so for uniformity we replace it with one that has them
course = item.get_parent()
item_dict['index'] = get_index(item_dict['location'], course.children)
elif item.category == 'vertical':
section = item.get_parent()
chapter = section.get_parent()
# Position starts from 1, that's why we add 1.
position = get_index(six.text_type(item.location), section.children) + 1
item_dict['url'] = reverse('courseware_position', kwargs={
'course_id': six.text_type(course.id),
'chapter': chapter.url_name,
'section': section.url_name,
'position': position,
})
if item.category in ('chapter', 'sequential'):
item_dict['children'] = [six.text_type(child) for child in item.children]
return item_dict
def get_index(usage_key, children):
"""
Returns an index of the child with `usage_key`.
"""
children = [six.text_type(child) for child in children]
return children.index(usage_key)
def construct_pagination_urls(request, course_id, api_next_url, api_previous_url):
"""
Construct next and previous urls for LMS. `api_next_url` and `api_previous_url`
are returned from notes api but we need to transform them according to LMS notes
views by removing and replacing extra information.
Arguments:
request: HTTP request object
course_id: course id
api_next_url: notes api next url
api_previous_url: notes api previous url
Returns:
next_url: lms notes next url
previous_url: lms notes previous url
"""
def lms_url(url):
"""
Create lms url from api url.
"""
if url is None:
return None
keys = ('page', 'page_size', 'text')
parsed = urlparse(url)
query_params = parse_qs(parsed.query)
encoded_query_params = urlencode({key: query_params.get(key)[0] for key in keys if key in query_params})
return "{}?{}".format(request.build_absolute_uri(base_url), encoded_query_params)
base_url = reverse("notes", kwargs={"course_id": course_id})
next_url = lms_url(api_next_url)
previous_url = lms_url(api_previous_url)
return next_url, previous_url
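# Illustrative transformation (hostnames are hypothetical): an api_next_url of
#   https://notes.example.com/api/v1/annotations/?page=2&page_size=25&user=abc
# would be rewritten against the LMS "notes" view as roughly
#   https://lms.example.com/courses/<course_id>/edxnotes/notes/?page=2&page_size=25
# keeping only the page, page_size and text query parameters.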
def get_notes(request, course, page=DEFAULT_PAGE, page_size=DEFAULT_PAGE_SIZE, text=None):
"""
Returns paginated list of notes for the user.
Arguments:
request: HTTP request object
course: Course descriptor
page: requested or default page number
page_size: requested or default page size
text: text to search. If None then return all results for the current logged in user.
Returns:
Paginated dictionary with these key:
start: start of the current page
current_page: current page number
next: url for next page
previous: url for previous page
count: total number of notes available for the sent query
num_pages: number of pages available
results: list with notes info dictionaries. Each item in this list will be a dict
"""
path = 'search' if text else 'annotations'
response = send_request(request.user, course.id, page, page_size, path, text)
try:
collection = json.loads(response.content)
except ValueError:
log.error(u"Invalid JSON response received from notes api: response_content=%s", response.content)
raise EdxNotesParseError(_("Invalid JSON response received from notes api."))
# Verify response dict structure
expected_keys = ['total', 'rows', 'num_pages', 'start', 'next', 'previous', 'current_page']
keys = list(collection.keys())
if not keys or not all(key in expected_keys for key in keys):
log.error(u"Incorrect data received from notes api: collection_data=%s", str(collection))
raise EdxNotesParseError(_("Incorrect data received from notes api."))
filtered_results = preprocess_collection(request.user, course, collection['rows'])
# Notes API is called from:
# 1. The annotatorjs in courseware. It expects these attributes to be named "total" and "rows".
# 2. The Notes tab Javascript proxied through LMS. It expects these attributes to be called "count" and "results".
collection['count'] = collection['total']
del collection['total']
collection['results'] = filtered_results
del collection['rows']
collection['next'], collection['previous'] = construct_pagination_urls(
request,
course.id,
collection['next'],
collection['previous']
)
return collection
def get_endpoint(api_url, path=""):
"""
Returns edx-notes-api endpoint.
Arguments:
api_url (str): base url to the notes api
path (str): path to the resource
Returns:
str: full endpoint to the notes api
"""
try:
if not api_url.endswith("/"):
api_url += "/"
if path:
if path.startswith("/"):
path = path.lstrip("/")
if not path.endswith("/"):
path += "/"
return api_url + path
except (AttributeError, KeyError):
raise ImproperlyConfigured(_("No endpoint was provided for EdxNotes."))
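# Illustrative sketch (the base URL is hypothetical): slashes are normalised, so
#   get_endpoint("http://notes.example.com/api/v1", "annotations")
#   get_endpoint("http://notes.example.com/api/v1/", "/annotations/")
# would both return "http://notes.example.com/api/v1/annotations/".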
def get_public_endpoint(path=""):
"""Get the full path to a resource on the public notes API."""
return get_endpoint(settings.EDXNOTES_PUBLIC_API, path)
def get_internal_endpoint(path=""):
"""Get the full path to a resource on the private notes API."""
return get_endpoint(settings.EDXNOTES_INTERNAL_API, path)
def get_course_position(course_module):
"""
Return the user's current place in the course.
If this is the user's first time, leads to COURSE/CHAPTER/SECTION.
If this isn't the user's first time, leads to COURSE/CHAPTER.
If there is no current position in the course or chapter, then selects
the first child.
"""
urlargs = {'course_id': six.text_type(course_module.id)}
chapter = get_current_child(course_module, min_depth=1)
if chapter is None:
log.debug("No chapter found when loading current position in course")
return None
urlargs['chapter'] = chapter.url_name
if course_module.position is not None:
return {
'display_name': Text(chapter.display_name_with_default),
'url': reverse('courseware_chapter', kwargs=urlargs),
}
# Relying on default of returning first child
section = get_current_child(chapter, min_depth=1)
if section is None:
log.debug("No section found when loading current position in course")
return None
urlargs['section'] = section.url_name
return {
'display_name': Text(section.display_name_with_default),
'url': reverse('courseware_section', kwargs=urlargs)
}
def generate_uid():
"""
Generates unique id.
"""
return uuid4().int # pylint: disable=no-member
def is_feature_enabled(course, user):
"""
Returns True if Student Notes feature is enabled for the course, False otherwise.
"""
return EdxNotesTab.is_enabled(course, user)
|
agpl-3.0
|
salaria/odoo
|
addons/payment_ogone/data/ogone.py
|
395
|
30321
|
# -*- coding: utf-8 -*-
OGONE_ERROR_MAP = {
'0020001001': "Authorization failed, please retry",
'0020001002': "Authorization failed, please retry",
'0020001003': "Authorization failed, please retry",
'0020001004': "Authorization failed, please retry",
'0020001005': "Authorization failed, please retry",
'0020001006': "Authorization failed, please retry",
'0020001007': "Authorization failed, please retry",
'0020001008': "Authorization failed, please retry",
'0020001009': "Authorization failed, please retry",
'0020001010': "Authorization failed, please retry",
'0030001999': "Our payment system is currently under maintenance, please try later",
'0050001005': "Expiry date error",
'0050001007': "Requested Operation code not allowed",
'0050001008': "Invalid delay value",
'0050001010': "Input date in invalid format",
'0050001013': "Unable to parse socket input stream",
'0050001014': "Error in parsing stream content",
'0050001015': "Currency error",
'0050001016': "Transaction still posted at end of wait",
'0050001017': "Sync value not compatible with delay value",
'0050001019': "Transaction duplicate of a pre-existing transaction",
'0050001020': "Acceptation code empty while required for the transaction",
'0050001024': "Maintenance acquirer differs from original transaction acquirer",
'0050001025': "Maintenance merchant differs from original transaction merchant",
'0050001028': "Maintenance operation not accurate for the original transaction",
'0050001031': "Host application unknown for the transaction",
'0050001032': "Unable to perform requested operation with requested currency",
'0050001033': "Maintenance card number differs from original transaction card number",
'0050001034': "Operation code not allowed",
'0050001035': "Exception occurred in socket input stream treatment",
'0050001036': "Card length does not correspond to an acceptable value for the brand",
'0050001068': "A technical problem occurred, please contact helpdesk",
'0050001069': "Invalid check for CardID and Brand",
'0050001070': "A technical problem occurred, please contact helpdesk",
'0050001116': "Unknown origin IP",
'0050001117': "No origin IP detected",
'0050001118': "Merchant configuration problem, please contact support",
'10001001': "Communication failure",
'10001002': "Communication failure",
'10001003': "Communication failure",
'10001004': "Communication failure",
'10001005': "Communication failure",
'20001001': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001002': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001003': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001004': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001005': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001006': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001007': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001008': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001009': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001010': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001101': "A technical problem occurred, please contact helpdesk",
'20001105': "We received an unknown status for the transaction. We will contact your acquirer and update the status of the transaction within one working day. Please check the status later.",
'20001111': "A technical problem occurred, please contact helpdesk",
'20002001': "Origin for the response of the bank can not be checked",
'20002002': "Beneficiary account number has been modified during processing",
'20002003': "Amount has been modified during processing",
'20002004': "Currency has been modified during processing",
'20002005': "No feedback from the bank server has been detected",
'30001001': "Payment refused by the acquirer",
'30001002': "Duplicate request",
'30001010': "A technical problem occurred, please contact helpdesk",
'30001011': "A technical problem occurred, please contact helpdesk",
'30001012': "Card black listed - Contact acquirer",
'30001015': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001051': "A technical problem occurred, please contact helpdesk",
'30001054': "A technical problem occurred, please contact helpdesk",
'30001057': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001058': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30001060': "Acquirer indicates that a failure occurred during payment processing",
'30001070': "RATEPAY Invalid Response Type (Failure)",
'30001071': "RATEPAY Missing Mandatory status code field (failure)",
'30001072': "RATEPAY Missing Mandatory Result code field (failure)",
'30001073': "RATEPAY Response parsing Failed",
'30001090': "CVC check required by front end and returned invalid by acquirer",
'30001091': "ZIP check required by front end and returned invalid by acquirer",
'30001092': "Address check required by front end and returned as invalid by acquirer.",
'30001100': "Unauthorized buyer's country",
'30001101': "IP country <> card country",
'30001102': "Number of different countries too high",
'30001103': "unauthorized card country",
'30001104': "unauthorized ip address country",
'30001105': "Anonymous proxy",
'30001110': "If the problem persists, please contact Support, or go to paysafecard's card balance page (https://customer.cc.at.paysafecard.com/psccustomer/GetWelcomePanelServlet?language=en) to see when the amount reserved on your card will be available again.",
'30001120': "IP address in merchant's black list",
'30001130': "BIN in merchant's black list",
'30001131': "Wrong BIN for 3xCB",
'30001140': "Card in merchant's card blacklist",
'30001141': "Email in blacklist",
'30001142': "Passenger name in blacklist",
'30001143': "Card holder name in blacklist",
'30001144': "Passenger name different from owner name",
'30001145': "Time to departure too short",
'30001149': "Card Configured in Card Supplier Limit for another relation (CSL)",
'30001150': "Card not configured in the system for this customer (CSL)",
'30001151': "REF1 not allowed for this relationship (Contract number",
'30001152': "Card/Supplier Amount limit reached (CSL)",
'30001153': "Card not allowed for this supplier (Date out of contract bounds)",
'30001154': "You have reached the usage limit allowed",
'30001155': "You have reached the usage limit allowed",
'30001156': "You have reached the usage limit allowed",
'30001157': "Unauthorized IP country for itinerary",
'30001158': "email usage limit reached",
'30001159': "Unauthorized card country/IP country combination",
'30001160': "Postcode in highrisk group",
'30001161': "generic blacklist match",
'30001162': "Billing Address is a PO Box",
'30001180': "maximum scoring reached",
'30001997': "Authorization canceled by simulation",
'30001998': "A technical problem occurred, please try again.",
'30001999': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'30002001': "Payment refused by the financial institution",
'30021001': "Call acquirer support call number.",
'30022001': "Payment must be approved by the acquirer before execution.",
'30031001': "Invalid merchant number.",
'30041001': "Retain card.",
'30051001': "Authorization declined",
'30071001': "Retain card - special conditions.",
'30121001': "Invalid transaction",
'30131001': "Invalid amount",
'30131002': "You have reached the total amount allowed",
'30141001': "Invalid card number",
'30151001': "Unknown acquiring institution.",
'30171001': "Payment method cancelled by the buyer",
'30171002': "The maximum time allowed is elapsed.",
'30191001': "Try again later.",
'30201001': "A technical problem occurred, please contact helpdesk",
'30301001': "Invalid format",
'30311001': "Unknown acquirer ID.",
'30331001': "Card expired.",
'30341001': "Suspicion of fraud.",
'30341002': "Suspicion of fraud (3rdMan)",
'30341003': "Suspicion of fraud (Perseuss)",
'30341004': "Suspicion of fraud (ETHOCA)",
'30381001': "A technical problem occurred, please contact helpdesk",
'30401001': "Invalid function.",
'30411001': "Lost card.",
'30431001': "Stolen card, pick up",
'30511001': "Insufficient funds.",
'30521001': "No Authorization. Contact the issuer of your card.",
'30541001': "Card expired.",
'30551001': "Invalid PIN.",
'30561001': "Card not in authorizer's database.",
'30571001': "Transaction not permitted on card.",
'30581001': "Transaction not allowed on this terminal",
'30591001': "Suspicion of fraud.",
'30601001': "The merchant must contact the acquirer.",
'30611001': "Amount exceeds card ceiling.",
'30621001': "Restricted card.",
'30631001': "Security policy not respected.",
'30641001': "Amount changed from ref. trn.",
'30681001': "Tardy response.",
'30751001': "PIN entered incorrectly too often",
'30761001': "Card holder already contesting.",
'30771001': "PIN entry required.",
'30811001': "Message flow error.",
'30821001': "Authorization center unavailable",
'30831001': "Authorization center unavailable",
'30901001': "Temporary system shutdown.",
'30911001': "Acquirer unavailable.",
'30921001': "Invalid card type for acquirer.",
'30941001': "Duplicate transaction",
'30961001': "Processing temporarily not possible",
'30971001': "A technical problem occurred, please contact helpdesk",
'30981001': "A technical problem occurred, please contact helpdesk",
'31011001': "Unknown acceptance code",
'31021001': "Invalid currency",
'31031001': "Acceptance code missing",
'31041001': "Inactive card",
'31051001': "Merchant not active",
'31061001': "Invalid expiration date",
'31071001': "Interrupted host communication",
'31081001': "Card refused",
'31091001': "Invalid password",
'31101001': "Plafond transaction (majoré du bonus) dépassé",
'31111001': "Plafond mensuel (majoré du bonus) dépassé",
'31121001': "Plafond centre de facturation dépassé",
'31131001': "Plafond entreprise dépassé",
'31141001': "Code MCC du fournisseur non autorisé pour la carte",
'31151001': "Numéro SIRET du fournisseur non autorisé pour la carte",
'31161001': "This is not a valid online banking account",
'32001004': "A technical problem occurred, please try again.",
'34011001': "Bezahlung mit RatePAY nicht möglich.",
'39991001': "A technical problem occurred, please contact the helpdesk of your acquirer",
'40001001': "A technical problem occurred, please try again.",
'40001002': "A technical problem occurred, please try again.",
'40001003': "A technical problem occurred, please try again.",
'40001004': "A technical problem occurred, please try again.",
'40001005': "A technical problem occurred, please try again.",
'40001006': "A technical problem occurred, please try again.",
'40001007': "A technical problem occurred, please try again.",
'40001008': "A technical problem occurred, please try again.",
'40001009': "A technical problem occurred, please try again.",
'40001010': "A technical problem occurred, please try again.",
'40001011': "A technical problem occurred, please contact helpdesk",
'40001012': "Your merchant's acquirer is temporarily unavailable, please try later or choose another payment method.",
'40001013': "A technical problem occurred, please contact helpdesk",
'40001016': "A technical problem occurred, please contact helpdesk",
'40001018': "A technical problem occurred, please try again.",
'40001019': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001020': "Sorry, an error occurred during processing. Please retry the operation (use back button of the browser). If problem persists, contact your merchant's helpdesk.",
'40001050': "A technical problem occurred, please contact helpdesk",
'40001133': "Authentication failed, the signature of your bank access control server is incorrect",
'40001134': "Authentication failed, please retry or cancel.",
'40001135': "Authentication temporary unavailable, please retry or cancel.",
'40001136': "Technical problem with your browser, please retry or cancel",
'40001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'40001998': "Temporary technical problem. Please retry a little bit later.",
'50001001': "Unknown card type",
'50001002': "Card number format check failed for given card number.",
'50001003': "Merchant data error",
'50001004': "Merchant identification missing",
'50001005': "Expiry date error",
'50001006': "Amount is not a number",
'50001007': "A technical problem occurred, please contact helpdesk",
'50001008': "A technical problem occurred, please contact helpdesk",
'50001009': "A technical problem occurred, please contact helpdesk",
'50001010': "A technical problem occurred, please contact helpdesk",
'50001011': "Brand not supported for that merchant",
'50001012': "A technical problem occurred, please contact helpdesk",
'50001013': "A technical problem occurred, please contact helpdesk",
'50001014': "A technical problem occurred, please contact helpdesk",
'50001015': "Invalid currency code",
'50001016': "A technical problem occurred, please contact helpdesk",
'50001017': "A technical problem occurred, please contact helpdesk",
'50001018': "A technical problem occurred, please contact helpdesk",
'50001019': "A technical problem occurred, please contact helpdesk",
'50001020': "A technical problem occurred, please contact helpdesk",
'50001021': "A technical problem occurred, please contact helpdesk",
'50001022': "A technical problem occurred, please contact helpdesk",
'50001023': "A technical problem occurred, please contact helpdesk",
'50001024': "A technical problem occurred, please contact helpdesk",
'50001025': "A technical problem occurred, please contact helpdesk",
'50001026': "A technical problem occurred, please contact helpdesk",
'50001027': "A technical problem occurred, please contact helpdesk",
'50001028': "A technical problem occurred, please contact helpdesk",
'50001029': "A technical problem occurred, please contact helpdesk",
'50001030': "A technical problem occurred, please contact helpdesk",
'50001031': "A technical problem occurred, please contact helpdesk",
'50001032': "A technical problem occurred, please contact helpdesk",
'50001033': "A technical problem occurred, please contact helpdesk",
'50001034': "A technical problem occurred, please contact helpdesk",
'50001035': "A technical problem occurred, please contact helpdesk",
'50001036': "Card length does not correspond to an acceptable value for the brand",
'50001037': "Purchasing card number for a regular merchant",
'50001038': "Non Purchasing card for a Purchasing card merchant",
'50001039': "Details sent for a non-Purchasing card merchant, please contact helpdesk",
'50001040': "Details not sent for a Purchasing card transaction, please contact helpdesk",
'50001041': "Payment detail validation failed",
'50001042': "Given transactions amounts (tax,discount,shipping,net,etc…) do not compute correctly together",
'50001043': "A technical problem occurred, please contact helpdesk",
'50001044': "No acquirer configured for this operation",
'50001045': "No UID configured for this operation",
'50001046': "Operation not allowed for the merchant",
'50001047': "A technical problem occurred, please contact helpdesk",
'50001048': "A technical problem occurred, please contact helpdesk",
'50001049': "A technical problem occurred, please contact helpdesk",
'50001050': "A technical problem occurred, please contact helpdesk",
'50001051': "A technical problem occurred, please contact helpdesk",
'50001052': "A technical problem occurred, please contact helpdesk",
'50001053': "A technical problem occurred, please contact helpdesk",
'50001054': "Card number incorrect or incompatible",
'50001055': "A technical problem occurred, please contact helpdesk",
'50001056': "A technical problem occurred, please contact helpdesk",
'50001057': "A technical problem occurred, please contact helpdesk",
'50001058': "A technical problem occurred, please contact helpdesk",
'50001059': "A technical problem occurred, please contact helpdesk",
'50001060': "A technical problem occurred, please contact helpdesk",
'50001061': "A technical problem occurred, please contact helpdesk",
'50001062': "A technical problem occurred, please contact helpdesk",
'50001063': "Card Issue Number does not correspond to range or not present",
'50001064': "Start Date not valid or not present",
'50001066': "Format of CVC code invalid",
'50001067': "The merchant is not enrolled for 3D-Secure",
'50001068': "The card number or account number (PAN) is invalid",
'50001069': "Invalid check for CardID and Brand",
'50001070': "The ECI value given is either not supported, or in conflict with other data in the transaction",
'50001071': "Incomplete TRN demat",
'50001072': "Incomplete PAY demat",
'50001073': "No demat APP",
'50001074': "Authorisation too old",
'50001075': "VERRes was an error message",
'50001076': "DCP amount greater than authorisation amount",
'50001077': "Details negative amount",
'50001078': "Details negative quantity",
'50001079': "Could not decode/decompress received PARes (3D-Secure)",
    '50001080': "Received PARes was an error message from ACS (3D-Secure)",
'50001081': "Received PARes format was invalid according to the 3DS specifications (3D-Secure)",
'50001082': "PAReq/PARes reconciliation failure (3D-Secure)",
'50001084': "Maximum amount reached",
'50001087': "The transaction type requires authentication, please check with your bank.",
'50001090': "CVC missing at input, but CVC check asked",
'50001091': "ZIP missing at input, but ZIP check asked",
'50001092': "Address missing at input, but Address check asked",
'50001095': "Invalid date of birth",
'50001096': "Invalid commodity code",
'50001097': "The requested currency and brand are incompatible.",
'50001111': "Data validation error",
'50001113': "This order has already been processed",
'50001114': "Error pre-payment check page access",
'50001115': "Request not received in secure mode",
'50001116': "Unknown IP address origin",
'50001117': "NO IP address origin",
'50001118': "Pspid not found or not correct",
'50001119': "Password incorrect or disabled due to numbers of errors",
'50001120': "Invalid currency",
'50001121': "Invalid number of decimals for the currency",
'50001122': "Currency not accepted by the merchant",
'50001123': "Card type not active",
'50001124': "Number of lines don't match with number of payments",
'50001125': "Format validation error",
'50001126': "Overflow in data capture requests for the original order",
'50001127': "The original order is not in a correct status",
'50001128': "missing authorization code for unauthorized order",
'50001129': "Overflow in refunds requests",
'50001130': "Error access to original order",
'50001131': "Error access to original history item",
'50001132': "The Selected Catalog is empty",
'50001133': "Duplicate request",
'50001134': "Authentication failed, please retry or cancel.",
'50001135': "Authentication temporary unavailable, please retry or cancel.",
'50001136': "Technical problem with your browser, please retry or cancel",
'50001137': "Your bank access control server is temporary unavailable, please retry or cancel",
'50001150': "Fraud Detection, Technical error (IP not valid)",
'50001151': "Fraud detection : technical error (IPCTY unknown or error)",
'50001152': "Fraud detection : technical error (CCCTY unknown or error)",
'50001153': "Overflow in redo-authorisation requests",
'50001170': "Dynamic BIN check failed",
'50001171': "Dynamic country check failed",
'50001172': "Error in Amadeus signature",
'50001174': "Card Holder Name is too long",
'50001175': "Name contains invalid characters",
'50001176': "Card number is too long",
'50001177': "Card number contains non-numeric info",
'50001178': "Card Number Empty",
'50001179': "CVC too long",
'50001180': "CVC contains non-numeric info",
'50001181': "Expiration date contains non-numeric info",
'50001182': "Invalid expiration month",
'50001183': "Expiration date must be in the future",
'50001184': "SHA Mismatch",
'50001205': "Missing mandatory fields for billing address.",
'50001206': "Missing mandatory field date of birth.",
'50001207': "Missing required shopping basket details.",
'50001208': "Missing social security number",
'50001209': "Invalid country code",
'50001210': "Missing yearly salary",
'50001211': "Missing gender",
'50001212': "Missing email",
'50001213': "Missing IP address",
'50001214': "Missing part payment campaign ID",
'50001215': "Missing invoice number",
'50001216': "The alias must be different than the card number",
'60000001': "account number unknown",
'60000003': "not credited dd-mm-yy",
'60000005': "name/number do not correspond",
'60000007': "account number blocked",
'60000008': "specific direct debit block",
'60000009': "account number WKA",
'60000010': "administrative reason",
'60000011': "account number expired",
'60000012': "no direct debit authorisation given",
'60000013': "debit not approved",
'60000014': "double payment",
'60000018': "name/address/city not entered",
'60001001': "no original direct debit for revocation",
'60001002': "payer’s account number format error",
'60001004': "payer’s account at different bank",
'60001005': "payee’s account at different bank",
'60001006': "payee’s account number format error",
'60001007': "payer’s account number blocked",
'60001008': "payer’s account number expired",
'60001009': "payee’s account number expired",
'60001010': "direct debit not possible",
'60001011': "creditor payment not possible",
'60001012': "payer’s account number unknown WKA-number",
'60001013': "payee’s account number unknown WKA-number",
'60001014': "impermissible WKA transaction",
'60001015': "period for revocation expired",
'60001017': "reason for revocation not correct",
'60001018': "original run number not numeric",
'60001019': "payment ID incorrect",
'60001020': "amount not numeric",
'60001021': "amount zero not permitted",
'60001022': "negative amount not permitted",
'60001023': "payer and payee giro account number",
'60001025': "processing code (verwerkingscode) incorrect",
'60001028': "revocation not permitted",
'60001029': "guaranteed direct debit on giro account number",
'60001030': "NBC transaction type incorrect",
'60001031': "description too large",
'60001032': "book account number not issued",
'60001034': "book account number incorrect",
'60001035': "payer’s account number not numeric",
'60001036': "payer’s account number not eleven-proof",
'60001037': "payer’s account number not issued",
'60001039': "payer’s account number of DNB/BGC/BLA",
'60001040': "payee’s account number not numeric",
'60001041': "payee’s account number not eleven-proof",
'60001042': "payee’s account number not issued",
'60001044': "payee’s account number unknown",
'60001050': "payee’s name missing",
'60001051': "indicate payee’s bank account number instead of 3102",
'60001052': "no direct debit contract",
'60001053': "amount beyond bounds",
'60001054': "selective direct debit block",
'60001055': "original run number unknown",
'60001057': "payer’s name missing",
'60001058': "payee’s account number missing",
'60001059': "restore not permitted",
'60001060': "bank’s reference (navraaggegeven) missing",
'60001061': "BEC/GBK number incorrect",
'60001062': "BEC/GBK code incorrect",
'60001087': "book account number not numeric",
'60001090': "cancelled on request",
'60001091': "cancellation order executed",
'60001092': "cancelled instead of bended",
'60001093': "book account number is a shortened account number",
'60001094': "instructing party account number not identical with payer",
'60001095': "payee unknown GBK acceptor",
'60001097': "instructing party account number not identical with payee",
'60001099': "clearing not permitted",
'60001101': "payer’s account number not spaces",
'60001102': "PAN length not numeric",
'60001103': "PAN length outside limits",
'60001104': "track number not numeric",
'60001105': "track number not valid",
'60001106': "PAN sequence number not numeric",
'60001107': "domestic PAN not numeric",
'60001108': "domestic PAN not eleven-proof",
'60001109': "domestic PAN not issued",
'60001110': "foreign PAN not numeric",
'60001111': "card valid date not numeric",
'60001112': "book period number (boekperiodenr) not numeric",
'60001113': "transaction number not numeric",
'60001114': "transaction time not numeric",
'60001115': "transaction no valid time",
'60001116': "transaction date not numeric",
'60001117': "transaction no valid date",
'60001118': "STAN not numeric",
'60001119': "instructing party’s name missing",
'60001120': "foreign amount (bedrag-vv) not numeric",
'60001122': "rate (verrekenkoers) not numeric",
'60001125': "number of decimals (aantaldecimalen) incorrect",
'60001126': "tariff (tarifering) not B/O/S",
'60001127': "domestic costs (kostenbinnenland) not numeric",
'60001128': "domestic costs (kostenbinnenland) not higher than zero",
'60001129': "foreign costs (kostenbuitenland) not numeric",
'60001130': "foreign costs (kostenbuitenland) not higher than zero",
'60001131': "domestic costs (kostenbinnenland) not zero",
'60001132': "foreign costs (kostenbuitenland) not zero",
'60001134': "Euro record not fully filled in",
'60001135': "Client currency incorrect",
'60001136': "Amount NLG not numeric",
'60001137': "Amount NLG not higher than zero",
'60001138': "Amount NLG not equal to Amount",
'60001139': "Amount NLG incorrectly converted",
'60001140': "Amount EUR not numeric",
'60001141': "Amount EUR not greater than zero",
'60001142': "Amount EUR not equal to Amount",
'60001143': "Amount EUR incorrectly converted",
'60001144': "Client currency not NLG",
'60001145': "rate euro-vv (Koerseuro-vv) not numeric",
'60001146': "comma rate euro-vv (Kommakoerseuro-vv) incorrect",
'60001147': "acceptgiro distributor not valid",
'60001148': "Original run number and/or BRN are missing",
'60001149': "Amount/Account number/ BRN different",
'60001150': "Direct debit already revoked/restored",
'60001151': "Direct debit already reversed/revoked/restored",
'60001153': "Payer’s account number not known",
}
DATA_VALIDATION_ERROR = '50001111'
def retryable(error):
return error in [
'0020001001', '0020001002', '0020001003', '0020001004', '0020001005',
'0020001006', '0020001007', '0020001008', '0020001009', '0020001010',
'30001010', '30001011', '30001015',
'30001057', '30001058',
'30001998', '30001999',
#'30611001', # amount exceeds card limit
'30961001',
'40001001', '40001002', '40001003', '40001004', '40001005',
'40001006', '40001007', '40001008', '40001009', '40001010',
'40001012',
'40001018', '40001019', '40001020',
'40001134', '40001135', '40001136', '40001137',
#'50001174', # cardholder name too long
]
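# Minimal usage sketch (illustrative only): the message dictionary's name is not
# visible in this excerpt, so only DATA_VALIDATION_ERROR and retryable() defined
# above are exercised.
def should_retry_payment(error_code):
    # Never retry validation failures; retry only codes flagged as transient.
    if error_code == DATA_VALIDATION_ERROR:
        return False
    return retryable(error_code)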
|
agpl-3.0
|
one-2-z/a830s_kernel
|
tools/perf/scripts/python/syscall-counts.py
|
11181
|
1522
|
# system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
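# Illustrative invocation (assumed, based on the usage string below): record the
# raw syscall-entry tracepoint system-wide, then summarise it with this script.
#   perf record -e raw_syscalls:sys_enter -a -- sleep 10
#   perf script -s syscall-counts.py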
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
britcey/ansible
|
lib/ansible/modules/network/lenovo/cnos_bgp.py
|
59
|
19250
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send BGP commands to Lenovo Switches
# Lenovo Networking
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_bgp
author: "Dave Kasberg (@dkasberg)"
short_description: Manage BGP resources and attributes on devices running Lenovo CNOS
description:
- This module allows you to work with Border Gateway Protocol (BGP) related configurations.
The operators used are overloaded to ensure control over switch BGP configurations. This
      module is invoked using the method with asNumber as one of its arguments. The first level of
      the BGP configuration allows you to set up an AS number, with the following attributes going
into various configuration operations under the context of BGP. After passing this level,
there are eight BGP arguments that will perform further configurations. They are bgpArg1,
bgpArg2, bgpArg3, bgpArg4, bgpArg5, bgpArg6, bgpArg7, and bgpArg8. For more details on
how to use these arguments, see [Overloaded Variables].
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
      that must be created by the user in the local directory where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_bgp.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
asNum:
description:
- AS number
required: Yes
default: Null
bgpArg1:
description:
            - This is an overloaded bgp first argument. Usage of this argument can be found in the User Guide referenced above.
required: Yes
default: Null
choices: [address-family,bestpath,bgp,cluster-id,confederation,enforce-first-as,fast-external-failover,
graceful-restart,graceful-restart-helper,log-neighbor-changes,maxas-limit,neighbor,router-id,shutdown,
synchronization,timers,vrf]
bgpArg2:
description:
            - This is an overloaded bgp second argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [ipv4 or ipv6, always-compare-med,compare-confed-aspath,compare-routerid,dont-compare-originator-id,tie-break-on-age,
as-path,med,identifier,peers]
bgpArg3:
description:
            - This is an overloaded bgp third argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [aggregate-address,client-to-client,dampening,distance,maximum-paths,network,nexthop,redistribute,save,synchronization,
ignore or multipath-relax, confed or missing-as-worst or non-deterministic or remove-recv-med or remove-send-med]
bgpArg4:
description:
            - This is an overloaded bgp fourth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [Aggregate prefix, Reachability Half-life time,route-map, Distance for routes external,ebgp or ibgp,
IP prefix <network>,IP prefix <network>/<length>, synchronization, Delay value, direct, ospf, static, memory]
bgpArg5:
description:
            - This is an overloaded bgp fifth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [as-set, summary-only, Value to start reusing a route, Distance for routes internal, Supported multipath numbers,
backdoor, map, route-map ]
bgpArg6:
description:
            - This is an overloaded bgp sixth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [summary-only,as-set, route-map name, Value to start suppressing a route, Distance for local routes, Network mask,
Pointer to route-map entries]
bgpArg7:
description:
            - This is an overloaded bgp seventh argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [ Maximum duration to suppress a stable route(minutes), backdoor,route-map, Name of the route map ]
bgpArg8:
description:
            - This is an overloaded bgp eighth argument. Usage of this argument can be found in the User Guide referenced above.
required: No
default: Null
choices: [ Un-reachability Half-life time for the penalty(minutes), backdoor]
'''
EXAMPLES = '''
Tasks: The following are examples of using the module cnos_bgp. These are written in the main.yml file of the tasks directory.
---
- name: Test BGP - neighbor
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "neighbor"
bgpArg2: "10.241.107.40"
bgpArg3: 13
bgpArg4: "address-family"
bgpArg5: "ipv4"
bgpArg6: "next-hop-self"
- name: Test BGP - BFD
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "neighbor"
bgpArg2: "10.241.107.40"
bgpArg3: 13
bgpArg4: "bfd"
- name: Test BGP - address-family - dampening
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "address-family"
bgpArg2: "ipv4"
bgpArg3: "dampening"
bgpArg4: 13
bgpArg5: 233
bgpArg6: 333
bgpArg7: 15
bgpArg8: 33
- name: Test BGP - address-family - network
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "address-family"
bgpArg2: "ipv4"
bgpArg3: "network"
bgpArg4: "1.2.3.4/5"
bgpArg5: "backdoor"
- name: Test BGP - bestpath - always-compare-med
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "bestpath"
bgpArg2: "always-compare-med"
- name: Test BGP - bestpath-compare-confed-aspat
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "bestpath"
bgpArg2: "compare-confed-aspath"
- name: Test BGP - bgp
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "bgp"
bgpArg2: 33
- name: Test BGP - cluster-id
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "cluster-id"
bgpArg2: "1.2.3.4"
- name: Test BGP - confederation-identifier
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "confederation"
bgpArg2: "identifier"
bgpArg3: 333
- name: Test BGP - enforce-first-as
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "enforce-first-as"
- name: Test BGP - fast-external-failover
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "fast-external-failover"
- name: Test BGP - graceful-restart
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "graceful-restart"
bgpArg2: 333
- name: Test BGP - graceful-restart-helper
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "graceful-restart-helper"
- name: Test BGP - maxas-limit
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "maxas-limit"
bgpArg2: 333
- name: Test BGP - neighbor
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "neighbor"
bgpArg2: "10.241.107.40"
bgpArg3: 13
bgpArg4: "address-family"
bgpArg5: "ipv4"
bgpArg6: "next-hop-self"
- name: Test BGP - router-id
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "router-id"
bgpArg2: "1.2.3.4"
- name: Test BGP - synchronization
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "synchronization"
- name: Test BGP - timers
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "timers"
bgpArg2: 333
bgpArg3: 3333
- name: Test BGP - vrf
cnos_bgp:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_bgp_{{ inventory_hostname }}_output.txt"
asNum: 33
bgpArg1: "vrf"
'''
RETURN = '''
msg:
description: Success or failure message. Upon any failure, the method returns an error display string.
returned: always
type: string
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
module = AnsibleModule(
argument_spec=dict(
outputfile=dict(required=True),
host=dict(required=True),
username=dict(required=True),
password=dict(required=True, no_log=True),
enablePassword=dict(required=False, no_log=True),
deviceType=dict(required=True),
bgpArg1=dict(required=True),
bgpArg2=dict(required=False),
bgpArg3=dict(required=False),
bgpArg4=dict(required=False),
bgpArg5=dict(required=False),
bgpArg6=dict(required=False),
bgpArg7=dict(required=False),
bgpArg8=dict(required=False),
asNum=dict(required=True),),
supports_check_mode=False)
username = module.params['username']
password = module.params['password']
enablePassword = module.params['enablePassword']
bgpArg1 = module.params['bgpArg1']
bgpArg2 = module.params['bgpArg2']
bgpArg3 = module.params['bgpArg3']
bgpArg4 = module.params['bgpArg4']
bgpArg5 = module.params['bgpArg5']
bgpArg6 = module.params['bgpArg6']
bgpArg7 = module.params['bgpArg7']
bgpArg8 = module.params['bgpArg8']
asNum = module.params['asNum']
outputfile = module.params['outputfile']
hostIP = module.params['host']
deviceType = module.params['deviceType']
output = ""
# Create instance of SSHClient object
remote_conn_pre = paramiko.SSHClient()
# Automatically add untrusted hosts (make sure okay for security policy in your environment)
remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
# initiate SSH connection with the switch
remote_conn_pre.connect(hostIP, username=username, password=password)
time.sleep(2)
# Use invoke_shell to establish an 'interactive session'
remote_conn = remote_conn_pre.invoke_shell()
time.sleep(2)
# Enable and enter configure terminal then send command
output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
output = output + cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
# Make terminal length = 0
output = output + cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
# Go to config mode
output = output + cnos.waitForDeviceResponse("configure d\n", "(config)#", 2, remote_conn)
    # Send the CLI command
output = output + cnos.routerConfig(remote_conn, deviceType, "(config)#", 2, "bgp", asNum,
bgpArg1, bgpArg2, bgpArg3, bgpArg4, bgpArg5, bgpArg6, bgpArg7, bgpArg8)
# Save it into the file
file = open(outputfile, "a")
file.write(output)
file.close()
# Logic to check when changes occur or not
errorMsg = cnos.checkOutputForError(output)
if(errorMsg is None):
module.exit_json(changed=True, msg="BGP configurations accomplished")
else:
module.fail_json(msg=errorMsg)
if __name__ == '__main__':
main()
|
gpl-3.0
|
pjimmybrcd/bwctest
|
sensors/syslog_sensor.py
|
3
|
1790
|
import os,re
from logshipper.tail import Tail
from st2reactor.sensor.base import Sensor
class Syslog_Sensor(Sensor):
def __init__(self, sensor_service, config=None):
super(Syslog_Sensor, self).__init__(sensor_service=sensor_service,
config=config)
self._config = self._config['syslog_watch_sensor']
self._file_paths = self._config.get('syslog_paths', [])
self._triggers = self._config.get('triggers', [])
self._tail = None
self._logger = self.sensor_service.get_logger(name=self.__class__.__name__)
def setup(self):
if not self._file_paths:
raise ValueError('No file_paths configured to monitor')
if not self._triggers:
raise ValueError('No triggers to evaluate for matches')
self._tail = Tail(filenames=self._file_paths)
self._tail.handler = self._handle_line
self._tail.should_run = True
def run(self):
self._tail.run()
def cleanup(self):
if self._tail:
self._tail.should_run = False
try:
self._tail.notifier.stop()
except Exception:
pass
def add_trigger(self, trigger):
pass
def update_trigger(self, trigger):
pass
def remove_trigger(self, trigger):
pass
def _handle_line(self, file_path, line):
for trigger in self._triggers:
regex = re.compile(trigger['regex'])
match = regex.match(line)
if match:
payload = {}
for k,v in trigger['groups'].items():
payload.update({v: match.group(k)})
self.sensor_service.dispatch(trigger=trigger['trigger'], payload=payload)
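# Illustrative pack config shape, inferred from the attribute lookups above; the
# key names under each trigger entry (regex, groups, trigger) come from
# _handle_line, everything else below is an assumption.
#
# syslog_watch_sensor:
#   syslog_paths:
#     - "/var/log/syslog"
#   triggers:
#     - trigger: "syslog.error_line"
#       regex: '(?P<level>ERROR|WARN) (?P<message>.*)'
#       groups:
#         level: "severity"
#         message: "text"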
|
apache-2.0
|
bluevoda/BloggyBlog
|
lib/python3.4/site-packages/pip/operations/freeze.py
|
127
|
4048
|
from __future__ import absolute_import
import logging
import re
import pip
from pip.req import InstallRequirement
from pip.utils import get_installed_distributions
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
logger = logging.getLogger(__name__)
def freeze(
requirement=None,
find_links=None, local_only=None, user_only=None, skip_regex=None,
default_vcs=None,
isolated=False,
wheel_cache=None,
skip=()):
find_links = find_links or []
skip_match = None
if skip_regex:
skip_match = re.compile(skip_regex).search
dependency_links = []
for dist in pkg_resources.working_set:
if dist.has_metadata('dependency_links.txt'):
dependency_links.extend(
dist.get_metadata_lines('dependency_links.txt')
)
for link in find_links:
if '#egg=' in link:
dependency_links.append(link)
for link in find_links:
yield '-f %s' % link
installations = {}
for dist in get_installed_distributions(local_only=local_only,
skip=(),
user_only=user_only):
req = pip.FrozenRequirement.from_dist(
dist,
dependency_links
)
installations[req.name] = req
if requirement:
with open(requirement) as req_file:
for line in req_file:
if (not line.strip() or
line.strip().startswith('#') or
(skip_match and skip_match(line)) or
line.startswith((
'-r', '--requirement',
'-Z', '--always-unzip',
'-f', '--find-links',
'-i', '--index-url',
'--pre',
'--trusted-host',
'--process-dependency-links',
'--extra-index-url'))):
yield line.rstrip()
continue
if line.startswith('-e') or line.startswith('--editable'):
if line.startswith('-e'):
line = line[2:].strip()
else:
line = line[len('--editable'):].strip().lstrip('=')
line_req = InstallRequirement.from_editable(
line,
default_vcs=default_vcs,
isolated=isolated,
wheel_cache=wheel_cache,
)
else:
line_req = InstallRequirement.from_line(
line,
isolated=isolated,
wheel_cache=wheel_cache,
)
if not line_req.name:
logger.info(
"Skipping line because it's not clear what it "
"would install: %s",
line.strip(),
)
logger.info(
" (add #egg=PackageName to the URL to avoid"
" this warning)"
)
elif line_req.name not in installations:
logger.warning(
"Requirement file contains %s, but that package is"
" not installed",
line.strip(),
)
else:
yield str(installations[line_req.name]).rstrip()
del installations[line_req.name]
yield(
'## The following requirements were added by '
'pip freeze:'
)
for installation in sorted(
installations.values(), key=lambda x: x.name.lower()):
if canonicalize_name(installation.name) not in skip:
yield str(installation).rstrip()
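# Usage sketch (illustrative only; pip normally reaches this generator through
# the `pip freeze` command rather than a direct import):
if __name__ == "__main__":
    for frozen_line in freeze(local_only=True):
        print(frozen_line)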
|
gpl-3.0
|
tsgit/invenio
|
modules/miscutil/lib/gc_workaround.py
|
2
|
1297
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2017 CERN, Stanford.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import gc
from functools import wraps
from sys import version_info
def gcfix(f):
"""
delegate garbage collection to the end of the decorated function for python version < 2.7
"""
@wraps(f)
def gcwrapper(*args, **kwargs):
gcfixme = version_info[0] < 3 and version_info[1] < 7 \
and gc.isenabled()
if gcfixme:
gc.disable()
retval = f(*args, **kwargs)
if gcfixme:
gc.enable()
return retval
return gcwrapper
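# Usage sketch (illustrative only): defer collection around an allocation-heavy
# call on the interpreter versions targeted above.
@gcfix
def build_big_list(n):
    return [str(i) for i in range(n)]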
|
gpl-2.0
|
open-power-ref-design/opsmgr
|
plugins/provisioning/swift-proxy/nagios/plugins/check-swift-space.py
|
4
|
2117
|
#!/usr/bin/python
#
# Copyright 2012 iomart Cloud Services Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import subprocess
from optparse import OptionParser
from swift_commands import SWIFT_RECON
parser = OptionParser()
parser.add_option("-w", "--warning", dest="warning",
help="threshold for WARNING", default="70", type="int")
parser.add_option("-c", "--critical", dest="critical",
help="threshold for CRITICAL", default="80", type="int")
(options, args) = parser.parse_args()
process = subprocess.Popen([SWIFT_RECON, "-d"], stdout=subprocess.PIPE)
# looking for line like
# Disk usage: lowest: 18.02%, highest: 29.2%, avg: 25.8431666667%
for line in process.stdout.readlines():
if line.startswith("Disk usage: lowest"):
        usage_line_parts = line.split(" ")
        highest_percent = None  # avoid a NameError below if "highest:" is missing
i=0
for part in usage_line_parts:
if part=="highest:":
highest_percent = usage_line_parts[i+1]
break
i+=1
if highest_percent==None:
print "UNKNOWN : highest percentage could not be located in %s" % line
sys.exit(3)
highest = float(highest_percent.split("%")[0])
if highest>options.critical:
print "CRITICAL : at least one disk at %d%%" % highest
sys.exit(2)
if highest>options.warning:
print "WARNING : at least one disk at %d%%" % highest
sys.exit(1)
print "OK : %s" % line
sys.exit(0)
print "UNKNOWN: Disk usage line not found"
sys.exit(3)
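# Illustrative Nagios command definition (the install path is an assumption; the
# -w/-c thresholds map to the options parsed above):
#   define command {
#       command_name  check_swift_space
#       command_line  /usr/local/bin/check-swift-space.py -w 70 -c 80
#   }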
|
apache-2.0
|
pombredanne/sortinghat
|
tests/test_cmd_move.py
|
1
|
5882
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Bitergia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Authors:
# Santiago Dueñas <[email protected]>
#
from __future__ import absolute_import
from __future__ import unicode_literals
import sys
import unittest
if not '..' in sys.path:
sys.path.insert(0, '..')
from sortinghat import api
from sortinghat.command import CMD_SUCCESS, CMD_FAILURE
from sortinghat.cmd.move import Move
from sortinghat.db.database import Database
from tests.config import DB_USER, DB_PASSWORD, DB_NAME, DB_HOST, DB_PORT
MOVE_FROM_ID_NOT_FOUND_ERROR = "Error: FFFFFFFFFFF not found in the registry"
MOVE_TO_UUID_NOT_FOUND_ERROR = "Error: Jane Rae not found in the registry"
MOVE_OUTPUT = """Identity b4c250eaaf873a04093319f26ca13b02a9248251 moved to unique identity John Smith"""
MOVE_NEW_UID_OUTPUT = """New unique identity b4c250eaaf873a04093319f26ca13b02a9248251 created. Identity moved"""
MOVE_EMPTY_OUTPUT = ""
class TestBaseCase(unittest.TestCase):
"""Defines common setup and teardown methods on add unit tests"""
def setUp(self):
if not hasattr(sys.stdout, 'getvalue') and not hasattr(sys.stderr, 'getvalue'):
self.fail('This test needs to be run in buffered mode')
# Create a connection to check the contents of the registry
self.db = Database(DB_USER, DB_PASSWORD, DB_NAME, DB_HOST, DB_PORT)
self.db.clear()
self._load_test_dataset()
# Create command
self.kwargs = {'user' : DB_USER,
'password' : DB_PASSWORD,
'database' :DB_NAME,
'host' : DB_HOST,
'port' : DB_PORT}
self.cmd = Move(**self.kwargs)
def tearDown(self):
self.db.clear()
def _load_test_dataset(self):
api.add_unique_identity(self.db, 'John Smith')
api.add_identity(self.db, 'scm', '[email protected]',
uuid='John Smith')
api.add_identity(self.db, 'scm', '[email protected]', 'John Smith',
uuid='John Smith')
api.add_unique_identity(self.db, 'John Doe')
api.add_identity(self.db, 'scm', '[email protected]',
uuid='John Doe')
class TestMoveCommand(TestBaseCase):
"""Move command unit tests"""
def test_move(self):
"""Check how it works when moving an identity"""
# Move an identity
code = self.cmd.run('b4c250eaaf873a04093319f26ca13b02a9248251', 'John Smith')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, MOVE_OUTPUT)
class TestMove(TestBaseCase):
"""Unit tests for move"""
def test_move(self):
"""Check behaviour moving an identity"""
code = self.cmd.move('b4c250eaaf873a04093319f26ca13b02a9248251', 'John Smith')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, MOVE_OUTPUT)
def test_not_found_from_id_identity(self):
"""Check if it fails moving an identity that does not exist"""
code = self.cmd.move('FFFFFFFFFFF', 'John Smith')
self.assertEqual(code, CMD_FAILURE)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, MOVE_FROM_ID_NOT_FOUND_ERROR)
def test_not_found_to_uuid_unique_identity(self):
"""Check if it fails moving an identity to a unique identity that does not exist"""
code = self.cmd.move('b4c250eaaf873a04093319f26ca13b02a9248251', 'Jane Rae')
self.assertEqual(code, CMD_FAILURE)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, MOVE_TO_UUID_NOT_FOUND_ERROR)
def test_create_new_unique_identity(self):
"""Check if a new unique identity is created when both uuids are equal"""
code = self.cmd.move('b4c250eaaf873a04093319f26ca13b02a9248251', 'b4c250eaaf873a04093319f26ca13b02a9248251')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, MOVE_NEW_UID_OUTPUT)
def test_none_ids(self):
"""Check behavior moving None ids"""
code = self.cmd.move(None, 'John Smith')
self.assertEqual(code, CMD_SUCCESS)
code = self.cmd.move('b4c250eaaf873a04093319f26ca13b02a9248251', None)
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, MOVE_EMPTY_OUTPUT)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, MOVE_EMPTY_OUTPUT)
def test_empty_ids(self):
"""Check behavior moving empty ids"""
code = self.cmd.move('', 'John Smith')
self.assertEqual(code, CMD_SUCCESS)
code = self.cmd.move('b4c250eaaf873a04093319f26ca13b02a9248251', '')
self.assertEqual(code, CMD_SUCCESS)
output = sys.stdout.getvalue().strip()
self.assertEqual(output, MOVE_EMPTY_OUTPUT)
output = sys.stderr.getvalue().strip()
self.assertEqual(output, MOVE_EMPTY_OUTPUT)
if __name__ == "__main__":
unittest.main(buffer=True, exit=False)
|
gpl-3.0
|
b0ri5/nishe-googlecode
|
scons/scons-local-1.3.0/SCons/Tool/gs.py
|
5
|
2556
|
"""SCons.Tool.gs
Tool-specific initialization for Ghostscript.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/gs.py 4720 2010/03/24 03:14:11 jars"
import SCons.Action
import SCons.Platform
import SCons.Util
# Ghostscript goes by different names on different platforms...
platform = SCons.Platform.platform_default()
if platform == 'os2':
gs = 'gsos2'
elif platform == 'win32':
gs = 'gswin32c'
else:
gs = 'gs'
GhostscriptAction = None
def generate(env):
"""Add Builders and construction variables for Ghostscript to an
Environment."""
global GhostscriptAction
if GhostscriptAction is None:
GhostscriptAction = SCons.Action.Action('$GSCOM', '$GSCOMSTR')
import pdf
pdf.generate(env)
bld = env['BUILDERS']['PDF']
bld.add_action('.ps', GhostscriptAction)
env['GS'] = gs
env['GSFLAGS'] = SCons.Util.CLVar('-dNOPAUSE -dBATCH -sDEVICE=pdfwrite')
env['GSCOM'] = '$GS $GSFLAGS -sOutputFile=$TARGET $SOURCES'
def exists(env):
if env.has_key('PS2PDF'):
return env.Detect(env['PS2PDF'])
else:
return env.Detect(gs) or SCons.Util.WhereIs(gs)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
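# Usage sketch (assumed SConstruct fragment, not part of this tool file): once
# the tool is loaded, the Ghostscript action registered above turns .ps sources
# into PDFs.
#   env = Environment(tools=['default', 'gs'])
#   env.PDF(target='doc.pdf', source='doc.ps')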
|
lgpl-3.0
|
sderrick57/adaTRHsensor
|
PyMata/pymata_serial.py
|
1
|
3837
|
__author__ = 'Copyright (c) 2013 Alan Yorinks All rights reserved.'
"""
@author: Alan Yorinks
Copyright (c) 2013-17 Alan Yorinks All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import threading
import time
import sys
import serial
class PyMataSerial(threading.Thread):
"""
This class manages the serial port for Arduino serial communications
"""
# class variables
arduino = serial.Serial()
port_id = ""
baud_rate = 57600
timeout = 1
command_deque = None
def __init__(self, port_id, command_deque):
"""
Constructor:
@param command_deque: A reference to the deque shared with the _command_handler
"""
self.port_id = port_id
self.command_deque = command_deque
threading.Thread.__init__(self)
self.daemon = True
self.arduino = serial.Serial(self.port_id, self.baud_rate,
timeout=int(self.timeout), writeTimeout=0)
self.stop_event = threading.Event()
# without this, running python 3.4 is extremely sluggish
if sys.platform == 'linux':
# noinspection PyUnresolvedReferences
self.arduino.nonblocking()
def stop(self):
self.stop_event.set()
def is_stopped(self):
return self.stop_event.is_set()
def open(self, verbose):
"""
open the serial port using the configuration data
returns a reference to this instance
"""
# open a serial port
if verbose:
print('\nOpening Arduino Serial port %s ' % self.port_id)
try:
# in case the port is already open, let's close it and then
# reopen it
self.arduino.close()
time.sleep(1)
self.arduino.open()
time.sleep(1)
return self.arduino
except Exception:
# opened failed - will report back to caller
raise
def close(self):
"""
Close the serial port
return: None
"""
try:
self.arduino.close()
except OSError:
pass
def write(self, data):
"""
write the data to the serial port
return: None
"""
if sys.version_info[0] < 3:
self.arduino.write(data)
else:
self.arduino.write(bytes([ord(data)]))
# noinspection PyExceptClausesOrder
def run(self):
"""
This method continually runs. If an incoming character is available on the serial port
it is read and placed on the _command_deque
@return: Never Returns
"""
while not self.is_stopped():
# we can get an OSError: [Errno9] Bad file descriptor when shutting down
# just ignore it
try:
if self.arduino.inWaiting():
c = self.arduino.read()
self.command_deque.append(ord(c))
else:
time.sleep(.1)
except OSError:
pass
except IOError:
self.stop()
self.close()
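# Usage sketch (illustrative only): the serial device path below is an assumption
# and a board must actually be attached for the open to succeed.
if __name__ == '__main__':
    from collections import deque
    incoming = deque()
    transport = PyMataSerial('/dev/ttyACM0', incoming)
    transport.open(verbose=True)
    transport.start()  # reader thread appends received bytes to `incoming`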
|
gpl-3.0
|
LukeM12/samba
|
buildtools/wafadmin/Tools/osx.py
|
14
|
5819
|
#!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy 2008
"""MacOSX related tools
To compile an executable into a Mac application bundle (a .app), set its 'mac_app' attribute
obj.mac_app = True
To make a bundled shared library (a .bundle), set the 'mac_bundle' attribute:
obj.mac_bundle = True
"""
import os, shutil, sys, platform
import TaskGen, Task, Build, Options, Utils
from TaskGen import taskgen, feature, after, before
from Logs import error, debug
# plist template
app_info = '''
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd">
<plist version="0.9">
<dict>
<key>CFBundlePackageType</key>
<string>APPL</string>
<key>CFBundleGetInfoString</key>
<string>Created by Waf</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>NOTE</key>
<string>THIS IS A GENERATED FILE, DO NOT MODIFY</string>
<key>CFBundleExecutable</key>
<string>%s</string>
</dict>
</plist>
'''
# see WAF issue 285
# and also http://trac.macports.org/ticket/17059
@feature('cc', 'cxx')
@before('apply_lib_vars')
def set_macosx_deployment_target(self):
if self.env['MACOSX_DEPLOYMENT_TARGET']:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env['MACOSX_DEPLOYMENT_TARGET']
elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ:
if sys.platform == 'darwin':
os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2])
@feature('cc', 'cxx')
@after('apply_lib_vars')
def apply_framework(self):
for x in self.to_list(self.env['FRAMEWORKPATH']):
frameworkpath_st = '-F%s'
self.env.append_unique('CXXFLAGS', frameworkpath_st % x)
self.env.append_unique('CCFLAGS', frameworkpath_st % x)
self.env.append_unique('LINKFLAGS', frameworkpath_st % x)
for x in self.to_list(self.env['FRAMEWORK']):
self.env.append_value('LINKFLAGS', ['-framework', x])
@taskgen
def create_bundle_dirs(self, name, out):
bld = self.bld
dir = out.parent.get_dir(name)
if not dir:
dir = out.__class__(name, out.parent, 1)
bld.rescan(dir)
contents = out.__class__('Contents', dir, 1)
bld.rescan(contents)
macos = out.__class__('MacOS', contents, 1)
bld.rescan(macos)
return dir
def bundle_name_for_output(out):
name = out.name
k = name.rfind('.')
if k >= 0:
name = name[:k] + '.app'
else:
name = name + '.app'
return name
@taskgen
@after('apply_link')
@feature('cprogram')
def create_task_macapp(self):
"""Use env['MACAPP'] to force *all* executables to be transformed into Mac applications
or use obj.mac_app = True to build specific targets as Mac apps"""
if self.env['MACAPP'] or getattr(self, 'mac_app', False):
apptask = self.create_task('macapp')
apptask.set_inputs(self.link_task.outputs)
out = self.link_task.outputs[0]
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'MacOS', out.name])
apptask.set_outputs([n1])
apptask.chmod = 0755
apptask.install_path = os.path.join(self.install_path, name, 'Contents', 'MacOS')
self.apptask = apptask
@after('apply_link')
@feature('cprogram')
def create_task_macplist(self):
"""Use env['MACAPP'] to force *all* executables to be transformed into Mac applications
or use obj.mac_app = True to build specific targets as Mac apps"""
if self.env['MACAPP'] or getattr(self, 'mac_app', False):
# check if the user specified a plist before using our template
if not getattr(self, 'mac_plist', False):
self.mac_plist = app_info
plisttask = self.create_task('macplist')
plisttask.set_inputs(self.link_task.outputs)
out = self.link_task.outputs[0]
self.mac_plist = self.mac_plist % (out.name)
name = bundle_name_for_output(out)
dir = self.create_bundle_dirs(name, out)
n1 = dir.find_or_declare(['Contents', 'Info.plist'])
plisttask.set_outputs([n1])
plisttask.mac_plist = self.mac_plist
plisttask.install_path = os.path.join(self.install_path, name, 'Contents')
self.plisttask = plisttask
@after('apply_link')
@feature('cshlib')
def apply_link_osx(self):
name = self.link_task.outputs[0].name
if not self.install_path:
return
if getattr(self, 'vnum', None):
name = name.replace('.dylib', '.%s.dylib' % self.vnum)
path = os.path.join(Utils.subst_vars(self.install_path, self.env), name)
if '-dynamiclib' in self.env['LINKFLAGS']:
self.env.append_value('LINKFLAGS', '-install_name')
self.env.append_value('LINKFLAGS', path)
@before('apply_link', 'apply_lib_vars')
@feature('cc', 'cxx')
def apply_bundle(self):
"""use env['MACBUNDLE'] to force all shlibs into mac bundles
or use obj.mac_bundle = True for specific targets only"""
if not ('cshlib' in self.features or 'shlib' in self.features): return
if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False):
self.env['shlib_PATTERN'] = self.env['macbundle_PATTERN']
uselib = self.uselib = self.to_list(self.uselib)
if not 'MACBUNDLE' in uselib: uselib.append('MACBUNDLE')
@after('apply_link')
@feature('cshlib')
def apply_bundle_remove_dynamiclib(self):
if self.env['MACBUNDLE'] or getattr(self, 'mac_bundle', False):
if not getattr(self, 'vnum', None):
try:
self.env['LINKFLAGS'].remove('-dynamiclib')
self.env['LINKFLAGS'].remove('-single_module')
except ValueError:
pass
# TODO REMOVE IN 1.6 (global variable)
app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources']
def app_build(task):
env = task.env
shutil.copy2(task.inputs[0].srcpath(env), task.outputs[0].abspath(env))
return 0
def plist_build(task):
env = task.env
f = open(task.outputs[0].abspath(env), "w")
f.write(task.mac_plist)
f.close()
return 0
Task.task_type_from_func('macapp', vars=[], func=app_build, after="cxx_link cc_link static_link")
Task.task_type_from_func('macplist', vars=[], func=plist_build, after="cxx_link cc_link static_link")
|
gpl-3.0
|
AppRevelations/Utsav
|
utsav/migrations/0001_initial.py
|
1
|
2263
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('event_name', models.CharField(max_length=256)),
('event_desc', models.CharField(max_length=1000)),
('place', models.CharField(max_length=128)),
('timings', models.CharField(max_length=100)),
('image_url', models.URLField()),
('contact_person', models.CharField(max_length=256)),
('contact_number', models.CharField(max_length=20)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Fest',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('fest_name', models.CharField(max_length=256)),
('fest_desc', models.CharField(max_length=1000)),
('place', models.CharField(max_length=128)),
('timings', models.CharField(max_length=100)),
('image_url', models.URLField()),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('user_name', models.CharField(max_length=256)),
('email_id', models.EmailField(max_length=75)),
('contact_number', models.CharField(max_length=20)),
('image_url', models.URLField()),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='event',
name='fest_id',
field=models.ForeignKey(to='utsav.Fest'),
preserve_default=True,
),
]
|
apache-2.0
|
zaafar/bcc
|
src/cc/frontends/p4/compiler/programSerializer.py
|
10
|
1508
|
#!/usr/bin/env python
# helper for building C program source text
from compilationException import *
class ProgramSerializer(object):
def __init__(self):
self.program = ""
self.eol = "\n"
self.currentIndent = 0
self.INDENT_AMOUNT = 4 # default indent amount
def __str__(self):
return self.program
def increaseIndent(self):
self.currentIndent += self.INDENT_AMOUNT
def decreaseIndent(self):
self.currentIndent -= self.INDENT_AMOUNT
if self.currentIndent < 0:
raise CompilationException(True, "Negative indentation level")
def toString(self):
return self.program
def space(self):
self.append(" ")
def newline(self):
self.program += self.eol
def endOfStatement(self, addNewline):
self.append(";")
if addNewline:
self.newline()
def append(self, string):
self.program += str(string)
def appendFormat(self, format, *args):
string = format.format(*args)
self.append(string)
def appendLine(self, string):
self.append(string)
self.newline()
def emitIndent(self):
self.program += " " * self.currentIndent
def blockStart(self):
self.append("{")
self.newline()
self.increaseIndent()
def blockEnd(self, addNewline):
self.decreaseIndent()
self.emitIndent()
self.append("}")
if addNewline:
self.newline()
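# Usage sketch (assumed example, not part of the original module): emitting a
# tiny C function with the serializer; the helper name below is illustrative.
def _example_emit_main():
    s = ProgramSerializer()
    s.appendLine("int main(void)")
    s.blockStart()                 # appends "{", a newline, and indents
    s.emitIndent()
    s.appendFormat("return {0}", 0)
    s.endOfStatement(True)         # appends ";" and a newline
    s.blockEnd(True)               # dedents, appends "}" and a newline
    return s.toString()            # "int main(void)\n{\n    return 0;\n}\n"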
|
apache-2.0
|
coreboot-gs45/coreboot
|
util/dtd_parser/dtd_parser.py
|
18
|
7171
|
#!/usr/bin/python
# dtd_parser.py - DTD structure parser
#
# Copyright (C) 2012 The ChromiumOS Authors. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
'''
DTD string parser/generator.
A detailed timing descriptor (DTD) is an 18 byte array describing a video mode
(screen resolution, display properties, etc.) in EDID, used by the Intel Option
ROM. The Option ROM can support multiple video modes; the specific mode is picked
by the BIOS through the appropriate Option ROM callback function.
This program allows interpreting an 18 byte hex DTD dump, and/or modifying
certain values to generate a new DTD.
'''
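# Example invocation (assumed, for illustration only): the 18 DTD bytes are
# passed as separate hex values, optionally preceded by -m to edit fields
# interactively:
#   python dtd_parser.py <b0> <b1> ... <b17>
#   python dtd_parser.py -m <b0> <b1> ... <b17>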
import sys
#
# The DTD array format description can be found in
# http://en.wikipedia.org/wiki/Extended_display_identification_data, (see the
# EDID Detailed Timing Descriptor section).
#
# The below dictionary describes how different DTD parameters are laid out in
# the array. Note that many parameters span multiple bit fields in the DTD.
#
# The keys in the dictionary are stings (field names), the values are tuples
# of either numbers or tri-tuples. If the element of the tuple is a number, it
# is the offset in DTD, and the entire byte is used in this field. If the
# element is a tri-tuple, its components are (DTD offset, bit shift, field
# width).
#
# The partial values are extracted from the DTD fields and concatenated
# together to form actual parameter value.
#
dtd_descriptor = {
'dclck' : (1, 0),
'hor_active' : ((4, 4, 4), 2),
'hor_blank' : ((4, 0, 4), 3),
'vert_act' : ((7, 4, 4), 5),
'vert_blank' : ((7, 0, 4), 6),
'hsync_offset' : ((11, 6, 2), 8),
'hsync_pulse_width' : ((11, 4, 2), 9),
'vsync_offset' : ((11, 2, 2), (10, 4, 4)),
'vsync_pulse_width' : ((11, 0, 2), (10, 0, 4)),
'hor_image_size' : ((14, 4, 4), 12),
'vert_image_size' : ((14, 0, 4), 13),
'hor_border' : (15,),
'vert_border' : (16,),
'interlaced' : ((17, 7, 1),),
'reserved' : ((17, 5, 2), (17, 0, 1)),
'digital_separate' : ((17, 3, 2),),
'vert_polarity' : ((17, 2, 1),),
'hor_polarity' : ((17, 1, 1),),
}
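# Illustrative sketch (assumed example, not part of the original parser): how a
# parameter spanning multiple bit fields is assembled. 'hor_active' is described
# by ((4, 4, 4), 2): the high nibble of byte 4 supplies the upper four bits and
# all of byte 2 supplies the lower eight bits. The byte values are made up.
def _example_extract_hor_active():
    harray = [0] * 18
    harray[4] = 0x51               # upper nibble 0x5 -> top 4 bits
    harray[2] = 0x80               # full byte -> low 8 bits
    value = 0
    for tup in dtd_descriptor['hor_active']:
        if isinstance(tup, tuple):
            offset, shift, width = tup
        else:
            offset, shift, width = tup, 0, 8
        mask = (1 << width) - 1
        value = (value << width) + ((harray[offset] >> shift) & mask)
    return value                   # (0x5 << 8) | 0x80 == 0x580 == 1408 pixels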
PREFIX = 'attr_'
class DTD(object):
'''An object containing all DTD information.
The attributes are created dynamically when the input DTD string is
parsed. For each element of the above dictionary two attributes are added:
'attr_<param>' to hold the actual parameter value
'max_attr_<param>' to hold the maximum allowed value for this parameter.
'''
def __init__(self):
for name in dtd_descriptor:
setattr(self, PREFIX + name, 0)
def init(self, sarray):
'''Initialize the object with values from a DTD array.
Inputs:
sarray: a string, an array of ASCII hex representations of the 18 DTD
bytes.
Raises: implicitly raises ValueError or IndexError exceptions in case
the input string has fewer than 18 elements, or some of the
elements cannot be converted to an integer.
'''
harray = [int(x, 16) for x in sarray]
for name, desc in dtd_descriptor.iteritems():
attr_value = 0
total_width = 0
for tup in desc:
if isinstance(tup, tuple):
offset, shift, width = tup
else:
offset, shift, width = tup, 0, 8
mask = (1 << width) - 1
attr_value = (attr_value << width) + (
(harray[offset] >> shift) & mask)
total_width += width
setattr(self, PREFIX + name, attr_value)
setattr(self, 'max_' + PREFIX + name, (1 << total_width) - 1)
def __str__(self):
text = []
for name in sorted(dtd_descriptor.keys()):
text.append('%20s: %d' % (name, getattr(self, PREFIX + name)))
return '\n'.join(text)
def inhex(self):
'''Generate contents of the DTD as an 18 byte ASCII hex array.'''
result = [0] * 18
for name, desc in dtd_descriptor.iteritems():
attr_value = getattr(self, PREFIX + name)
rdesc = list(desc)
rdesc.reverse()
for tup in rdesc:
if isinstance(tup, tuple):
offset, shift, width = tup
else:
offset, shift, width = tup, 0, 8
mask = (1 << width) - 1
value = attr_value & mask
attr_value = attr_value >> width
result[offset] = (result[offset] & ~(
mask << shift)) | (value << shift)
return ' '.join('%2.2x' % x for x in result)
def handle_input(self, name):
'''Get user input and set a new parameter value if required.
Display the parameter name, its current value, and prompt user for a
new value.
If the user enters a dot, stop processing (return True).
Empty user input means that this parameter does not have to change,
but the next parameter should be prompted.
If input is non-empty, it is interpreted as a hex number, checked if
it fits the parameter and the new parameter value is set if checks
pass.
Inputs:
name - a string, parameter name, a key in dtd_descriptor
Returns:
Boolean, True meaning no more fields need to be modified, False
meaning that further field modifications should be prompted for.
'''
param = PREFIX + name
vmax = getattr(self, 'max_' + param)
new_value = raw_input('%s : %d ' % (name, getattr(self, param)))
if new_value == '':
return False
if new_value == '.':
return True
new_int = int(new_value)
if new_int > vmax:
print '%s exceeds maximum for %s (%d)' % (new_value, name, vmax)
else:
setattr(self, param, new_int)
return False
def main(args):
if args[0] == '-m':
modify = True
base = 1
else:
modify = False
base = 0
d = DTD()
d.init(args[base:])
if modify:
for line in str(d).splitlines():
if d.handle_input(line.split(':')[0].strip()):
break
print d
if modify:
print d.inhex()
if __name__ == '__main__':
try:
main(sys.argv[1:])
except (ValueError, IndexError):
print """
A string of 18 byte values in hex is required.
'-m' preceding the string will allow setting new parameter values.
"""
sys.exit(1)
|
gpl-2.0
|
mindbender-studio/setup
|
bin/windows/python36/Lib/curses/__init__.py
|
116
|
3366
|
"""curses
The main package for curses support for Python. Normally used by importing
the package, and perhaps a particular module inside it.
import curses
from curses import textpad
curses.initscr()
...
"""
from _curses import *
import os as _os
import sys as _sys
# Some constants, most notably the ACS_* ones, are only added to the C
# _curses module's dictionary after initscr() is called. (Some
# versions of SGI's curses don't define values for those constants
# until initscr() has been called.) This wrapper function calls the
# underlying C initscr(), and then copies the constants from the
# _curses module to the curses package's dictionary. Don't do 'from
# curses import *' if you'll be needing the ACS_* constants.
def initscr():
import _curses, curses
# we call setupterm() here because it raises an error
# instead of calling exit() in error cases.
setupterm(term=_os.environ.get("TERM", "unknown"),
fd=_sys.__stdout__.fileno())
stdscr = _curses.initscr()
for key, value in _curses.__dict__.items():
if key[0:4] == 'ACS_' or key in ('LINES', 'COLS'):
setattr(curses, key, value)
return stdscr
# This is a similar wrapper for start_color(), which adds the COLORS and
# COLOR_PAIRS variables which are only available after start_color() is
# called.
def start_color():
import _curses, curses
retval = _curses.start_color()
if hasattr(_curses, 'COLORS'):
curses.COLORS = _curses.COLORS
if hasattr(_curses, 'COLOR_PAIRS'):
curses.COLOR_PAIRS = _curses.COLOR_PAIRS
return retval
# Import Python has_key() implementation if _curses doesn't contain has_key()
try:
has_key
except NameError:
from .has_key import has_key
# Wrapper for the entire curses-based application. Runs a function which
# should be the rest of your curses-based application. If the application
# raises an exception, wrapper() will restore the terminal to a sane state so
# you can read the resulting traceback.
def wrapper(func, *args, **kwds):
"""Wrapper function that initializes curses and calls another function,
restoring normal keyboard/screen behavior on error.
The callable object 'func' is then passed the main window 'stdscr'
as its first argument, followed by any other arguments passed to
wrapper().
"""
try:
# Initialize curses
stdscr = initscr()
# Turn off echoing of keys, and enter cbreak mode,
# where no buffering is performed on keyboard input
noecho()
cbreak()
# In keypad mode, escape sequences for special keys
# (like the cursor keys) will be interpreted and
# a special value like curses.KEY_LEFT will be returned
stdscr.keypad(1)
# Start color, too. Harmless if the terminal doesn't have
# color; user can test with has_color() later on. The try/catch
# works around a minor bit of over-conscientiousness in the curses
# module -- the error return from C start_color() is ignorable.
try:
start_color()
except:
pass
return func(stdscr, *args, **kwds)
finally:
# Set everything back to normal
if 'stdscr' in locals():
stdscr.keypad(0)
echo()
nocbreak()
endwin()
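# Usage sketch (assumed example, not part of the stdlib module): a curses
# program is normally written as a function taking stdscr and handed to
# wrapper(), which guarantees echo/cbreak/keypad state is restored even if
# the function raises:
#
#     def demo(stdscr):
#         stdscr.addstr(0, 0, "Hello from curses")
#         stdscr.getkey()
#
#     wrapper(demo)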
|
mit
|
Crandy/robobrowser
|
robobrowser/helpers.py
|
5
|
2534
|
"""
Miscellaneous helper functions
"""
import re
from bs4 import BeautifulSoup
from bs4.element import Tag
from robobrowser.compat import string_types, iteritems
def match_text(text, tag):
if isinstance(text, string_types):
return text in tag.text
if isinstance(text, re._pattern_type):
return text.search(tag.text)
def find_all(soup, name=None, attrs=None, recursive=True, text=None,
limit=None, **kwargs):
"""The `find` and `find_all` methods of `BeautifulSoup` don't handle the
`text` parameter combined with other parameters. This is necessary for
e.g. finding links containing a string or pattern. This method searches by
the standard BeautifulSoup arguments first, then filters the results by
text content.
"""
if text is None:
return soup.find_all(
name, attrs or {}, recursive, text, limit, **kwargs
)
if isinstance(text, string_types):
text = re.compile(re.escape(text), re.I)
tags = soup.find_all(
name, attrs or {}, recursive, **kwargs
)
rv = []
for tag in tags:
if match_text(text, tag):
rv.append(tag)
if limit is not None and len(rv) >= limit:
break
return rv
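# Usage sketch (assumed example): find anchor tags whose text contains
# "download" (case-insensitively, via the re.I compile above), keeping at most
# the first two matches. The markup below is illustrative only.
def _example_find_download_links():
    soup = BeautifulSoup(
        '<a href="/a">Download A</a><a href="/b">download b</a>',
        features='html.parser')
    return find_all(soup, 'a', text='download', limit=2)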
def find(soup, name=None, attrs=None, recursive=True, text=None, **kwargs):
"""Modified find method; see `find_all`, above.
"""
tags = find_all(
soup, name, attrs or {}, recursive, text, 1, **kwargs
)
if tags:
return tags[0]
def ensure_soup(value, parser=None):
"""Coerce a value (or list of values) to Tag (or list of Tag).
:param value: String, BeautifulSoup, Tag, or list of the above
:param str parser: Parser to use; defaults to BeautifulSoup default
:return: Tag or list of Tags
"""
if isinstance(value, BeautifulSoup):
return value.find()
if isinstance(value, Tag):
return value
if isinstance(value, list):
return [
ensure_soup(item, parser=parser)
for item in value
]
parsed = BeautifulSoup(value, features=parser)
return parsed.find()
def lowercase_attr_names(tag):
"""Lower-case all attribute names of the provided BeautifulSoup tag.
Note: this mutates the tag's attribute names and does not return a new
tag.
:param tag: BeautifulSoup tag
"""
# Use list comprehension instead of dict comprehension for 2.6 support
tag.attrs = dict([
(key.lower(), value)
for key, value in iteritems(tag.attrs)
])
|
bsd-3-clause
|
VaybhavSharma/commons
|
pants-plugins/src/python/twitter/common/pants/python/commons/read_contents.py
|
13
|
1279
|
# ==================================================================================================
# Copyright 2014 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
import os
def read_contents_factory(parse_context):
def read_contents(*paths):
"""Returns the concatenated contents of the files at the given paths relative to this BUILD
file.
"""
contents = ''
for path in paths:
with open(os.path.join(parse_context.rel_path, path)) as fp:
contents += fp.read()
return contents
return read_contents
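# Usage sketch (assumed): inside a BUILD file the bound function concatenates
# files relative to that BUILD file's directory, e.g.
#   notes = read_contents('CHANGELOG.md', 'NOTICE')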
|
apache-2.0
|
hyowon/servo
|
tests/wpt/web-platform-tests/tools/pywebsocket/src/mod_pywebsocket/handshake/__init__.py
|
658
|
4406
|
# Copyright 2011, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""WebSocket opening handshake processor. This class try to apply available
opening handshake processors for each protocol version until a connection is
successfully established.
"""
import logging
from mod_pywebsocket import common
from mod_pywebsocket.handshake import hybi00
from mod_pywebsocket.handshake import hybi
# Export AbortedByUserException, HandshakeException, and VersionException
# symbol from this module.
from mod_pywebsocket.handshake._base import AbortedByUserException
from mod_pywebsocket.handshake._base import HandshakeException
from mod_pywebsocket.handshake._base import VersionException
_LOGGER = logging.getLogger(__name__)
def do_handshake(request, dispatcher, allowDraft75=False, strict=False):
"""Performs WebSocket handshake.
Args:
request: mod_python request.
dispatcher: Dispatcher (dispatch.Dispatcher).
allowDraft75: obsolete argument. ignored.
strict: obsolete argument. ignored.
Handshaker will add attributes such as ws_resource in performing
handshake.
"""
_LOGGER.debug('Client\'s opening handshake resource: %r', request.uri)
# To print mimetools.Message as an escaped one-line string, we convert
# headers_in to a dict object. Without conversion, if we use %r, it just
# prints the type and address, and if we use %s, it prints the original
# header string as multiple lines.
#
# Both mimetools.Message and MpTable_Type of mod_python can be
# converted to dict.
#
# mimetools.Message.__str__ returns the original header string.
# dict(mimetools.Message object) returns the map from header names to
# header values. While MpTable_Type doesn't have such __str__ but just
# __repr__ which formats itself as well as dictionary object.
_LOGGER.debug(
'Client\'s opening handshake headers: %r', dict(request.headers_in))
handshakers = []
handshakers.append(
('RFC 6455', hybi.Handshaker(request, dispatcher)))
handshakers.append(
('HyBi 00', hybi00.Handshaker(request, dispatcher)))
for name, handshaker in handshakers:
_LOGGER.debug('Trying protocol version %s', name)
try:
handshaker.do_handshake()
_LOGGER.info('Established (%s protocol)', name)
return
except HandshakeException, e:
_LOGGER.debug(
'Failed to complete opening handshake as %s protocol: %r',
name, e)
if e.status:
raise e
except AbortedByUserException, e:
raise
except VersionException, e:
raise
# TODO(toyoshim): Add a test to cover the case all handshakers fail.
raise HandshakeException(
'Failed to complete opening handshake for all available protocols',
status=common.HTTP_STATUS_BAD_REQUEST)
# vi:sts=4 sw=4 et
|
mpl-2.0
|
sss/calibre-at-bzr
|
src/calibre/ebooks/pdf/render/engine.py
|
3
|
14275
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2012, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import sys, traceback, math
from collections import namedtuple
from functools import wraps, partial
from future_builtins import map
import sip
from PyQt4.Qt import (QPaintEngine, QPaintDevice, Qt, QTransform, QBrush)
from calibre.constants import plugins
from calibre.ebooks.pdf.render.serialize import (PDFStream, Path)
from calibre.ebooks.pdf.render.common import inch, A4, fmtnum
from calibre.ebooks.pdf.render.graphics import convert_path, Graphics
from calibre.utils.fonts.sfnt.container import Sfnt, UnsupportedFont
from calibre.utils.fonts.sfnt.metrics import FontMetrics
Point = namedtuple('Point', 'x y')
ColorState = namedtuple('ColorState', 'color opacity do')
def repr_transform(t):
vals = map(fmtnum, (t.m11(), t.m12(), t.m21(), t.m22(), t.dx(), t.dy()))
return '[%s]'%' '.join(vals)
def store_error(func):
@wraps(func)
def errh(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
except:
self.errors_occurred = True
self.errors(traceback.format_exc())
return errh
class Font(FontMetrics):
def __init__(self, sfnt):
FontMetrics.__init__(self, sfnt)
self.glyph_map = {}
class PdfEngine(QPaintEngine):
FEATURES = QPaintEngine.AllFeatures & ~(
QPaintEngine.PorterDuff | QPaintEngine.PerspectiveTransform
| QPaintEngine.ObjectBoundingModeGradients
| QPaintEngine.RadialGradientFill
| QPaintEngine.ConicalGradientFill
)
def __init__(self, file_object, page_width, page_height, left_margin,
top_margin, right_margin, bottom_margin, width, height,
errors=print, debug=print, compress=True,
mark_links=False):
QPaintEngine.__init__(self, self.FEATURES)
self.file_object = file_object
self.compress, self.mark_links = compress, mark_links
self.page_height, self.page_width = page_height, page_width
self.left_margin, self.top_margin = left_margin, top_margin
self.right_margin, self.bottom_margin = right_margin, bottom_margin
self.pixel_width, self.pixel_height = width, height
# Setup a co-ordinate transform that allows us to use co-ords
# from Qt's pixel based co-ordinate system with its origin at the top
# left corner. PDF's co-ordinate system is based on pts and has its
# origin in the bottom left corner. We also have to implement the page
# margins. Therefore, we need to translate, scale and reflect about the
# x-axis.
dy = self.page_height - self.top_margin
dx = self.left_margin
sx = (self.page_width - self.left_margin -
self.right_margin) / self.pixel_width
sy = (self.page_height - self.top_margin -
self.bottom_margin) / self.pixel_height
self.pdf_system = QTransform(sx, 0, 0, -sy, dx, dy)
self.graphics = Graphics(self.pixel_width, self.pixel_height)
self.errors_occurred = False
self.errors, self.debug = errors, debug
self.fonts = {}
self.current_page_num = 1
self.current_page_inited = False
self.qt_hack, err = plugins['qt_hack']
if err:
raise RuntimeError('Failed to load qt_hack with err: %s'%err)
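    # Worked example of pdf_system (assumed numbers, for illustration only):
    # for a 595 x 842 pt page with 72 pt margins on every side and a
    # 451 x 698 px pixel space, sx = (595 - 144) / 451 = 1.0 and
    # sy = (842 - 144) / 698 = 1.0, with dx = 72 and dy = 842 - 72 = 770, so
    # the Qt point (0, 0) (top-left of the body) maps to PDF (72, 770) and
    # (451, 698) (bottom-right) maps to (523, 72); the y axis is reflected.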
def apply_graphics_state(self):
self.graphics(self.pdf_system, self.painter())
def resolve_fill(self, rect):
self.graphics.resolve_fill(rect, self.pdf_system,
self.painter().transform())
@property
def do_fill(self):
return self.graphics.current_state.do_fill
@property
def do_stroke(self):
return self.graphics.current_state.do_stroke
def init_page(self):
self.pdf.transform(self.pdf_system)
self.pdf.apply_fill(color=(1, 1, 1)) # QPainter has a default background brush of white
self.graphics.reset()
self.pdf.save_stack()
self.current_page_inited = True
def begin(self, device):
if not hasattr(self, 'pdf'):
try:
self.pdf = PDFStream(self.file_object, (self.page_width,
self.page_height), compress=self.compress,
mark_links=self.mark_links,
debug=self.debug)
self.graphics.begin(self.pdf)
except:
self.errors(traceback.format_exc())
self.errors_occurred = True
return False
return True
def end_page(self):
if self.current_page_inited:
self.pdf.restore_stack()
self.pdf.end_page()
self.current_page_inited = False
self.current_page_num += 1
def end(self):
try:
self.end_page()
self.pdf.end()
except:
self.errors(traceback.format_exc())
self.errors_occurred = True
return False
finally:
self.pdf = self.file_object = None
return True
def type(self):
return QPaintEngine.Pdf
def add_image(self, img, cache_key):
if img.isNull(): return
return self.pdf.add_image(img, cache_key)
@store_error
def drawTiledPixmap(self, rect, pixmap, point):
self.apply_graphics_state()
brush = QBrush(pixmap)
bl = rect.topLeft()
color, opacity, pattern, do_fill = self.graphics.convert_brush(
brush, bl-point, 1.0, self.pdf_system,
self.painter().transform())
self.pdf.save_stack()
self.pdf.apply_fill(color, pattern)
self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
stroke=False, fill=True)
self.pdf.restore_stack()
@store_error
def drawPixmap(self, rect, pixmap, source_rect):
self.apply_graphics_state()
source_rect = source_rect.toRect()
pixmap = (pixmap if source_rect == pixmap.rect() else
pixmap.copy(source_rect))
image = pixmap.toImage()
ref = self.add_image(image, pixmap.cacheKey())
if ref is not None:
self.pdf.draw_image(rect.x(), rect.y(), rect.width(),
rect.height(), ref)
@store_error
def drawImage(self, rect, image, source_rect, flags=Qt.AutoColor):
self.apply_graphics_state()
source_rect = source_rect.toRect()
image = (image if source_rect == image.rect() else
image.copy(source_rect))
ref = self.add_image(image, image.cacheKey())
if ref is not None:
self.pdf.draw_image(rect.x(), rect.y(), rect.width(),
rect.height(), ref)
@store_error
def updateState(self, state):
self.graphics.update_state(state, self.painter())
@store_error
def drawPath(self, path):
self.apply_graphics_state()
p = convert_path(path)
fill_rule = {Qt.OddEvenFill:'evenodd',
Qt.WindingFill:'winding'}[path.fillRule()]
self.pdf.draw_path(p, stroke=self.do_stroke,
fill=self.do_fill, fill_rule=fill_rule)
@store_error
def drawPoints(self, points):
self.apply_graphics_state()
p = Path()
for point in points:
p.move_to(point.x(), point.y())
p.line_to(point.x(), point.y() + 0.001)
self.pdf.draw_path(p, stroke=self.do_stroke, fill=False)
@store_error
def drawRects(self, rects):
self.apply_graphics_state()
with self.graphics:
for rect in rects:
self.resolve_fill(rect)
bl = rect.topLeft()
self.pdf.draw_rect(bl.x(), bl.y(), rect.width(), rect.height(),
stroke=self.do_stroke, fill=self.do_fill)
def create_sfnt(self, text_item):
get_table = partial(self.qt_hack.get_sfnt_table, text_item)
try:
ans = Font(Sfnt(get_table))
except UnsupportedFont as e:
raise UnsupportedFont('The font %s is not a valid sfnt. Error: %s'%(
text_item.font().family(), e))
glyph_map = self.qt_hack.get_glyph_map(text_item)
gm = {}
for uc, glyph_id in enumerate(glyph_map):
if glyph_id not in gm:
gm[glyph_id] = unichr(uc)
ans.full_glyph_map = gm
return ans
@store_error
def drawTextItem(self, point, text_item):
# return super(PdfEngine, self).drawTextItem(point, text_item)
self.apply_graphics_state()
gi = self.qt_hack.get_glyphs(point, text_item)
if not gi.indices:
sip.delete(gi)
return
name = hash(bytes(gi.name))
if name not in self.fonts:
try:
self.fonts[name] = self.create_sfnt(text_item)
except UnsupportedFont:
return super(PdfEngine, self).drawTextItem(point, text_item)
metrics = self.fonts[name]
for glyph_id in gi.indices:
try:
metrics.glyph_map[glyph_id] = metrics.full_glyph_map[glyph_id]
except (KeyError, ValueError):
pass
glyphs = []
last_x = last_y = 0
for i, pos in enumerate(gi.positions):
x, y = pos.x(), pos.y()
glyphs.append((x-last_x, last_y - y, gi.indices[i]))
last_x, last_y = x, y
self.pdf.draw_glyph_run([gi.stretch, 0, 0, -1, 0, 0], gi.size, metrics,
glyphs)
sip.delete(gi)
@store_error
def drawPolygon(self, points, mode):
self.apply_graphics_state()
if not points: return
p = Path()
p.move_to(points[0].x(), points[0].y())
for point in points[1:]:
p.line_to(point.x(), point.y())
p.close()
fill_rule = {self.OddEvenMode:'evenodd',
self.WindingMode:'winding'}.get(mode, 'evenodd')
self.pdf.draw_path(p, stroke=True, fill_rule=fill_rule,
fill=(mode in (self.OddEvenMode, self.WindingMode, self.ConvexMode)))
def set_metadata(self, *args, **kwargs):
self.pdf.set_metadata(*args, **kwargs)
def add_outline(self, toc):
self.pdf.links.add_outline(toc)
def add_links(self, current_item, start_page, links, anchors):
for pos in anchors.itervalues():
pos['left'], pos['top'] = self.pdf_system.map(pos['left'], pos['top'])
for link in links:
pos = link[1]
llx = pos['left']
lly = pos['top'] + pos['height']
urx = pos['left'] + pos['width']
ury = pos['top']
llx, lly = self.pdf_system.map(llx, lly)
urx, ury = self.pdf_system.map(urx, ury)
link[1] = pos['column'] + start_page
link.append((llx, lly, urx, ury))
self.pdf.links.add(current_item, start_page, links, anchors)
class PdfDevice(QPaintDevice): # {{{
def __init__(self, file_object, page_size=A4, left_margin=inch,
top_margin=inch, right_margin=inch, bottom_margin=inch,
xdpi=1200, ydpi=1200, errors=print, debug=print,
compress=True, mark_links=False):
QPaintDevice.__init__(self)
self.xdpi, self.ydpi = xdpi, ydpi
self.page_width, self.page_height = page_size
self.body_width = self.page_width - left_margin - right_margin
self.body_height = self.page_height - top_margin - bottom_margin
self.left_margin, self.right_margin = left_margin, right_margin
self.top_margin, self.bottom_margin = top_margin, bottom_margin
self.engine = PdfEngine(file_object, self.page_width, self.page_height,
left_margin, top_margin, right_margin,
bottom_margin, self.width(), self.height(),
errors=errors, debug=debug, compress=compress,
mark_links=mark_links)
self.add_outline = self.engine.add_outline
self.add_links = self.engine.add_links
def paintEngine(self):
return self.engine
def metric(self, m):
if m in (self.PdmDpiX, self.PdmPhysicalDpiX):
return self.xdpi
if m in (self.PdmDpiY, self.PdmPhysicalDpiY):
return self.ydpi
if m == self.PdmDepth:
return 32
if m == self.PdmNumColors:
return sys.maxint
if m == self.PdmWidthMM:
return int(round(self.body_width * 0.35277777777778))
if m == self.PdmHeightMM:
return int(round(self.body_height * 0.35277777777778))
if m == self.PdmWidth:
return int(round(self.body_width * self.xdpi / 72.0))
if m == self.PdmHeight:
return int(round(self.body_height * self.ydpi / 72.0))
return 0
def end_page(self, *args, **kwargs):
self.engine.end_page(*args, **kwargs)
def init_page(self):
self.engine.init_page()
@property
def full_page_rect(self):
page_width = int(math.ceil(self.page_width * self.xdpi / 72.0))
lm = int(math.ceil(self.left_margin * self.xdpi / 72.0))
page_height = int(math.ceil(self.page_height * self.ydpi / 72.0))
tm = int(math.ceil(self.top_margin * self.ydpi / 72.0))
return (-lm, -tm, page_width+1, page_height+1)
@property
def current_page_num(self):
return self.engine.current_page_num
@property
def errors_occurred(self):
return self.engine.errors_occurred
def to_px(self, pt, vertical=True):
return pt * (self.height()/self.page_height if vertical else
self.width()/self.page_width)
def set_metadata(self, *args, **kwargs):
self.engine.set_metadata(*args, **kwargs)
# }}}
|
gpl-3.0
|
sbalde/edxplatform
|
common/djangoapps/course_modes/migrations/0007_auto__add_coursemodesarchive__chg_field_coursemode_course_id.py
|
102
|
4360
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CourseModesArchive'
db.create_table('course_modes_coursemodesarchive', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255, db_index=True)),
('mode_slug', self.gf('django.db.models.fields.CharField')(max_length=100)),
('mode_display_name', self.gf('django.db.models.fields.CharField')(max_length=255)),
('min_price', self.gf('django.db.models.fields.IntegerField')(default=0)),
('suggested_prices', self.gf('django.db.models.fields.CommaSeparatedIntegerField')(default='', max_length=255, blank=True)),
('currency', self.gf('django.db.models.fields.CharField')(default='usd', max_length=8)),
('expiration_date', self.gf('django.db.models.fields.DateField')(default=None, null=True, blank=True)),
('expiration_datetime', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True, blank=True)),
))
db.send_create_signal('course_modes', ['CourseModesArchive'])
# Changing field 'CourseMode.course_id'
db.alter_column('course_modes_coursemode', 'course_id', self.gf('xmodule_django.models.CourseKeyField')(max_length=255))
def backwards(self, orm):
# Deleting model 'CourseModesArchive'
db.delete_table('course_modes_coursemodesarchive')
# Changing field 'CourseMode.course_id'
db.alter_column('course_modes_coursemode', 'course_id', self.gf('django.db.models.fields.CharField')(max_length=255))
models = {
'course_modes.coursemode': {
'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
},
'course_modes.coursemodesarchive': {
'Meta': {'object_name': 'CourseModesArchive'},
'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'expiration_datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
}
}
complete_apps = ['course_modes']
|
agpl-3.0
|
danmergens/mi-instrument
|
mi/dataset/driver/spkir_abj/dcl/spkir_abj_dcl_telemetered_driver.py
|
7
|
1684
|
##
# OOIPLACEHOLDER
#
# Copyright 2014 Raytheon Co.
##
import os
from mi.core.log import get_logger
from mi.logging import config
from mi.dataset.parser.spkir_abj_dcl import SpkirAbjDclTelemeteredParser
from mi.dataset.dataset_driver import DataSetDriver
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.core.versioning import version
__author__ = "mworden"
class SpkirAbjDclTelemeteredDriver:
def __init__(self, source_file_path, particle_data_handler, parser_config):
self._source_file_path = source_file_path
self._particle_data_handler = particle_data_handler
self._parser_config = parser_config
def process(self):
log = get_logger()
with open(self._source_file_path, "r") as file_handle:
def exception_callback(exception):
log.debug("Exception: %s", exception)
self._particle_data_handler.setParticleDataCaptureFailure()
parser = SpkirAbjDclTelemeteredParser(self._parser_config,
file_handle,
exception_callback)
driver = DataSetDriver(parser, self._particle_data_handler)
driver.processFileStream()
return self._particle_data_handler
@version("15.6.2")
def parse(unused, source_file_path, particle_data_handler):
parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: "mi.dataset.parser.spkir_abj_dcl",
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
driver = SpkirAbjDclTelemeteredDriver(source_file_path, particle_data_handler, parser_config)
return driver.process()
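# Usage sketch (assumed): the ingestion framework normally invokes the entry
# point roughly as
#   parse(None, '/path/to/spkir_telemetered.log', particle_data_handler)
# where particle_data_handler is supplied by the caller and the path shown is
# illustrative only.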
|
bsd-2-clause
|
nagyistoce/edx-platform
|
lms/djangoapps/certificates/migrations/0005_auto__add_field_generatedcertificate_name.py
|
188
|
7270
|
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'GeneratedCertificate.name'
db.add_column('certificates_generatedcertificate', 'name', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'GeneratedCertificate.name'
db.delete_column('certificates_generatedcertificate', 'name')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'certificates.generatedcertificate': {
'Meta': {'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grade': ('django.db.models.fields.CharField', [], {'max_length': '5', 'null': 'True'}),
'graded_certificate_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'graded_download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
|
agpl-3.0
|
flavour/RedHat
|
modules/s3migration.py
|
12
|
43959
|
# -*- coding: utf-8 -*-
""" Database Migration Toolkit
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: 2012-2015 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3Migration",)
import datetime
import os
from uuid import uuid4
from gluon import current, DAL, Field
from gluon.cfs import getcfs
from gluon.compileapp import build_environment
from gluon.restricted import restricted
from gluon.storage import Storage
class S3Migration(object):
"""
Database Migration Toolkit
- used to help migrate both a production database on a server
and also an offline client
Normally run from a script in web2py context, but without models loaded:
cd web2py
python web2py.py -S eden -R <script.py>
Where script looks like:
m = local_import("s3migration")
migrate = m.S3Migration()
migrate.prep(foreigns=[],
moves=[],
news=[],
ondeletes=[],
strbools=[],
strints=[],
uniques=[],
)
#migrate.migrate()
migrate.post(moves=[],
news=[],
strbools=[],
strints=[],
)
FYI: If you need to access a filename in eden/databases/ then here is how:
import hashlib
(db_string, pool_size) = settings.get_database_string()
prefix = hashlib.md5(db_string).hexdigest()
filename = "%s_%s.table" % (prefix, tablename)
FYI: To view all constraints on a table in MySQL:
SHOW CREATE TABLE tablename;
or
select COLUMN_NAME, CONSTRAINT_NAME, REFERENCED_COLUMN_NAME, REFERENCED_TABLE_NAME
from information_schema.KEY_COLUMN_USAGE
where TABLE_NAME = 'module_resourcename';
@ToDo: Function to ensure that roles match those in prepop
@ToDo: Function to do selective additional prepop
"""
def __init__(self):
request = current.request
# Load s3cfg => but why do it in such a complicated way?
#name = "applications.%s.modules.s3cfg" % request.application
#s3cfg = __import__(name)
#for item in name.split(".")[1:]:
## Remove the dot
#s3cfg = getattr(s3cfg, item)
#settings = s3cfg.S3Config()
# Can use normal import here since executed in web2py environment:
import s3cfg
settings = s3cfg.S3Config()
# Pass into template
current.deployment_settings = settings
# Read settings
model = "%s/models/000_config.py" % request.folder
code = getcfs(model, model, None)
response = current.response
# Needed as some Templates look at this & we don't wish to crash:
response.s3 = Storage()
# Global variables for 000_config.py
environment = build_environment(request, response, current.session)
environment["settings"] = settings
# Some (older) 000_config.py also use "deployment_settings":
environment["deployment_settings"] = settings
# For backwards-compatibility with older 000_config.py:
def template_path():
# When you see this warning, you should update 000_config.py
# See: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Templates/Migration#Changesin000_config.py
print "template_path() is deprecated, please update 000_config.py"
# Return just any valid path to make sure the path-check succeeds,
# => modern S3Config will find the template itself
return request.folder
environment["template_path"] = template_path
environment["os"] = os
environment["Storage"] = Storage
# Execute 000_config.py
restricted(code, environment, layer=model)
self.db_engine = settings.get_database_type()
(db_string, pool_size) = settings.get_database_string()
# Get a handle to the database
self.db = DAL(db_string,
#folder="%s/databases" % request.folder,
auto_import=True,
# @ToDo: Set to False until we migrate
migrate_enabled=True,
)
# -------------------------------------------------------------------------
def prep(self, foreigns=None,
moves=None,
news=None,
ondeletes=None,
strbools=None,
strints=None,
uniques=None,
):
"""
Preparation before migration
@param foreigns : List of tuples (tablename, fieldname) to have the foreign keys removed
- if tablename == "all" then all tables are checked
@param moves : List of dicts {tablename: [(fieldname, new_tablename, link_fieldname)]} to move a field from 1 table to another
- fieldname can be a tuple if the fieldname changes: (fieldname, new_fieldname)
@param news : List of dicts {new_tablename: {'lookup_field': '',
'tables': [tablename: [fieldname]],
'supers': [tablename: [fieldname]],
} to create new records from 1 or more old tables (inc all instances of an SE)
- fieldname can be a tuple if the fieldname changes: (fieldname, new_fieldname)
@param ondeletes : List of tuples [(tablename, fieldname, reftable, ondelete)] to have the ondelete modified to
@param strbools : List of tuples [(tablename, fieldname)] to convert from string/integer to bools
@param strints : List of tuples [(tablename, fieldname)] to convert from string to integer
@param uniques : List of tuples [(tablename, fieldname)] to have the unique indices removed,
"""
# Backup current database
self.moves = moves
self.news = news
self.strbools = strbools
self.strints = strints
self.backup()
if foreigns:
# Remove Foreign Key constraints which need to go in next code
for tablename, fieldname in foreigns:
self.remove_foreign(tablename, fieldname)
if uniques:
# Remove Unique indices which need to go in next code
for tablename, fieldname in uniques:
self.remove_unique(tablename, fieldname)
if ondeletes:
# Modify ondeletes
for tablename, fieldname, reftable, ondelete in ondeletes:
self.ondelete(tablename, fieldname, reftable, ondelete)
# Remove fields which need to be altered in next code
if strbools:
for tablename, fieldname in strbools:
self.drop(tablename, fieldname)
if strints:
for tablename, fieldname in strints:
self.drop(tablename, fieldname)
self.db.commit()
# -------------------------------------------------------------------------
def backup(self):
"""
Backup the database to a local SQLite database
@ToDo: Option to use a temporary DB in Postgres/MySQL as this takes
too long for a large DB
"""
moves = self.moves
news = self.news
strints = self.strints
strbools = self.strbools
if not moves and not news and not strbools and not strints:
# Nothing to backup
return
import os
db = self.db
folder = "%s/databases/backup" % current.request.folder
# Create clean folder for the backup
if os.path.exists(folder):
import shutil
shutil.rmtree(folder)
import time
time.sleep(1)
os.mkdir(folder)
# Setup backup database
db_bak = DAL("sqlite://backup.db", folder=folder, adapter_args={"foreign_keys": False})
# Copy Table structure
skip = []
for tablename in db.tables:
if tablename == "gis_location":
table = db[tablename]
fields = [table[field] for field in table.fields if field != "the_geom"]
try:
db_bak.define_table(tablename, *fields)
except KeyError:
# Can't resolve reference yet
# Cleanup
del db_bak[tablename]
# Try later
skip.append(tablename)
else:
try:
db_bak.define_table(tablename, db[tablename])
except KeyError:
# Can't resolve reference yet
# Cleanup
del db_bak[tablename]
# Try later
skip.append(tablename)
while skip:
_skip = []
for tablename in skip:
if tablename == "gis_location":
table = db[tablename]
fields = [table[field] for field in table.fields if field != "the_geom"]
try:
db_bak.define_table(tablename, *fields)
except KeyError:
# Can't resolve reference yet
# Cleanup
del db_bak[tablename]
# Try later
_skip.append(tablename)
except:
import sys
print "Skipping %s: %s" % (tablename, sys.exc_info()[1])
else:
try:
db_bak.define_table(tablename, db[tablename])
except KeyError:
# Can't resolve reference yet
# Cleanup
del db_bak[tablename]
# Try later
_skip.append(tablename)
except:
import sys
print "Skipping %s: %s" % (tablename, sys.exc_info()[1])
skip = _skip
# Which tables do we need to backup?
tables = []
if moves:
for tablename in moves:
tables.append(tablename)
if news:
for tablename in news:
new = news[tablename]
for t in new["tables"]:
tables.append(t)
for s in new["supers"]:
tables.append(s)
stable = db[s]
rows = db(stable._id > 0).select(stable.instance_type)
instance_types = set([r.instance_type for r in rows])
for t in instance_types:
tables.append(t)
if strbools:
for tablename, fieldname in strbools:
tables.append(tablename)
if strints:
for tablename, fieldname in strints:
tables.append(tablename)
# Remove duplicates
tables = set(tables)
# Copy Data
import csv
csv.field_size_limit(2**20 * 100) # 100 megs
for tablename in tables:
filename = "%s/%s.csv" % (folder, tablename)
file = open(filename, "w")
rows = db(db[tablename].id > 0).select()
rows.export_to_csv_file(file)
file.close()
file = open(filename, "r")
db_bak[tablename].import_from_csv_file(file, unique="uuid2") # uuid2 designed to not hit!
file.close()
db_bak.commit()
# Pass handle back to other functions
self.db_bak = db_bak
# -------------------------------------------------------------------------
def migrate(self):
"""
Perform the migration
@ToDo
"""
# Update code: git pull
# run_models_in(environment)
# or
# Set migrate=True in models/000_config.py
# current.s3db.load_all_models() via applications/eden/static/scripts/tools/noop.py
# Set migrate=False in models/000_config.py
pass
# -------------------------------------------------------------------------
def post(self, moves=None,
news=None,
strbools=None,
strints=None,
):
"""
Cleanup after migration
@param moves : List of dicts {tablename: [(fieldname, new_tablename, link_fieldname)]} to move a field from 1 table to another
- fieldname can be a tuple if the fieldname changes: (fieldname, new_fieldname)
@param news : List of dicts {new_tablename: {'lookup_field': '',
'tables': [tablename: [fieldname]],
'supers': [tablename: [fieldname]],
} to create new records from 1 or more old tables (inc all instances of an SE)
- fieldname can be a tuple if the fieldname changes: (fieldname, new_fieldname)
@param strbools : List of tuples [(tablename, fieldname)] to convert from string/integer to bools
@param strints : List of tuples [(tablename, fieldname)] to convert from string to integer
"""
db = self.db
# @ToDo: Do prepops of new tables
# Restore data from backup
folder = "%s/databases/backup" % current.request.folder
db_bak = DAL("sqlite://backup.db",
folder=folder,
auto_import=True,
migrate=False)
if moves:
for tablename in moves:
table = db_bak[tablename]
fieldname, new_tablename, link_fieldname = moves[tablename]
if isinstance(fieldname, (tuple, list)):
fieldname, new_fieldname = fieldname
else:
new_fieldname = fieldname
old_field = table[fieldname]
new_linkfield = db[new_tablename][link_fieldname]
rows = db_bak(table._id > 0).select(old_field, link_fieldname)
for row in rows:
update_vars = {}
update_vars[new_fieldname] = row[old_field]
db(new_linkfield == row[link_fieldname]).update(**update_vars)
if news:
for tablename in news:
# Read Data
data = {}
new = news[tablename]
lookup_field = new["lookup_field"]
_tables = new["tables"]
for t in _tables:
fields = _tables[t]
# @ToDo: Support tuples
#for f in fields:
# if isinstance(f, (tuple, list)):
table = db_bak[t]
table_fields = [table[f] for f in fields]
rows = db_bak(table.deleted == False).select(table[lookup_field],
*table_fields)
for row in rows:
record_id = row[lookup_field]
if record_id in data:
_new = False
_data = data[record_id]
else:
_new = True
_data = {}
for f in fields:
if f in row:
if row[f] not in ("", None):
# JSON type doesn't like ""
_data[f] = row[f]
if _new:
data[record_id] = _data
for s in new["supers"]:
fields = new["supers"][s]
# @ToDo: Support tuples
#for f in fields:
# if isinstance(f, (tuple, list)):
stable = db_bak[s]
superkey = stable._id.name
rows = db_bak(stable.deleted == False).select(stable._id,
stable.instance_type)
for row in rows:
etable = db_bak[row["instance_type"]]
_fields = [f for f in fields if f in etable.fields]
table_fields = [etable[f] for f in _fields]
record = db_bak(etable[superkey] == row[superkey]).select(etable[lookup_field],
*table_fields
).first()
if record:
record_id = record[lookup_field]
if record_id in data:
_new = False
_data = data[record_id]
else:
_new = True
_data = {}
for f in _fields:
if f in record:
if record[f] not in ("", None):
# JSON type doesn't like ""
_data[f] = record[f]
if _new:
data[record_id] = _data
# Create Records
table = db[tablename]
for record_id in data:
update_vars = data[record_id]
if update_vars:
update_vars[lookup_field] = record_id
# Can't rely on the defaults as auto_import doesn't see DAL defaults
update_vars["created_on"] = datetime.datetime.utcnow()
update_vars["deleted"] = False
update_vars["mci"] = 0
update_vars["modified_on"] = datetime.datetime.utcnow()
update_vars["uuid"] = uuid4().urn # Would always be identical otherwise
table.insert(**update_vars)
if strints:
for tablename, fieldname in strints:
newtable = db[tablename]
newrows = db(newtable.id > 0).select(newtable.id)
oldtable = db_bak[tablename]
oldrows = db_bak(oldtable.id > 0).select(oldtable.id,
oldtable[fieldname])
oldvals = oldrows.as_dict()
for row in newrows:
_id = row.id
val = oldvals[_id][fieldname]
if not val:
continue
try:
update_vars = {fieldname : int(val)}
except:
current.log.warning("S3Migrate: Unable to convert %s to an integer - skipping" % val)
else:
db(newtable.id == _id).update(**update_vars)
if strbools:
for tablename, fieldname in strbools:
to_bool = self.to_bool
newtable = db[tablename]
newrows = db(newtable.id > 0).select(newtable.id)
oldtable = db_bak[tablename]
oldrows = db_bak(oldtable.id > 0).select(oldtable.id,
oldtable[fieldname])
oldvals = oldrows.as_dict()
for row in newrows:
_id = row.id
val = oldvals[_id][fieldname]
if not val:
continue
val = to_bool(val)
if val:
update_vars = {fieldname : val}
db(newtable.id == _id).update(**update_vars)
db.commit()
# -------------------------------------------------------------------------
@staticmethod
def to_bool(value):
"""
Converts 'something' to boolean; returns None for unrecognised formats
Possible True values: 1, True, "1", "TRue", "yes", "y", "t"
Possible False values: 0, False, "0", "faLse", "no", "n", "f", 0.0
"""
val = str(value).lower()
if val in ("yes", "y", "true", "t", "1"):
return True
elif val in ("no", "n", "false", "f", "0", "0.0"):
return False
else:
return None
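    # Illustrative checks (assumed examples): to_bool("TRue") and to_bool(1)
    # return True, to_bool("0.0") and to_bool("n") return False, and an
    # unrecognised value such as to_bool("maybe") returns None.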
# -------------------------------------------------------------------------
def drop(self, tablename, fieldname):
"""
Drop a field from a table
e.g. for when changing type
"""
db = self.db
db_engine = self.db_engine
# Modify the database
if db_engine == "sqlite":
# Not Supported: http://www.sqlite.org/lang_altertable.html
# But also not required (for strints anyway)
sql = ""
elif db_engine == "mysql":
# http://dev.mysql.com/doc/refman/5.1/en/alter-table.html
sql = "ALTER TABLE %(tablename)s DROP COLUMN %(fieldname)s;" % \
dict(tablename=tablename, fieldname=fieldname)
elif db_engine == "postgres":
# http://www.postgresql.org/docs/9.3/static/sql-altertable.html
sql = "ALTER TABLE %(tablename)s DROP COLUMN %(fieldname)s;" % \
dict(tablename=tablename, fieldname=fieldname)
try:
db.executesql(sql)
except:
import sys
e = sys.exc_info()[1]
print >> sys.stderr, e
# Modify the .table file
table = db[tablename]
fields = []
for fn in table.fields:
if fn == fieldname:
continue
fields.append(table[fn])
db.__delattr__(tablename)
db.tables.remove(tablename)
db.define_table(tablename, *fields,
# Rebuild the .table file from this definition
fake_migrate=True)
# -------------------------------------------------------------------------
def ondelete(self, tablename, fieldname, reftable, ondelete):
"""
Modify the ondelete constraint for a foreign key
"""
db = self.db
db_engine = self.db_engine
executesql = db.executesql
if tablename == "all":
tables = db.tables
else:
tables = [tablename]
for tablename in tables:
if fieldname not in db[tablename].fields:
continue
# Modify the database
if db_engine == "sqlite":
# @ToDo: http://www.sqlite.org/lang_altertable.html
raise NotImplementedError
elif db_engine == "mysql":
# http://dev.mysql.com/doc/refman/5.1/en/alter-table.html
create = executesql("SHOW CREATE TABLE `%s`;" % tablename)[0][1]
fk = create.split("` FOREIGN KEY (`%s" % fieldname)[0].split("CONSTRAINT `").pop()
if "`" in fk:
fk = fk.split("`")[0]
sql = "ALTER TABLE `%(tablename)s` DROP FOREIGN KEY `%(fk)s`, ALTER TABLE %(tablename)s ADD CONSTRAINT %(fk)s FOREIGN KEY (%(fieldname)s) REFERENCES %(reftable)s(id) ON DELETE %(ondelete)s;" % \
dict(tablename=tablename, fk=fk, fieldname=fieldname, reftable=reftable, ondelete=ondelete)
elif db_engine == "postgres":
# http://www.postgresql.org/docs/9.3/static/sql-altertable.html
sql = "ALTER TABLE %(tablename)s DROP CONSTRAINT %(tablename)s_%(fieldname)s_fkey, ALTER TABLE %(tablename)s ADD CONSTRAINT %(tablename)s_%(fieldname)s_fkey FOREIGN KEY (%(fieldname)s) REFERENCES %(reftable)s ON DELETE %(ondelete)s;" % \
dict(tablename=tablename, fieldname=fieldname, reftable=reftable, ondelete=ondelete)
try:
executesql(sql)
except:
print "Error: Table %s with FK %s" % (tablename, fk)
import sys
e = sys.exc_info()[1]
print >> sys.stderr, e
# -------------------------------------------------------------------------
def remove_foreign(self, tablename, fieldname):
"""
Remove a Foreign Key constraint from a table
"""
db = self.db
db_engine = self.db_engine
executesql = db.executesql
if tablename == "all":
tables = db.tables
else:
tables = [tablename]
for tablename in tables:
if fieldname not in db[tablename].fields:
continue
# Modify the database
if db_engine == "sqlite":
# @ToDo: http://www.sqlite.org/lang_altertable.html
raise NotImplementedError
elif db_engine == "mysql":
# http://dev.mysql.com/doc/refman/5.1/en/alter-table.html
create = executesql("SHOW CREATE TABLE `%s`;" % tablename)[0][1]
fk = create.split("` FOREIGN KEY (`%s" % fieldname)[0].split("CONSTRAINT `").pop()
if "`" in fk:
fk = fk.split("`")[0]
sql = "ALTER TABLE `%(tablename)s` DROP FOREIGN KEY `%(fk)s`;" % \
dict(tablename=tablename, fk=fk)
elif db_engine == "postgres":
# http://www.postgresql.org/docs/9.3/static/sql-altertable.html
sql = "ALTER TABLE %(tablename)s DROP CONSTRAINT %(tablename)s_%(fieldname)s_fkey;" % \
dict(tablename=tablename, fieldname=fieldname)
try:
executesql(sql)
except:
print "Error: Table %s with FK %s" % (tablename, fk)
import sys
e = sys.exc_info()[1]
print >> sys.stderr, e
# -------------------------------------------------------------------------
def remove_unique(self, tablename, fieldname):
"""
Remove a Unique Index from a table
"""
db = self.db
db_engine = self.db_engine
# Modify the database
if db_engine == "sqlite":
# @ToDo: http://www.sqlite.org/lang_altertable.html
raise NotImplementedError
elif db_engine == "mysql":
# http://dev.mysql.com/doc/refman/5.1/en/alter-table.html
sql = "ALTER TABLE `%(tablename)s` DROP INDEX `%(fieldname)s`;" % \
dict(tablename=tablename, fieldname=fieldname)
elif db_engine == "postgres":
# http://www.postgresql.org/docs/9.3/static/sql-altertable.html
sql = "ALTER TABLE %(tablename)s DROP CONSTRAINT %(tablename)s_%(fieldname)s_key;" % \
dict(tablename=tablename, fieldname=fieldname)
try:
db.executesql(sql)
except:
import sys
e = sys.exc_info()[1]
print >> sys.stderr, e
# Modify the .table file
table = db[tablename]
fields = []
for fn in table.fields:
field = table[fn]
if fn == fieldname:
field.unique = False
fields.append(field)
db.__delattr__(tablename)
db.tables.remove(tablename)
db.define_table(tablename, *fields,
# Rebuild the .table file from this definition
fake_migrate=True)
# =========================================================================
# OLD CODE below here
# - There are tests for these in /tests/dbmigration
# -------------------------------------------------------------------------
def rename_field(self,
tablename,
fieldname_old,
fieldname_new,
attributes_to_copy=None):
"""
Rename a field, while keeping the other properties of the field the same.
If there are some indexes on that table, these will be recreated and other constraints will remain unchanged too.
@param tablename : name of the table in which the field is renamed
@param fieldname_old : name of the original field before renaming
@param fieldname_new : name of the field after renaming
@param attributes_to_copy : list of attributes which need to be copied from the old_field to the new_field (needed only in sqlite)
"""
db = self.db
db_engine = self.db_engine
if db_engine == "sqlite":
self._add_renamed_fields(db, tablename, fieldname_old, fieldname_new, attributes_to_copy)
self._copy_field(db, tablename, fieldname_old, fieldname_new)
sql = "SELECT sql FROM sqlite_master WHERE type='index' AND tbl_name='%s' ORDER BY name;" % \
tablename
list_index = db.executesql(sql)
for element in list_index:
search_str = "%s(%s)" % (tablename, fieldname_old)
if element[0] is not None and search_str in element[0]:
sql = "CREATE INDEX %s__idx on %s(%s);" % \
(fieldname_new, tablename, fieldname_new)
try:
db.executesql(sql)
except:
pass
elif db_engine == "mysql":
field = db[tablename][fieldname_old]
            sql_type = self.map_type_web2py_to_sql(field.type)
sql = "ALTER TABLE %s CHANGE %s %s %s(%s)" % (tablename,
fieldname_old,
fieldname_new,
sql_type,
field.length)
db.executesql(sql)
elif db_engine == "postgres":
sql = "ALTER TABLE %s RENAME COLUMN %s TO %s" % \
(tablename, fieldname_old, fieldname_new)
db.executesql(sql)
# -------------------------------------------------------------------------
def rename_table(self,
tablename_old,
tablename_new):
"""
Rename a table.
If any fields reference that table, they will be handled too.
@param tablename_old : name of the original table before renaming
@param tablename_new : name of the table after renaming
"""
try:
sql = "ALTER TABLE %s RENAME TO %s;" % (tablename_old,
tablename_new)
self.db.executesql(sql)
except Exception, e:
print e
# -------------------------------------------------------------------------
def list_field_to_reference(self,
tablename_new,
new_list_field,
list_field_name,
table_old_id_field,
tablename_old):
"""
            Handle the migration in which a new table is created with a column to hold
            the values from the list field, and possibly further (initially empty)
            columns to be filled in later.
            That new table has a foreign key reference back to the original table.
            Then, for each value in the list field of each record in the original table,
            one record is created in the new table that points back to the original record.
            @param tablename_new : name of the new table to which the list field needs to be migrated
@param new_list_field : name of the field in the new table which will hold the content of the list field
@param list_field_name : name of the list field in the original table
@param table_old_id_field : name of the id field in the original table
@param tablename_old : name of the original table
"""
self._create_new_table(tablename_new, new_list_field, list_field_name,
table_old_id_field, tablename_old)
self._fill_the_new_table(tablename_new, new_list_field, list_field_name,
table_old_id_field, tablename_old)
# -------------------------------------------------------------------------
def migrate_to_unique_field(self,
tablename,
field_to_update,
mapping_function,
list_of_tables=None):
"""
Add values to a new field according to the mappings given through the mapping_function
            @param tablename : name of the original table in which the new unique field is added
@param field_to_update : name of the field to be updated according to the mapping
@param mapping_function : class instance containing the mapping functions
@param list_of_tables : list of tables which the table references
"""
db = self.db
self._add_new_fields(db, field_to_update, tablename)
self._add_tables_temp_db(db, list_of_tables)
self.update_field_by_mapping(db, tablename, field_to_update, mapping_function)
# -------------------------------------------------------------------------
    @staticmethod
    def update_field_by_mapping(db,
tablename,
field_to_update,
mapping_function):
"""
Update the values of an existing field according to the mappings given through the mapping_function
- currently unused
@param db : database instance
            @param tablename : name of the original table in which the new unique field is added
@param field_to_update : name of the field to be updated according to the mapping
@param mapping_function : class instance containing the mapping functions
"""
fields = mapping_function.fields(db)
table = db[tablename]
if table["id"] not in fields:
fields.append(table["id"])
rows = db(mapping_function.query(db)).select(*fields)
if rows:
try:
rows[0][tablename]["id"]
row_single_layer = False
except KeyError:
row_single_layer = True
dict_update = {}
for row in rows:
if not row_single_layer:
row_id = row[tablename]["id"]
else:
row_id = row["id"]
changed_value = mapping_function.mapping(row)
dict_update[field_to_update] = changed_value
db(table["id"] == row_id).update(**dict_update)
# -------------------------------------------------------------------------
@staticmethod
def _map_type_list_field(old_type):
"""
This function maps the list type into individual field type which can contain
the individual values of the list.
Mappings
- list:reference <table> --> refererence <table>
- list:integer --> integer
- list:string --> string
"""
if (old_type == "list:integer"):
return "integer"
        elif old_type.startswith("list:reference"):
            # str.strip() treats its argument as a set of characters,
            # so slice off the "list:" prefix instead
            return old_type[5:]
elif old_type == "list:string":
return "string"
# -------------------------------------------------------------------------
def _create_new_table(self,
tablename_new,
new_list_field,
list_field_name,
table_old_id_field,
tablename_old):
"""
This function creates the new table which is used in the list_field_to_reference migration.
That new table has a foreign key reference back to the original table.
            @param tablename_new : name of the new table to which the list field needs to be migrated
@param new_list_field : name of the field in the new table which will hold the content of the list field
@param list_field_name : name of the list field in the original table
@param table_old_id_field : name of the id field in the original table
@param tablename_old : name of the original table
"""
db = self.db
new_field_type = self._map_type_list_field(db[tablename_old][list_field_name].type)
new_field = Field(new_list_field, new_field_type)
new_id_field = Field("%s_%s" % (tablename_old, table_old_id_field),
"reference %s" % tablename_old)
        db.define_table(tablename_new,
new_id_field,
new_field)
# -------------------------------------------------------------------------
    def _fill_the_new_table(self,
                            tablename_new,
new_list_field,
list_field_name,
table_old_id_field,
tablename_old):
"""
This function is used in the list_field_to_reference migration.
            For each value in the list field of each record in the original table,
            one record is created in the new table that points back to the original record.
            @param tablename_new : name of the new table to which the list field needs to be migrated
@param new_list_field : name of the field in the new table which will hold the content of the list field
@param list_field_name : name of the list field in the original table
@param table_old_id_field : name of the id field in the original table
@param tablename_old : name of the original table
"""
        db = self.db
        update_dict = {}
table_old = db[tablename_old]
table_new = db[tablename_new]
for row in db().select(table_old[table_old_id_field],
table_old[list_field_name]):
for element in row[list_field_name]:
update_dict[new_list_field] = element
update_dict["%s_%s" % (tablename_old, table_old_id_field)] = row[table_old_id_field]
table_new.insert(**update_dict)
# -------------------------------------------------------------------------
@staticmethod
def _add_renamed_fields(db,
tablename,
fieldname_old,
fieldname_new,
attributes_to_copy):
"""
Add a field in table mentioned while renaming a field.
The renamed field is added separately to the table with the same properties as the original field.
@param db : database instance
"""
table = db[tablename]
if hasattr(table, "_primarykey"):
primarykey = table._primarykey
else:
primarykey = None
field_new = Field(fieldname_new)
        for attribute in attributes_to_copy:
            # Copy each requested attribute from the old field onto the new one
            setattr(field_new, attribute, getattr(table[fieldname_old], attribute))
db.define_table(tablename,
table, # Table to inherit from
field_new,
primarykey=primarykey)
# -------------------------------------------------------------------------
@staticmethod
def _copy_field(db, tablename, fieldname_old, fieldname_new):
"""
Copy all the values from old_field into new_field
@param db : database instance
"""
dict_update = {}
field_old = db[tablename][fieldname_old]
for row in db().select(field_old):
dict_update[fieldname_new] = row[fieldname_old]
query = (field_old == row[fieldname_old])
db(query).update(**dict_update)
# -------------------------------------------------------------------------
@staticmethod
def map_type_web2py_to_sql(dal_type):
"""
Map the web2py type into SQL type
Used when writing SQL queries to change the properties of a field
Mappings:
string --> Varchar
"""
if dal_type == "string":
return "varchar"
else:
return dal_type
# -------------------------------------------------------------------------
@staticmethod
def _add_new_fields(db, new_unique_field, tablename):
"""
This function adds a new _unique_ field into the table, while keeping all the rest of
the properties of the table unchanged
@param db : database instance
"""
new_field = Field(new_unique_field, "integer")
table = db[tablename]
if hasattr(table, "_primarykey"):
primarykey = table._primarykey
else:
primarykey = None
db.define_table(tablename,
table, # Table to inherit from
new_field,
primarykey=primarykey)
# -------------------------------------------------------------------------
def _add_tables_temp_db(self,
temp_db,
list_of_tables):
"""
            This function adds tables to the temp_db from the global db;
            these might be used for running queries or validating values.
"""
for tablename in list_of_tables:
temp_db.define_table(tablename, self.db[tablename])
# END =========================================================================
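# Minimal usage sketch (illustrative only; the constructor arguments, table and
# field names below are hypothetical and not taken from the code above):
#
#   migrate = S3Migrate()
#   migrate.rename_field("org_organisation", "acronym", "abbreviation",
#                        attributes_to_copy=["type", "length", "label"])
#   migrate.remove_unique("pr_person", "pe_id")
#   migrate.ondelete("project_task", "created_by", "auth_user", "SET NULL")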
|
mit
|
micahmumper/naev
|
utils/mission.py
|
20
|
2897
|
import os
import xml.etree.ElementTree as ET
import tempfile
from xml.sax.saxutils import escape
version=0.1
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
print "Mission XML generator, version "+str(version)
name = raw_input("Mission name?: ")
lua = raw_input("Lua file. e. g. \"empire/collective/ec01\": ")
unique = raw_input("unique? If yes, type anything, if no, leave empty: ")
done = raw_input("done? If this mission requires the player to successfully finish another mission, then please write the name of that mission. If there are no requirements, then leave it empty.\n")
chance = raw_input("The last two digits of the number in a <chance> tag determine the likelihood a mission will appear when a player lands on a planet. If the chance number is three digits long, the first digit determines how many times that probability is calculated on each landing. A number larger than 100 means the mission may appear more than once simultaneously.\n")
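# Illustrative reading of the <chance> value described above (not part of the
# original script): a chance of "460" means the mission is rolled 4 times per
# landing, each time with a 60% probability of appearing.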
location = raw_input("location? Can be either None, Computer, Bar, Outfit, Shipyard, Land or Commodity: ")
planets = []
while True:
planet = raw_input("planet? The name of a start planet. Leave empty if no more planets should be listed: ")
if planet:
planets.append(planet)
else:
break
factions = []
while True:
faction = raw_input("faction? The name of required faction of the planet. Leave empty if no more factions should be listed: ")
if faction:
factions.append(faction)
else:
break
cond=None
if raw_input("Do you want to edit the <cond> in your $EDITOR? If yes, please type anything, if no then please leave empty: "):
f = tempfile.NamedTemporaryFile()
os.system("$EDITOR "+f.name)
cond = f.read()
f.close()
root = ET.Element("mission")
root.set("name", name)
e_lua = ET.SubElement(root, "lua").text=escape(lua)
if unique:
ET.SubElement( ET.SubElement(root, "flags") , "unique")
avail = ET.SubElement(root, "avail")
if done:
ET.SubElement(avail, "done").text=escape(done)
ET.SubElement(avail, "chance").text=escape(chance)
ET.SubElement(avail, "location").text=escape(location)
for faction in factions:
ET.SubElement(avail, "faction").text=escape(faction)
for planet in planets:
ET.SubElement(avail, "planet").text=escape(planet)
if cond:
ET.SubElement(avail, "cond").text=escape(cond)
print """The mission xml. Insert it right before '<missions />'.
===== THE XML ====="""
indent(root)
print ET.tostring(root)
|
gpl-3.0
|
tcwicklund/django
|
tests/template_tests/filter_tests/test_phone2numeric.py
|
345
|
1508
|
from django.template.defaultfilters import phone2numeric_filter
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class Phone2numericTests(SimpleTestCase):
@setup({'phone2numeric01': '{{ a|phone2numeric }} {{ b|phone2numeric }}'})
def test_phone2numeric01(self):
output = self.engine.render_to_string(
'phone2numeric01',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric02':
'{% autoescape off %}{{ a|phone2numeric }} {{ b|phone2numeric }}{% endautoescape %}'})
def test_phone2numeric02(self):
output = self.engine.render_to_string(
'phone2numeric02',
{'a': '<1-800-call-me>', 'b': mark_safe('<1-800-call-me>')},
)
self.assertEqual(output, '<1-800-2255-63> <1-800-2255-63>')
@setup({'phone2numeric03': '{{ a|phone2numeric }}'})
def test_phone2numeric03(self):
output = self.engine.render_to_string(
'phone2numeric03',
{'a': 'How razorback-jumping frogs can level six piqued gymnasts!'},
)
self.assertEqual(
output,
'469 729672225-5867464 37647 226 53835 749 747833 49662787!'
)
class FunctionTests(SimpleTestCase):
def test_phone2numeric(self):
self.assertEqual(phone2numeric_filter('0800 flowers'), '0800 3569377')
|
bsd-3-clause
|
armleo/limbo-android
|
jni/qemu/QMP/qmp.py
|
78
|
4958
|
# QEMU Monitor Protocol Python class
#
# Copyright (C) 2009, 2010 Red Hat Inc.
#
# Authors:
# Luiz Capitulino <[email protected]>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory.
import json
import errno
import socket
class QMPError(Exception):
pass
class QMPConnectError(QMPError):
pass
class QMPCapabilitiesError(QMPError):
pass
class QEMUMonitorProtocol:
def __init__(self, address, server=False):
"""
Create a QEMUMonitorProtocol class.
@param address: QEMU address, can be either a unix socket path (string)
or a tuple in the form ( address, port ) for a TCP
connection
@param server: server mode listens on the socket (bool)
@raise socket.error on socket connection errors
@note No connection is established, this is done by the connect() or
accept() methods
"""
self.__events = []
self.__address = address
self.__sock = self.__get_sock()
if server:
self.__sock.bind(self.__address)
self.__sock.listen(1)
def __get_sock(self):
if isinstance(self.__address, tuple):
family = socket.AF_INET
else:
family = socket.AF_UNIX
return socket.socket(family, socket.SOCK_STREAM)
def __negotiate_capabilities(self):
self.__sockfile = self.__sock.makefile()
greeting = self.__json_read()
if greeting is None or not greeting.has_key('QMP'):
raise QMPConnectError
# Greeting seems ok, negotiate capabilities
resp = self.cmd('qmp_capabilities')
if "return" in resp:
return greeting
raise QMPCapabilitiesError
def __json_read(self, only_event=False):
while True:
data = self.__sockfile.readline()
if not data:
return
resp = json.loads(data)
if 'event' in resp:
self.__events.append(resp)
if not only_event:
continue
return resp
error = socket.error
def connect(self):
"""
Connect to the QMP Monitor and perform capabilities negotiation.
@return QMP greeting dict
@raise socket.error on socket connection errors
@raise QMPConnectError if the greeting is not received
@raise QMPCapabilitiesError if fails to negotiate capabilities
"""
self.__sock.connect(self.__address)
return self.__negotiate_capabilities()
def accept(self):
"""
Await connection from QMP Monitor and perform capabilities negotiation.
@return QMP greeting dict
@raise socket.error on socket connection errors
@raise QMPConnectError if the greeting is not received
@raise QMPCapabilitiesError if fails to negotiate capabilities
"""
self.__sock, _ = self.__sock.accept()
return self.__negotiate_capabilities()
def cmd_obj(self, qmp_cmd):
"""
Send a QMP command to the QMP Monitor.
@param qmp_cmd: QMP command to be sent as a Python dict
@return QMP response as a Python dict or None if the connection has
been closed
"""
try:
self.__sock.sendall(json.dumps(qmp_cmd))
except socket.error, err:
if err[0] == errno.EPIPE:
return
raise socket.error(err)
return self.__json_read()
def cmd(self, name, args=None, id=None):
"""
Build a QMP command and send it to the QMP Monitor.
@param name: command name (string)
@param args: command arguments (dict)
@param id: command id (dict, list, string or int)
"""
qmp_cmd = { 'execute': name }
if args:
qmp_cmd['arguments'] = args
if id:
qmp_cmd['id'] = id
return self.cmd_obj(qmp_cmd)
def command(self, cmd, **kwds):
ret = self.cmd(cmd, kwds)
if ret.has_key('error'):
raise Exception(ret['error']['desc'])
return ret['return']
def get_events(self, wait=False):
"""
Get a list of available QMP events.
@param wait: block until an event is available (bool)
"""
self.__sock.setblocking(0)
try:
self.__json_read()
except socket.error, err:
if err[0] == errno.EAGAIN:
# No data available
pass
self.__sock.setblocking(1)
if not self.__events and wait:
self.__json_read(only_event=True)
return self.__events
def clear_events(self):
"""
Clear current list of pending events.
"""
self.__events = []
def close(self):
self.__sock.close()
self.__sockfile.close()
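# Minimal usage sketch (illustrative only; assumes a QEMU instance is already
# listening for QMP connections on the hypothetical unix socket /tmp/qmp-sock):
#
#   monitor = QEMUMonitorProtocol("/tmp/qmp-sock")
#   monitor.connect()                       # greeting + capability negotiation
#   print monitor.command("query-status")   # e.g. {'status': 'running', ...}
#   monitor.close()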
|
gpl-2.0
|
hypha/zoopla
|
zoopla/map.py
|
1
|
2154
|
import logging
logger = logging.getLogger( 'oofy' )
class Map(object):
def __init__(self):
self._points = []
def add_point(self, coordinates):
self._points.append(coordinates)
def marker_js(self, latitude=0,longtitude=0, target_url="http://www.planewalk.net", description="unknown property"):
return """var marker = new google.maps.Marker({{
position: new google.maps.LatLng({lat}, {lon}),
map: map,
optimized:false,
            title: "{desc}",
url: "{url}"
}});
marker.addListener('click', function() {{
window.open(this.url);
}});""".format(lat=latitude, lon=longtitude, url=target_url, desc=description)
def __str__(self):
centerLat = sum((x[0] for x in self._points)) / len(self._points)
centerLon = sum((x[1] for x in self._points)) / len(self._points)
markersCode = "\n".join(
# ["""new google.maps.Marker({{
# position: new google.maps.LatLng({lat}, {lon}),
# map: map,
# title: <a href="{url}">Hello World!</a>
# }});"""
[self.marker_js(latitude=x[0], longtitude=x[1], target_url=x[2], description=x[3]) for x in self._points])
return """
<script src="https://maps.googleapis.com/maps/api/js?key={key}&v=3.exp"></script>
<div id="map-canvas" style="height: 100%; width: 100%"></div>
<script type="text/javascript">
var map;
function show_map() {{
map = new google.maps.Map(document.getElementById("map-canvas"), {{
zoom: 13,
center: new google.maps.LatLng({centerLat}, {centerLon})
}});
{markersCode}
}}
google.maps.event.addDomListener(window, 'load', show_map);
</script>
""".format(centerLat=centerLat, centerLon=centerLon,
markersCode=markersCode, key="AIzaSyASjkgLegAeY8JUmyFz-RIQ0e_dRP8wg1A")
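# Minimal usage sketch (illustrative only; the coordinates, URL and description
# are hypothetical -- each point is a (lat, lon, url, description) tuple, as
# consumed by marker_js() and __str__() above):
#
#   m = Map()
#   m.add_point((51.5074, -0.1278, "http://example.com/listing/1", "2 bed flat"))
#   html = str(m)  # embeddable Google Maps snippet centred on the points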
|
bsd-2-clause
|
kri5/pghoard
|
pghoard/rohmu/object_storage/azure.py
|
1
|
4175
|
"""
rohmu - azure object store interface
Copyright (c) 2016 Ohmu Ltd
See LICENSE for details
"""
import dateutil.parser
import time
from azure.storage import BlobService # pylint: disable=no-name-in-module, import-error
from .base import BaseTransfer
class AzureTransfer(BaseTransfer):
def __init__(self, account_name, account_key, container_name, prefix=None):
# NOTE: Azure wants all paths to start with a slash
prefix = "/{}".format(prefix.lstrip("/") if prefix else "")
super().__init__(prefix=prefix)
self.account_name = account_name
self.account_key = account_key
self.container_name = container_name
self.conn = BlobService(account_name=self.account_name, account_key=self.account_key)
self.container = self.get_or_create_container(self.container_name)
self.log.debug("AzureTransfer initialized")
        # XXX: AzureTransfer isn't actively tested and its error handling is probably lacking
self.log.warning("AzureTransfer is experimental and has not been thoroughly tested")
def get_metadata_for_key(self, key):
key = self.format_key_for_backend(key)
return self._list_blobs(key)[0]["metadata"]
def _metadata_for_key(self, key):
return self._list_blobs(key)[0]["metadata"]
def list_path(self, key):
path = self.format_key_for_backend(key, trailing_slash=True)
return self._list_blobs(path)
def _list_blobs(self, path):
self.log.debug("Listing path %r", path)
items = self.conn.list_blobs(self.container_name, prefix=path, delimiter="/", include="metadata")
result = []
for item in items:
result.append({
"last_modified": dateutil.parser.parse(item.properties.last_modified),
"metadata": item.metadata,
"name": self.format_key_from_backend(item.name),
"size": item.properties.content_length,
})
return result
def delete_key(self, key):
key = self.format_key_for_backend(key)
self.log.debug("Deleting key: %r", key)
return self.conn.delete_blob(self.container_name, key)
def get_contents_to_file(self, key, filepath_to_store_to):
key = self.format_key_for_backend(key)
self.log.debug("Starting to fetch the contents of: %r to: %r", key, filepath_to_store_to)
return self.conn.get_blob_to_path(self.container_name, key, filepath_to_store_to)
def get_contents_to_fileobj(self, key, fileobj_to_store_to):
key = self.format_key_for_backend(key)
self.log.debug("Starting to fetch the contents of: %r", key)
return self.conn.get_blob_to_file(self.container_name, key, fileobj_to_store_to)
def get_contents_to_string(self, key):
key = self.format_key_for_backend(key)
self.log.debug("Starting to fetch the contents of: %r", key)
return self.conn.get_blob_to_bytes(self.container_name, key), self._metadata_for_key(key)
def store_file_from_memory(self, key, memstring, metadata=None):
key = self.format_key_for_backend(key)
# Azure requires all metadata keys and values to be strings
        metadata_to_send = dict((str(k), str(v)) for k, v in (metadata or {}).items())
self.conn.put_block_blob_from_bytes(self.container_name, key, memstring,
x_ms_meta_name_values=metadata_to_send)
def store_file_from_disk(self, key, filepath, metadata=None, multipart=None):
key = self.format_key_for_backend(key)
# Azure requires all metadata keys and values to be strings
        metadata_to_send = dict((str(k), str(v)) for k, v in (metadata or {}).items())
self.conn.put_block_blob_from_path(self.container_name, key, filepath,
x_ms_meta_name_values=metadata_to_send)
def get_or_create_container(self, container_name):
start_time = time.time()
self.conn.create_container(container_name)
self.log.debug("Got/Created container: %r successfully, took: %.3fs", container_name, time.time() - start_time)
return container_name
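# Minimal usage sketch (illustrative only; the account credentials, container,
# prefix and key names are hypothetical):
#
#   transfer = AzureTransfer("myaccount", "base64secret==", "backups", prefix="site1")
#   transfer.store_file_from_memory("basebackup/chunk.0", b"data", metadata={"part": 0})
#   print(transfer.list_path("basebackup"))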
|
apache-2.0
|
duanhong169/google-diff-match-patch
|
python3/diff_match_patch.py
|
297
|
67320
|
#!/usr/bin/python3
"""Diff Match and Patch
Copyright 2006 Google Inc.
http://code.google.com/p/google-diff-match-patch/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""Functions for diff, match and patch.
Computes the difference between two texts to create a patch.
Applies the patch onto another text, allowing for errors.
"""
__author__ = '[email protected] (Neil Fraser)'
import math
import re
import sys
import time
import urllib.parse
class diff_match_patch:
"""Class containing the diff, match and patch methods.
Also contains the behaviour settings.
"""
def __init__(self):
"""Inits a diff_match_patch object with default settings.
Redefine these in your program to override the defaults.
"""
# Number of seconds to map a diff before giving up (0 for infinity).
self.Diff_Timeout = 1.0
# Cost of an empty edit operation in terms of edit characters.
self.Diff_EditCost = 4
# At what point is no match declared (0.0 = perfection, 1.0 = very loose).
self.Match_Threshold = 0.5
# How far to search for a match (0 = exact location, 1000+ = broad match).
# A match this many characters away from the expected location will add
# 1.0 to the score (0.0 is a perfect match).
self.Match_Distance = 1000
# When deleting a large block of text (over ~64 characters), how close do
# the contents have to be to match the expected contents. (0.0 = perfection,
# 1.0 = very loose). Note that Match_Threshold controls how closely the
# end points of a delete need to match.
self.Patch_DeleteThreshold = 0.5
# Chunk size for context length.
self.Patch_Margin = 4
# The number of bits in an int.
# Python has no maximum, thus to disable patch splitting set to 0.
# However to avoid long patches in certain pathological cases, use 32.
# Multiple short patches (using native ints) are much faster than long ones.
self.Match_MaxBits = 32
# DIFF FUNCTIONS
# The data structure representing a diff is an array of tuples:
# [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
# which means: delete "Hello", add "Goodbye" and keep " world."
DIFF_DELETE = -1
DIFF_INSERT = 1
DIFF_EQUAL = 0
def diff_main(self, text1, text2, checklines=True, deadline=None):
"""Find the differences between two texts. Simplifies the problem by
stripping any common prefix or suffix off the texts before diffing.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Optional speedup flag. If present and false, then don't run
a line-level diff first to identify the changed areas.
Defaults to true, which does a faster, slightly less optimal diff.
deadline: Optional time when the diff should be complete by. Used
internally for recursive calls. Users should set DiffTimeout instead.
Returns:
Array of changes.
"""
# Set a deadline by which time the diff must be complete.
if deadline == None:
# Unlike in most languages, Python counts time in seconds.
if self.Diff_Timeout <= 0:
deadline = sys.maxsize
else:
deadline = time.time() + self.Diff_Timeout
# Check for null inputs.
if text1 == None or text2 == None:
raise ValueError("Null inputs. (diff_main)")
# Check for equality (speedup).
if text1 == text2:
if text1:
return [(self.DIFF_EQUAL, text1)]
return []
# Trim off common prefix (speedup).
commonlength = self.diff_commonPrefix(text1, text2)
commonprefix = text1[:commonlength]
text1 = text1[commonlength:]
text2 = text2[commonlength:]
# Trim off common suffix (speedup).
commonlength = self.diff_commonSuffix(text1, text2)
if commonlength == 0:
commonsuffix = ''
else:
commonsuffix = text1[-commonlength:]
text1 = text1[:-commonlength]
text2 = text2[:-commonlength]
# Compute the diff on the middle block.
diffs = self.diff_compute(text1, text2, checklines, deadline)
# Restore the prefix and suffix.
if commonprefix:
diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
if commonsuffix:
diffs.append((self.DIFF_EQUAL, commonsuffix))
self.diff_cleanupMerge(diffs)
return diffs
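  # Illustrative example (not part of the original source): with the default
  # settings, diff_main("Good dog", "Bad dog") should yield something like
  # [(-1, 'Goo'), (1, 'Ba'), (0, 'd dog')] -- delete "Goo", insert "Ba",
  # keep "d dog".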
def diff_compute(self, text1, text2, checklines, deadline):
"""Find the differences between two texts. Assumes that the texts do not
have any common prefix or suffix.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
checklines: Speedup flag. If false, then don't run a line-level diff
first to identify the changed areas.
If true, then run a faster, slightly less optimal diff.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
if not text1:
# Just add some text (speedup).
return [(self.DIFF_INSERT, text2)]
if not text2:
# Just delete some text (speedup).
return [(self.DIFF_DELETE, text1)]
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
i = longtext.find(shorttext)
if i != -1:
# Shorter text is inside the longer text (speedup).
diffs = [(self.DIFF_INSERT, longtext[:i]), (self.DIFF_EQUAL, shorttext),
(self.DIFF_INSERT, longtext[i + len(shorttext):])]
# Swap insertions for deletions if diff is reversed.
if len(text1) > len(text2):
diffs[0] = (self.DIFF_DELETE, diffs[0][1])
diffs[2] = (self.DIFF_DELETE, diffs[2][1])
return diffs
if len(shorttext) == 1:
# Single character string.
# After the previous speedup, the character can't be an equality.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
# Check to see if the problem can be split in two.
hm = self.diff_halfMatch(text1, text2)
if hm:
# A half-match was found, sort out the return data.
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
# Send both pairs off for separate processing.
diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
# Merge the results.
return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
if checklines and len(text1) > 100 and len(text2) > 100:
return self.diff_lineMode(text1, text2, deadline)
return self.diff_bisect(text1, text2, deadline)
def diff_lineMode(self, text1, text2, deadline):
"""Do a quick line-level diff on both strings, then rediff the parts for
greater accuracy.
This speedup can produce non-minimal diffs.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time when the diff should be complete by.
Returns:
Array of changes.
"""
# Scan the text on a line-by-line basis first.
(text1, text2, linearray) = self.diff_linesToChars(text1, text2)
diffs = self.diff_main(text1, text2, False, deadline)
# Convert the diff back to original text.
self.diff_charsToLines(diffs, linearray)
# Eliminate freak matches (e.g. blank lines)
self.diff_cleanupSemantic(diffs)
# Rediff any replacement blocks, this time character-by-character.
# Add a dummy entry at the end.
diffs.append((self.DIFF_EQUAL, ''))
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
elif diffs[pointer][0] == self.DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
elif diffs[pointer][0] == self.DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete >= 1 and count_insert >= 1:
# Delete the offending records and add the merged ones.
a = self.diff_main(text_delete, text_insert, False, deadline)
diffs[pointer - count_delete - count_insert : pointer] = a
pointer = pointer - count_delete - count_insert + len(a)
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
pointer += 1
diffs.pop() # Remove the dummy entry at the end.
return diffs
def diff_bisect(self, text1, text2, deadline):
"""Find the 'middle snake' of a diff, split the problem in two
and return the recursively constructed diff.
See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
# Cache the text lengths to prevent multiple calls.
text1_length = len(text1)
text2_length = len(text2)
max_d = (text1_length + text2_length + 1) // 2
v_offset = max_d
v_length = 2 * max_d
v1 = [-1] * v_length
v1[v_offset + 1] = 0
v2 = v1[:]
delta = text1_length - text2_length
# If the total number of characters is odd, then the front path will
# collide with the reverse path.
front = (delta % 2 != 0)
# Offsets for start and end of k loop.
# Prevents mapping of space beyond the grid.
k1start = 0
k1end = 0
k2start = 0
k2end = 0
for d in range(max_d):
# Bail out if deadline is reached.
if time.time() > deadline:
break
# Walk the front path one step.
for k1 in range(-d + k1start, d + 1 - k1end, 2):
k1_offset = v_offset + k1
if k1 == -d or (k1 != d and
v1[k1_offset - 1] < v1[k1_offset + 1]):
x1 = v1[k1_offset + 1]
else:
x1 = v1[k1_offset - 1] + 1
y1 = x1 - k1
while (x1 < text1_length and y1 < text2_length and
text1[x1] == text2[y1]):
x1 += 1
y1 += 1
v1[k1_offset] = x1
if x1 > text1_length:
# Ran off the right of the graph.
k1end += 2
elif y1 > text2_length:
# Ran off the bottom of the graph.
k1start += 2
elif front:
k2_offset = v_offset + delta - k1
if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
# Mirror x2 onto top-left coordinate system.
x2 = text1_length - v2[k2_offset]
if x1 >= x2:
# Overlap detected.
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
# Walk the reverse path one step.
for k2 in range(-d + k2start, d + 1 - k2end, 2):
k2_offset = v_offset + k2
if k2 == -d or (k2 != d and
v2[k2_offset - 1] < v2[k2_offset + 1]):
x2 = v2[k2_offset + 1]
else:
x2 = v2[k2_offset - 1] + 1
y2 = x2 - k2
while (x2 < text1_length and y2 < text2_length and
text1[-x2 - 1] == text2[-y2 - 1]):
x2 += 1
y2 += 1
v2[k2_offset] = x2
if x2 > text1_length:
# Ran off the left of the graph.
k2end += 2
elif y2 > text2_length:
# Ran off the top of the graph.
k2start += 2
elif not front:
k1_offset = v_offset + delta - k2
if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
x1 = v1[k1_offset]
y1 = v_offset + x1 - k1_offset
# Mirror x2 onto top-left coordinate system.
x2 = text1_length - x2
if x1 >= x2:
# Overlap detected.
return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
# Diff took too long and hit the deadline or
# number of diffs equals number of characters, no commonality at all.
return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
def diff_bisectSplit(self, text1, text2, x, y, deadline):
"""Given the location of the 'middle snake', split the diff in two parts
and recurse.
Args:
text1: Old string to be diffed.
text2: New string to be diffed.
x: Index of split point in text1.
y: Index of split point in text2.
deadline: Time at which to bail if not yet complete.
Returns:
Array of diff tuples.
"""
text1a = text1[:x]
text2a = text2[:y]
text1b = text1[x:]
text2b = text2[y:]
# Compute both diffs serially.
diffs = self.diff_main(text1a, text2a, False, deadline)
diffsb = self.diff_main(text1b, text2b, False, deadline)
return diffs + diffsb
def diff_linesToChars(self, text1, text2):
"""Split two texts into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Args:
text1: First string.
text2: Second string.
Returns:
Three element tuple, containing the encoded text1, the encoded text2 and
the array of unique strings. The zeroth element of the array of unique
strings is intentionally blank.
"""
lineArray = [] # e.g. lineArray[4] == "Hello\n"
lineHash = {} # e.g. lineHash["Hello\n"] == 4
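    # Illustrative example (hypothetical inputs):
    #   diff_linesToChars("a\nb\n", "a\nc\n")
    #   -> ('\x01\x02', '\x01\x03', ['', 'a\n', 'b\n', 'c\n'])
    # i.e. each unique line is mapped to a single character code.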
# "\x00" is a valid character, but various debuggers don't like it.
# So we'll insert a junk entry to avoid generating a null character.
lineArray.append('')
def diff_linesToCharsMunge(text):
"""Split a text into an array of strings. Reduce the texts to a string
of hashes where each Unicode character represents one line.
Modifies linearray and linehash through being a closure.
Args:
text: String to encode.
Returns:
Encoded string.
"""
chars = []
# Walk the text, pulling out a substring for each line.
      # text.split('\n') would temporarily double our memory footprint.
# Modifying text would create many large strings to garbage collect.
lineStart = 0
lineEnd = -1
while lineEnd < len(text) - 1:
lineEnd = text.find('\n', lineStart)
if lineEnd == -1:
lineEnd = len(text) - 1
line = text[lineStart:lineEnd + 1]
lineStart = lineEnd + 1
if line in lineHash:
chars.append(chr(lineHash[line]))
else:
lineArray.append(line)
lineHash[line] = len(lineArray) - 1
chars.append(chr(len(lineArray) - 1))
return "".join(chars)
chars1 = diff_linesToCharsMunge(text1)
chars2 = diff_linesToCharsMunge(text2)
return (chars1, chars2, lineArray)
def diff_charsToLines(self, diffs, lineArray):
"""Rehydrate the text in a diff from a string of line hashes to real lines
of text.
Args:
diffs: Array of diff tuples.
lineArray: Array of unique strings.
"""
for x in range(len(diffs)):
text = []
for char in diffs[x][1]:
text.append(lineArray[ord(char)])
diffs[x] = (diffs[x][0], "".join(text))
def diff_commonPrefix(self, text1, text2):
"""Determine the common prefix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the start of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[0] != text2[0]:
return 0
# Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerstart = 0
while pointermin < pointermid:
if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
pointermin = pointermid
pointerstart = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def diff_commonSuffix(self, text1, text2):
"""Determine the common suffix of two strings.
Args:
text1: First string.
text2: Second string.
Returns:
The number of characters common to the end of each string.
"""
# Quick check for common null cases.
if not text1 or not text2 or text1[-1] != text2[-1]:
return 0
# Binary search.
# Performance analysis: http://neil.fraser.name/news/2007/10/09/
pointermin = 0
pointermax = min(len(text1), len(text2))
pointermid = pointermax
pointerend = 0
while pointermin < pointermid:
if (text1[-pointermid:len(text1) - pointerend] ==
text2[-pointermid:len(text2) - pointerend]):
pointermin = pointermid
pointerend = pointermin
else:
pointermax = pointermid
pointermid = (pointermax - pointermin) // 2 + pointermin
return pointermid
def diff_commonOverlap(self, text1, text2):
"""Determine if the suffix of one string is the prefix of another.
Args:
text1 First string.
text2 Second string.
Returns:
The number of characters common to the end of the first
string and the start of the second string.
"""
# Cache the text lengths to prevent multiple calls.
text1_length = len(text1)
text2_length = len(text2)
# Eliminate the null case.
if text1_length == 0 or text2_length == 0:
return 0
# Truncate the longer string.
if text1_length > text2_length:
text1 = text1[-text2_length:]
elif text1_length < text2_length:
text2 = text2[:text1_length]
text_length = min(text1_length, text2_length)
# Quick check for the worst case.
if text1 == text2:
return text_length
# Start by looking for a single character match
# and increase length until no match is found.
# Performance analysis: http://neil.fraser.name/news/2010/11/04/
best = 0
length = 1
while True:
pattern = text1[-length:]
found = text2.find(pattern)
if found == -1:
return best
length += found
if found == 0 or text1[-length:] == text2[:length]:
best = length
length += 1
def diff_halfMatch(self, text1, text2):
"""Do the two texts share a substring which is at least half the length of
the longer text?
This speedup can produce non-minimal diffs.
Args:
text1: First string.
text2: Second string.
Returns:
Five element Array, containing the prefix of text1, the suffix of text1,
the prefix of text2, the suffix of text2 and the common middle. Or None
if there was no match.
"""
if self.Diff_Timeout <= 0:
# Don't risk returning a non-optimal diff if we have unlimited time.
return None
if len(text1) > len(text2):
(longtext, shorttext) = (text1, text2)
else:
(shorttext, longtext) = (text1, text2)
if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
return None # Pointless.
def diff_halfMatchI(longtext, shorttext, i):
"""Does a substring of shorttext exist within longtext such that the
substring is at least half the length of longtext?
Closure, but does not reference any external variables.
Args:
longtext: Longer string.
shorttext: Shorter string.
i: Start index of quarter length substring within longtext.
Returns:
Five element Array, containing the prefix of longtext, the suffix of
longtext, the prefix of shorttext, the suffix of shorttext and the
common middle. Or None if there was no match.
"""
seed = longtext[i:i + len(longtext) // 4]
best_common = ''
j = shorttext.find(seed)
while j != -1:
prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
if len(best_common) < suffixLength + prefixLength:
best_common = (shorttext[j - suffixLength:j] +
shorttext[j:j + prefixLength])
best_longtext_a = longtext[:i - suffixLength]
best_longtext_b = longtext[i + prefixLength:]
best_shorttext_a = shorttext[:j - suffixLength]
best_shorttext_b = shorttext[j + prefixLength:]
j = shorttext.find(seed, j + 1)
if len(best_common) * 2 >= len(longtext):
return (best_longtext_a, best_longtext_b,
best_shorttext_a, best_shorttext_b, best_common)
else:
return None
# First check if the second quarter is the seed for a half-match.
hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
# Check again based on the third quarter.
hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
if not hm1 and not hm2:
return None
elif not hm2:
hm = hm1
elif not hm1:
hm = hm2
else:
# Both matched. Select the longest.
if len(hm1[4]) > len(hm2[4]):
hm = hm1
else:
hm = hm2
# A half-match was found, sort out the return data.
if len(text1) > len(text2):
(text1_a, text1_b, text2_a, text2_b, mid_common) = hm
else:
(text2_a, text2_b, text1_a, text1_b, mid_common) = hm
return (text1_a, text1_b, text2_a, text2_b, mid_common)
def diff_cleanupSemantic(self, diffs):
"""Reduce the number of edits by eliminating semantically trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
# Number of chars that changed prior to the equality.
length_insertions1, length_deletions1 = 0, 0
# Number of chars that changed after the equality.
length_insertions2, length_deletions2 = 0, 0
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
equalities.append(pointer)
length_insertions1, length_insertions2 = length_insertions2, 0
length_deletions1, length_deletions2 = length_deletions2, 0
lastequality = diffs[pointer][1]
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_INSERT:
length_insertions2 += len(diffs[pointer][1])
else:
length_deletions2 += len(diffs[pointer][1])
# Eliminate an equality that is smaller or equal to the edits on both
# sides of it.
if (lastequality and (len(lastequality) <=
max(length_insertions1, length_deletions1)) and
(len(lastequality) <= max(length_insertions2, length_deletions2))):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
# Throw away the equality we just deleted.
equalities.pop()
# Throw away the previous equality (it needs to be reevaluated).
if len(equalities):
equalities.pop()
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
# Reset the counters.
length_insertions1, length_deletions1 = 0, 0
length_insertions2, length_deletions2 = 0, 0
lastequality = None
changes = True
pointer += 1
# Normalize the diff.
if changes:
self.diff_cleanupMerge(diffs)
self.diff_cleanupSemanticLossless(diffs)
# Find any overlaps between deletions and insertions.
# e.g: <del>abcxxx</del><ins>xxxdef</ins>
# -> <del>abc</del>xxx<ins>def</ins>
# e.g: <del>xxxabc</del><ins>defxxx</ins>
# -> <ins>def</ins>xxx<del>abc</del>
# Only extract an overlap if it is as big as the edit ahead or behind it.
pointer = 1
while pointer < len(diffs):
if (diffs[pointer - 1][0] == self.DIFF_DELETE and
diffs[pointer][0] == self.DIFF_INSERT):
deletion = diffs[pointer - 1][1]
insertion = diffs[pointer][1]
overlap_length1 = self.diff_commonOverlap(deletion, insertion)
overlap_length2 = self.diff_commonOverlap(insertion, deletion)
if overlap_length1 >= overlap_length2:
if (overlap_length1 >= len(deletion) / 2.0 or
overlap_length1 >= len(insertion) / 2.0):
# Overlap found. Insert an equality and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL,
insertion[:overlap_length1]))
diffs[pointer - 1] = (self.DIFF_DELETE,
deletion[:len(deletion) - overlap_length1])
diffs[pointer + 1] = (self.DIFF_INSERT,
insertion[overlap_length1:])
pointer += 1
else:
if (overlap_length2 >= len(deletion) / 2.0 or
overlap_length2 >= len(insertion) / 2.0):
# Reverse overlap found.
# Insert an equality and swap and trim the surrounding edits.
diffs.insert(pointer, (self.DIFF_EQUAL, deletion[:overlap_length2]))
diffs[pointer - 1] = (self.DIFF_INSERT,
insertion[:len(insertion) - overlap_length2])
diffs[pointer + 1] = (self.DIFF_DELETE, deletion[overlap_length2:])
pointer += 1
pointer += 1
pointer += 1
def diff_cleanupSemanticLossless(self, diffs):
"""Look for single edits surrounded on both sides by equalities
which can be shifted sideways to align the edit to a word boundary.
e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
Args:
diffs: Array of diff tuples.
"""
def diff_cleanupSemanticScore(one, two):
"""Given two strings, compute a score representing whether the
internal boundary falls on logical boundaries.
Scores range from 6 (best) to 0 (worst).
Closure, but does not reference any external variables.
Args:
one: First string.
two: Second string.
Returns:
The score.
"""
if not one or not two:
# Edges are the best.
return 6
# Each port of this function behaves slightly differently due to
# subtle differences in each language's definition of things like
# 'whitespace'. Since this function's purpose is largely cosmetic,
# the choice has been made to use each language's native features
# rather than force total conformity.
char1 = one[-1]
char2 = two[0]
nonAlphaNumeric1 = not char1.isalnum()
nonAlphaNumeric2 = not char2.isalnum()
whitespace1 = nonAlphaNumeric1 and char1.isspace()
whitespace2 = nonAlphaNumeric2 and char2.isspace()
lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)
if blankLine1 or blankLine2:
# Five points for blank lines.
return 5
elif lineBreak1 or lineBreak2:
# Four points for line breaks.
return 4
elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
# Three points for end of sentences.
return 3
elif whitespace1 or whitespace2:
# Two points for whitespace.
return 2
elif nonAlphaNumeric1 or nonAlphaNumeric2:
# One point for non-alphanumeric.
return 1
return 0
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
equality1 = diffs[pointer - 1][1]
edit = diffs[pointer][1]
equality2 = diffs[pointer + 1][1]
# First, shift the edit as far left as possible.
commonOffset = self.diff_commonSuffix(equality1, edit)
if commonOffset:
commonString = edit[-commonOffset:]
equality1 = equality1[:-commonOffset]
edit = commonString + edit[:-commonOffset]
equality2 = commonString + equality2
# Second, step character by character right, looking for the best fit.
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
bestScore = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
while edit and equality2 and edit[0] == equality2[0]:
equality1 += edit[0]
edit = edit[1:] + equality2[0]
equality2 = equality2[1:]
score = (diff_cleanupSemanticScore(equality1, edit) +
diff_cleanupSemanticScore(edit, equality2))
# The >= encourages trailing rather than leading whitespace on edits.
if score >= bestScore:
bestScore = score
bestEquality1 = equality1
bestEdit = edit
bestEquality2 = equality2
if diffs[pointer - 1][1] != bestEquality1:
# We have an improvement, save it back to the diff.
if bestEquality1:
diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
else:
del diffs[pointer - 1]
pointer -= 1
diffs[pointer] = (diffs[pointer][0], bestEdit)
if bestEquality2:
diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
else:
del diffs[pointer + 1]
pointer -= 1
pointer += 1
# Define some regex patterns for matching boundaries.
  BLANKLINEEND = re.compile(r"\n\r?\n$")
  BLANKLINESTART = re.compile(r"^\r?\n\r?\n")
def diff_cleanupEfficiency(self, diffs):
"""Reduce the number of edits by eliminating operationally trivial
equalities.
Args:
diffs: Array of diff tuples.
"""
changes = False
equalities = [] # Stack of indices where equalities are found.
lastequality = None # Always equal to diffs[equalities[-1]][1]
pointer = 0 # Index of current position.
pre_ins = False # Is there an insertion operation before the last equality.
pre_del = False # Is there a deletion operation before the last equality.
post_ins = False # Is there an insertion operation after the last equality.
post_del = False # Is there a deletion operation after the last equality.
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
if (len(diffs[pointer][1]) < self.Diff_EditCost and
(post_ins or post_del)):
# Candidate found.
equalities.append(pointer)
pre_ins = post_ins
pre_del = post_del
lastequality = diffs[pointer][1]
else:
# Not a candidate, and can never become one.
equalities = []
lastequality = None
post_ins = post_del = False
else: # An insertion or deletion.
if diffs[pointer][0] == self.DIFF_DELETE:
post_del = True
else:
post_ins = True
# Five types to be split:
# <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
# <ins>A</ins>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<ins>C</ins>
# <ins>A</del>X<ins>C</ins><del>D</del>
# <ins>A</ins><del>B</del>X<del>C</del>
if lastequality and ((pre_ins and pre_del and post_ins and post_del) or
((len(lastequality) < self.Diff_EditCost / 2) and
(pre_ins + pre_del + post_ins + post_del) == 3)):
# Duplicate record.
diffs.insert(equalities[-1], (self.DIFF_DELETE, lastequality))
# Change second copy to insert.
diffs[equalities[-1] + 1] = (self.DIFF_INSERT,
diffs[equalities[-1] + 1][1])
equalities.pop() # Throw away the equality we just deleted.
lastequality = None
if pre_ins and pre_del:
# No changes made which could affect previous entry, keep going.
post_ins = post_del = True
equalities = []
else:
if len(equalities):
equalities.pop() # Throw away the previous equality.
if len(equalities):
pointer = equalities[-1]
else:
pointer = -1
post_ins = post_del = False
changes = True
pointer += 1
if changes:
self.diff_cleanupMerge(diffs)
def diff_cleanupMerge(self, diffs):
"""Reorder and merge like edit sections. Merge equalities.
Any edit section can move as long as it doesn't cross an equality.
Args:
diffs: Array of diff tuples.
"""
diffs.append((self.DIFF_EQUAL, '')) # Add a dummy entry at the end.
pointer = 0
count_delete = 0
count_insert = 0
text_delete = ''
text_insert = ''
while pointer < len(diffs):
if diffs[pointer][0] == self.DIFF_INSERT:
count_insert += 1
text_insert += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == self.DIFF_DELETE:
count_delete += 1
text_delete += diffs[pointer][1]
pointer += 1
elif diffs[pointer][0] == self.DIFF_EQUAL:
# Upon reaching an equality, check for prior redundancies.
if count_delete + count_insert > 1:
if count_delete != 0 and count_insert != 0:
# Factor out any common prefixes.
commonlength = self.diff_commonPrefix(text_insert, text_delete)
if commonlength != 0:
x = pointer - count_delete - count_insert - 1
if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
diffs[x] = (diffs[x][0], diffs[x][1] +
text_insert[:commonlength])
else:
diffs.insert(0, (self.DIFF_EQUAL, text_insert[:commonlength]))
pointer += 1
text_insert = text_insert[commonlength:]
text_delete = text_delete[commonlength:]
# Factor out any common suffixes.
commonlength = self.diff_commonSuffix(text_insert, text_delete)
if commonlength != 0:
diffs[pointer] = (diffs[pointer][0], text_insert[-commonlength:] +
diffs[pointer][1])
text_insert = text_insert[:-commonlength]
text_delete = text_delete[:-commonlength]
# Delete the offending records and add the merged ones.
if count_delete == 0:
diffs[pointer - count_insert : pointer] = [
(self.DIFF_INSERT, text_insert)]
elif count_insert == 0:
diffs[pointer - count_delete : pointer] = [
(self.DIFF_DELETE, text_delete)]
else:
diffs[pointer - count_delete - count_insert : pointer] = [
(self.DIFF_DELETE, text_delete),
(self.DIFF_INSERT, text_insert)]
pointer = pointer - count_delete - count_insert + 1
if count_delete != 0:
pointer += 1
if count_insert != 0:
pointer += 1
elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
# Merge this equality with the previous one.
diffs[pointer - 1] = (diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer][1])
del diffs[pointer]
else:
pointer += 1
count_insert = 0
count_delete = 0
text_delete = ''
text_insert = ''
if diffs[-1][1] == '':
diffs.pop() # Remove the dummy entry at the end.
# Second pass: look for single edits surrounded on both sides by equalities
# which can be shifted sideways to eliminate an equality.
# e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
changes = False
pointer = 1
# Intentionally ignore the first and last element (don't need checking).
while pointer < len(diffs) - 1:
if (diffs[pointer - 1][0] == self.DIFF_EQUAL and
diffs[pointer + 1][0] == self.DIFF_EQUAL):
# This is a single edit surrounded by equalities.
if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
# Shift the edit over the previous equality.
diffs[pointer] = (diffs[pointer][0],
diffs[pointer - 1][1] +
diffs[pointer][1][:-len(diffs[pointer - 1][1])])
diffs[pointer + 1] = (diffs[pointer + 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
del diffs[pointer - 1]
changes = True
elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
# Shift the edit over the next equality.
diffs[pointer - 1] = (diffs[pointer - 1][0],
diffs[pointer - 1][1] + diffs[pointer + 1][1])
diffs[pointer] = (diffs[pointer][0],
diffs[pointer][1][len(diffs[pointer + 1][1]):] +
diffs[pointer + 1][1])
del diffs[pointer + 1]
changes = True
pointer += 1
# If shifts were made, the diff needs reordering and another shift sweep.
if changes:
self.diff_cleanupMerge(diffs)
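# Illustrative sketch (not part of the original library): what
# diff_cleanupMerge does to a raw diff list, assuming an instance dmp.
#
#   diffs = [(dmp.DIFF_EQUAL, "a"), (dmp.DIFF_DELETE, "b"),
#            (dmp.DIFF_DELETE, "c"), (dmp.DIFF_EQUAL, "d")]
#   dmp.diff_cleanupMerge(diffs)
#   # diffs is now [(dmp.DIFF_EQUAL, "a"), (dmp.DIFF_DELETE, "bc"),
#   #               (dmp.DIFF_EQUAL, "d")] -- adjacent deletions merged.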
def diff_xIndex(self, diffs, loc):
"""loc is a location in text1, compute and return the equivalent location
in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
Args:
diffs: Array of diff tuples.
loc: Location within text1.
Returns:
Location within text2.
"""
chars1 = 0
chars2 = 0
last_chars1 = 0
last_chars2 = 0
for x in range(len(diffs)):
(op, text) = diffs[x]
if op != self.DIFF_INSERT: # Equality or deletion.
chars1 += len(text)
if op != self.DIFF_DELETE: # Equality or insertion.
chars2 += len(text)
if chars1 > loc: # Overshot the location.
break
last_chars1 = chars1
last_chars2 = chars2
if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
# The location was deleted.
return last_chars2
# Add the remaining len(character).
return last_chars2 + (loc - last_chars1)
def diff_prettyHtml(self, diffs):
"""Convert a diff array into a pretty HTML report.
Args:
diffs: Array of diff tuples.
Returns:
HTML representation.
"""
html = []
for (op, data) in diffs:
text = (data.replace("&", "&amp;").replace("<", "&lt;")
        .replace(">", "&gt;").replace("\n", "&para;<br>"))
if op == self.DIFF_INSERT:
html.append("<ins style=\"background:#e6ffe6;\">%s</ins>" % text)
elif op == self.DIFF_DELETE:
html.append("<del style=\"background:#ffe6e6;\">%s</del>" % text)
elif op == self.DIFF_EQUAL:
html.append("<span>%s</span>" % text)
return "".join(html)
def diff_text1(self, diffs):
"""Compute and return the source text (all equalities and deletions).
Args:
diffs: Array of diff tuples.
Returns:
Source text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_INSERT:
text.append(data)
return "".join(text)
def diff_text2(self, diffs):
"""Compute and return the destination text (all equalities and insertions).
Args:
diffs: Array of diff tuples.
Returns:
Destination text.
"""
text = []
for (op, data) in diffs:
if op != self.DIFF_DELETE:
text.append(data)
return "".join(text)
def diff_levenshtein(self, diffs):
"""Compute the Levenshtein distance; the number of inserted, deleted or
substituted characters.
Args:
diffs: Array of diff tuples.
Returns:
Number of changes.
"""
levenshtein = 0
insertions = 0
deletions = 0
for (op, data) in diffs:
if op == self.DIFF_INSERT:
insertions += len(data)
elif op == self.DIFF_DELETE:
deletions += len(data)
elif op == self.DIFF_EQUAL:
# A deletion and an insertion is one substitution.
levenshtein += max(insertions, deletions)
insertions = 0
deletions = 0
levenshtein += max(insertions, deletions)
return levenshtein
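# Illustrative sketch (not part of the original library): a substitution
# counts once, not twice, because max(insertions, deletions) is taken per
# run of edits.
#
#   dmp.diff_levenshtein([(dmp.DIFF_DELETE, "abc"),
#                         (dmp.DIFF_INSERT, "12"),
#                         (dmp.DIFF_EQUAL, "xyz")])
#   # -> 3  (three characters removed, two of them replaced by insertions)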
def diff_toDelta(self, diffs):
"""Crush the diff into an encoded string which describes the operations
required to transform text1 into text2.
E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
Operations are tab-separated. Inserted text is escaped using %xx notation.
Args:
diffs: Array of diff tuples.
Returns:
Delta text.
"""
text = []
for (op, data) in diffs:
if op == self.DIFF_INSERT:
# High ascii will raise UnicodeDecodeError. Use Unicode instead.
data = data.encode("utf-8")
text.append("+" + urllib.parse.quote(data, "!~*'();/?:@&=+$,# "))
elif op == self.DIFF_DELETE:
text.append("-%d" % len(data))
elif op == self.DIFF_EQUAL:
text.append("=%d" % len(data))
return "\t".join(text)
def diff_fromDelta(self, text1, delta):
"""Given the original text1, and an encoded string which describes the
operations required to transform text1 into text2, compute the full diff.
Args:
text1: Source string for the diff.
delta: Delta text.
Returns:
Array of diff tuples.
Raises:
ValueError: If invalid input.
"""
diffs = []
pointer = 0 # Cursor in text1
tokens = delta.split("\t")
for token in tokens:
if token == "":
# Blank tokens are ok (from a trailing \t).
continue
# Each token begins with a one character parameter which specifies the
# operation of this token (delete, insert, equality).
param = token[1:]
if token[0] == "+":
param = urllib.parse.unquote(param)
diffs.append((self.DIFF_INSERT, param))
elif token[0] == "-" or token[0] == "=":
try:
n = int(param)
except ValueError:
raise ValueError("Invalid number in diff_fromDelta: " + param)
if n < 0:
raise ValueError("Negative number in diff_fromDelta: " + param)
text = text1[pointer : pointer + n]
pointer += n
if token[0] == "=":
diffs.append((self.DIFF_EQUAL, text))
else:
diffs.append((self.DIFF_DELETE, text))
else:
# Anything else is an error.
raise ValueError("Invalid diff operation in diff_fromDelta: " +
token[0])
if pointer != len(text1):
raise ValueError(
"Delta length (%d) does not equal source text length (%d)." %
(pointer, len(text1)))
return diffs
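# Illustrative sketch (not part of the original library): a delta
# round-trip, assuming an instance dmp.
#
#   diffs = [(dmp.DIFF_EQUAL, "jump"), (dmp.DIFF_DELETE, "s"),
#            (dmp.DIFF_INSERT, "ed")]
#   delta = dmp.diff_toDelta(diffs)          # '=4\t-1\t+ed'
#   text1 = dmp.diff_text1(diffs)            # 'jumps'
#   dmp.diff_fromDelta(text1, delta) == diffs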
# MATCH FUNCTIONS
def match_main(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc'.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Check for null inputs.
if text == None or pattern == None:
raise ValueError("Null inputs. (match_main)")
loc = max(0, min(loc, len(text)))
if text == pattern:
# Shortcut (potentially not guaranteed by the algorithm)
return 0
elif not text:
# Nothing to match.
return -1
elif text[loc:loc + len(pattern)] == pattern:
# Perfect match at the perfect spot! (Includes case of null pattern)
return loc
else:
# Do a fuzzy compare.
match = self.match_bitap(text, pattern, loc)
return match
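# Illustrative sketch (not part of the original library): lookups handled
# by the shortcuts above, assuming an instance dmp.
#
#   dmp.match_main("abcdef", "de", 3)         # -> 3  (perfect match at loc)
#   dmp.match_main("abcdef", "abcdef", 1000)  # -> 0  (loc is clamped)
#   dmp.match_main("", "abc", 0)              # -> -1 (nothing to match)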
def match_bitap(self, text, pattern, loc):
"""Locate the best instance of 'pattern' in 'text' near 'loc' using the
Bitap algorithm.
Args:
text: The text to search.
pattern: The pattern to search for.
loc: The location to search around.
Returns:
Best match index or -1.
"""
# Python doesn't have a maxint limit, so ignore this check.
#if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
# raise ValueError("Pattern too long for this application.")
# Initialise the alphabet.
s = self.match_alphabet(pattern)
def match_bitapScore(e, x):
"""Compute and return the score for a match with e errors and x location.
Accesses loc and pattern through being a closure.
Args:
e: Number of errors in match.
x: Location of match.
Returns:
Overall score for match (0.0 = good, 1.0 = bad).
"""
accuracy = float(e) / len(pattern)
proximity = abs(loc - x)
if not self.Match_Distance:
# Dodge divide by zero error.
return proximity and 1.0 or accuracy
return accuracy + (proximity / float(self.Match_Distance))
# Highest score beyond which we give up.
score_threshold = self.Match_Threshold
# Is there a nearby exact match? (speedup)
best_loc = text.find(pattern, loc)
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# What about in the other direction? (speedup)
best_loc = text.rfind(pattern, loc + len(pattern))
if best_loc != -1:
score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
# Initialise the bit arrays.
matchmask = 1 << (len(pattern) - 1)
best_loc = -1
bin_max = len(pattern) + len(text)
# Empty initialization added to appease pychecker.
last_rd = None
for d in range(len(pattern)):
# Scan for the best match each iteration allows for one more error.
# Run a binary search to determine how far from 'loc' we can stray at
# this error level.
bin_min = 0
bin_mid = bin_max
while bin_min < bin_mid:
if match_bitapScore(d, loc + bin_mid) <= score_threshold:
bin_min = bin_mid
else:
bin_max = bin_mid
bin_mid = (bin_max - bin_min) // 2 + bin_min
# Use the result from this iteration as the maximum for the next.
bin_max = bin_mid
start = max(1, loc - bin_mid + 1)
finish = min(loc + bin_mid, len(text)) + len(pattern)
rd = [0] * (finish + 2)
rd[finish + 1] = (1 << d) - 1
for j in range(finish, start - 1, -1):
if len(text) <= j - 1:
# Out of range.
charMatch = 0
else:
charMatch = s.get(text[j - 1], 0)
if d == 0: # First pass: exact match.
rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
else: # Subsequent passes: fuzzy match.
rd[j] = (((rd[j + 1] << 1) | 1) & charMatch) | (
((last_rd[j + 1] | last_rd[j]) << 1) | 1) | last_rd[j + 1]
if rd[j] & matchmask:
score = match_bitapScore(d, j - 1)
# This match will almost certainly be better than any existing match.
# But check anyway.
if score <= score_threshold:
# Told you so.
score_threshold = score
best_loc = j - 1
if best_loc > loc:
# When passing loc, don't exceed our current distance from loc.
start = max(1, 2 * loc - best_loc)
else:
# Already passed loc, downhill from here on in.
break
# No hope for a (better) match at greater error levels.
if match_bitapScore(d + 1, loc) > score_threshold:
break
last_rd = rd
return best_loc
def match_alphabet(self, pattern):
"""Initialise the alphabet for the Bitap algorithm.
Args:
pattern: The text to encode.
Returns:
Hash of character locations.
"""
s = {}
for char in pattern:
s[char] = 0
for i in range(len(pattern)):
s[pattern[i]] |= 1 << (len(pattern) - i - 1)
return s
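# Illustrative sketch (not part of the original library): the bitmask
# produced for a three-character pattern, one bit per position counted
# from the most-significant end.
#
#   dmp.match_alphabet("abc")   # -> {'a': 4, 'b': 2, 'c': 1}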
# PATCH FUNCTIONS
def patch_addContext(self, patch, text):
"""Increase the context until it is unique,
but don't let the pattern expand beyond Match_MaxBits.
Args:
patch: The patch to grow.
text: Source text.
"""
if len(text) == 0:
return
pattern = text[patch.start2 : patch.start2 + patch.length1]
padding = 0
# Look for the first and last matches of pattern in text. If two different
# matches are found, increase the pattern length.
while (text.find(pattern) != text.rfind(pattern) and (self.Match_MaxBits ==
0 or len(pattern) < self.Match_MaxBits - self.Patch_Margin -
self.Patch_Margin)):
padding += self.Patch_Margin
pattern = text[max(0, patch.start2 - padding) :
patch.start2 + patch.length1 + padding]
# Add one chunk for good luck.
padding += self.Patch_Margin
# Add the prefix.
prefix = text[max(0, patch.start2 - padding) : patch.start2]
if prefix:
patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
# Add the suffix.
suffix = text[patch.start2 + patch.length1 :
patch.start2 + patch.length1 + padding]
if suffix:
patch.diffs.append((self.DIFF_EQUAL, suffix))
# Roll back the start points.
patch.start1 -= len(prefix)
patch.start2 -= len(prefix)
# Extend lengths.
patch.length1 += len(prefix) + len(suffix)
patch.length2 += len(prefix) + len(suffix)
def patch_make(self, a, b=None, c=None):
"""Compute a list of patches to turn text1 into text2.
Use diffs if provided, otherwise compute it ourselves.
There are four ways to call this function, depending on what data is
available to the caller:
Method 1:
a = text1, b = text2
Method 2:
a = diffs
Method 3 (optimal):
a = text1, b = diffs
Method 4 (deprecated, use method 3):
a = text1, b = text2, c = diffs
Args:
a: text1 (methods 1,3,4) or Array of diff tuples for text1 to
text2 (method 2).
b: text2 (methods 1,4) or Array of diff tuples for text1 to
text2 (method 3) or undefined (method 2).
c: Array of diff tuples for text1 to text2 (method 4) or
undefined (methods 1,2,3).
Returns:
Array of Patch objects.
"""
text1 = None
diffs = None
if isinstance(a, str) and isinstance(b, str) and c is None:
# Method 1: text1, text2
# Compute diffs from text1 and text2.
text1 = a
diffs = self.diff_main(text1, b, True)
if len(diffs) > 2:
self.diff_cleanupSemantic(diffs)
self.diff_cleanupEfficiency(diffs)
elif isinstance(a, list) and b is None and c is None:
# Method 2: diffs
# Compute text1 from diffs.
diffs = a
text1 = self.diff_text1(diffs)
elif isinstance(a, str) and isinstance(b, list) and c is None:
# Method 3: text1, diffs
text1 = a
diffs = b
elif (isinstance(a, str) and isinstance(b, str) and
isinstance(c, list)):
# Method 4: text1, text2, diffs
# text2 is not used.
text1 = a
diffs = c
else:
raise ValueError("Unknown call format to patch_make.")
if not diffs:
return [] # Get rid of the None case.
patches = []
patch = patch_obj()
char_count1 = 0 # Number of characters into the text1 string.
char_count2 = 0 # Number of characters into the text2 string.
prepatch_text = text1 # Recreate the patches to determine context info.
postpatch_text = text1
for x in range(len(diffs)):
(diff_type, diff_text) = diffs[x]
if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
# A new patch starts here.
patch.start1 = char_count1
patch.start2 = char_count2
if diff_type == self.DIFF_INSERT:
# Insertion
patch.diffs.append(diffs[x])
patch.length2 += len(diff_text)
postpatch_text = (postpatch_text[:char_count2] + diff_text +
postpatch_text[char_count2:])
elif diff_type == self.DIFF_DELETE:
# Deletion.
patch.length1 += len(diff_text)
patch.diffs.append(diffs[x])
postpatch_text = (postpatch_text[:char_count2] +
postpatch_text[char_count2 + len(diff_text):])
elif (diff_type == self.DIFF_EQUAL and
len(diff_text) <= 2 * self.Patch_Margin and
len(patch.diffs) != 0 and len(diffs) != x + 1):
# Small equality inside a patch.
patch.diffs.append(diffs[x])
patch.length1 += len(diff_text)
patch.length2 += len(diff_text)
if (diff_type == self.DIFF_EQUAL and
len(diff_text) >= 2 * self.Patch_Margin):
# Time for a new patch.
if len(patch.diffs) != 0:
self.patch_addContext(patch, prepatch_text)
patches.append(patch)
patch = patch_obj()
# Unlike Unidiff, our patch lists have a rolling context.
# http://code.google.com/p/google-diff-match-patch/wiki/Unidiff
# Update prepatch text & pos to reflect the application of the
# just completed patch.
prepatch_text = postpatch_text
char_count1 = char_count2
# Update the current character count.
if diff_type != self.DIFF_INSERT:
char_count1 += len(diff_text)
if diff_type != self.DIFF_DELETE:
char_count2 += len(diff_text)
# Pick up the leftover patch if not empty.
if len(patch.diffs) != 0:
self.patch_addContext(patch, prepatch_text)
patches.append(patch)
return patches
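# Illustrative sketch (not part of the original library): the three
# supported, non-deprecated call forms, assuming an instance dmp and two
# strings text1/text2.
#
#   patches = dmp.patch_make(text1, text2)                        # method 1
#   patches = dmp.patch_make(dmp.diff_main(text1, text2))         # method 2
#   patches = dmp.patch_make(text1, dmp.diff_main(text1, text2))  # method 3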
def patch_deepCopy(self, patches):
"""Given an array of patches, return another array that is identical.
Args:
patches: Array of Patch objects.
Returns:
Array of Patch objects.
"""
patchesCopy = []
for patch in patches:
patchCopy = patch_obj()
# No need to deep copy the tuples since they are immutable.
patchCopy.diffs = patch.diffs[:]
patchCopy.start1 = patch.start1
patchCopy.start2 = patch.start2
patchCopy.length1 = patch.length1
patchCopy.length2 = patch.length2
patchesCopy.append(patchCopy)
return patchesCopy
def patch_apply(self, patches, text):
"""Merge a set of patches onto the text. Return a patched text, as well
as a list of true/false values indicating which patches were applied.
Args:
patches: Array of Patch objects.
text: Old text.
Returns:
Two element Array, containing the new text and an array of boolean values.
"""
if not patches:
return (text, [])
# Deep copy the patches so that no changes are made to originals.
patches = self.patch_deepCopy(patches)
nullPadding = self.patch_addPadding(patches)
text = nullPadding + text + nullPadding
self.patch_splitMax(patches)
# delta keeps track of the offset between the expected and actual location
# of the previous patch. If there are patches expected at positions 10 and
# 20, but the first patch was found at 12, delta is 2 and the second patch
# has an effective expected position of 22.
delta = 0
results = []
for patch in patches:
expected_loc = patch.start2 + delta
text1 = self.diff_text1(patch.diffs)
end_loc = -1
if len(text1) > self.Match_MaxBits:
# patch_splitMax will only provide an oversized pattern in the case of
# a monster delete.
start_loc = self.match_main(text, text1[:self.Match_MaxBits],
expected_loc)
if start_loc != -1:
end_loc = self.match_main(text, text1[-self.Match_MaxBits:],
expected_loc + len(text1) - self.Match_MaxBits)
if end_loc == -1 or start_loc >= end_loc:
# Can't find valid trailing context. Drop this patch.
start_loc = -1
else:
start_loc = self.match_main(text, text1, expected_loc)
if start_loc == -1:
# No match found. :(
results.append(False)
# Subtract the delta for this failed patch from subsequent patches.
delta -= patch.length2 - patch.length1
else:
# Found a match. :)
results.append(True)
delta = start_loc - expected_loc
if end_loc == -1:
text2 = text[start_loc : start_loc + len(text1)]
else:
text2 = text[start_loc : end_loc + self.Match_MaxBits]
if text1 == text2:
# Perfect match, just shove the replacement text in.
text = (text[:start_loc] + self.diff_text2(patch.diffs) +
text[start_loc + len(text1):])
else:
# Imperfect match.
# Run a diff to get a framework of equivalent indices.
diffs = self.diff_main(text1, text2, False)
if (len(text1) > self.Match_MaxBits and
self.diff_levenshtein(diffs) / float(len(text1)) >
self.Patch_DeleteThreshold):
# The end points match, but the content is unacceptably bad.
results[-1] = False
else:
self.diff_cleanupSemanticLossless(diffs)
index1 = 0
for (op, data) in patch.diffs:
if op != self.DIFF_EQUAL:
index2 = self.diff_xIndex(diffs, index1)
if op == self.DIFF_INSERT: # Insertion
text = text[:start_loc + index2] + data + text[start_loc +
index2:]
elif op == self.DIFF_DELETE: # Deletion
text = text[:start_loc + index2] + text[start_loc +
self.diff_xIndex(diffs, index1 + len(data)):]
if op != self.DIFF_DELETE:
index1 += len(data)
# Strip the padding off.
text = text[len(nullPadding):-len(nullPadding)]
return (text, results)
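# Illustrative sketch (not part of the original library): applying patches
# back onto the source text, assuming an instance dmp.
#
#   patches = dmp.patch_make("Hello world.", "Goodbye world.")
#   new_text, results = dmp.patch_apply(patches, "Hello world.")
#   # expected: new_text == "Goodbye world." and all(results)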
def patch_addPadding(self, patches):
"""Add some padding on text start and end so that edges can match
something. Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
Returns:
The padding string added to each side.
"""
paddingLength = self.Patch_Margin
nullPadding = ""
for x in range(1, paddingLength + 1):
nullPadding += chr(x)
# Bump all the patches forward.
for patch in patches:
patch.start1 += paddingLength
patch.start2 += paddingLength
# Add some padding on start of first diff.
patch = patches[0]
diffs = patch.diffs
if not diffs or diffs[0][0] != self.DIFF_EQUAL:
# Add nullPadding equality.
diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
patch.start1 -= paddingLength # Should be 0.
patch.start2 -= paddingLength # Should be 0.
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[0][1]):
# Grow first equality.
extraLength = paddingLength - len(diffs[0][1])
newText = nullPadding[len(diffs[0][1]):] + diffs[0][1]
diffs[0] = (diffs[0][0], newText)
patch.start1 -= extraLength
patch.start2 -= extraLength
patch.length1 += extraLength
patch.length2 += extraLength
# Add some padding on end of last diff.
patch = patches[-1]
diffs = patch.diffs
if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
# Add nullPadding equality.
diffs.append((self.DIFF_EQUAL, nullPadding))
patch.length1 += paddingLength
patch.length2 += paddingLength
elif paddingLength > len(diffs[-1][1]):
# Grow last equality.
extraLength = paddingLength - len(diffs[-1][1])
newText = diffs[-1][1] + nullPadding[:extraLength]
diffs[-1] = (diffs[-1][0], newText)
patch.length1 += extraLength
patch.length2 += extraLength
return nullPadding
def patch_splitMax(self, patches):
"""Look through the patches and break up any which are longer than the
maximum limit of the match algorithm.
Intended to be called only from within patch_apply.
Args:
patches: Array of Patch objects.
"""
patch_size = self.Match_MaxBits
if patch_size == 0:
# Python has the option of not splitting strings due to its ability
# to handle integers of arbitrary precision.
return
for x in range(len(patches)):
if patches[x].length1 <= patch_size:
continue
bigpatch = patches[x]
# Remove the big old patch.
del patches[x]
x -= 1
start1 = bigpatch.start1
start2 = bigpatch.start2
precontext = ''
while len(bigpatch.diffs) != 0:
# Create one of several smaller patches.
patch = patch_obj()
empty = True
patch.start1 = start1 - len(precontext)
patch.start2 = start2 - len(precontext)
if precontext:
patch.length1 = patch.length2 = len(precontext)
patch.diffs.append((self.DIFF_EQUAL, precontext))
while (len(bigpatch.diffs) != 0 and
patch.length1 < patch_size - self.Patch_Margin):
(diff_type, diff_text) = bigpatch.diffs[0]
if diff_type == self.DIFF_INSERT:
# Insertions are harmless.
patch.length2 += len(diff_text)
start2 += len(diff_text)
patch.diffs.append(bigpatch.diffs.pop(0))
empty = False
elif (diff_type == self.DIFF_DELETE and len(patch.diffs) == 1 and
patch.diffs[0][0] == self.DIFF_EQUAL and
len(diff_text) > 2 * patch_size):
# This is a large deletion. Let it pass in one chunk.
patch.length1 += len(diff_text)
start1 += len(diff_text)
empty = False
patch.diffs.append((diff_type, diff_text))
del bigpatch.diffs[0]
else:
# Deletion or equality. Only take as much as we can stomach.
diff_text = diff_text[:patch_size - patch.length1 -
self.Patch_Margin]
patch.length1 += len(diff_text)
start1 += len(diff_text)
if diff_type == self.DIFF_EQUAL:
patch.length2 += len(diff_text)
start2 += len(diff_text)
else:
empty = False
patch.diffs.append((diff_type, diff_text))
if diff_text == bigpatch.diffs[0][1]:
del bigpatch.diffs[0]
else:
bigpatch.diffs[0] = (bigpatch.diffs[0][0],
bigpatch.diffs[0][1][len(diff_text):])
# Compute the head context for the next patch.
precontext = self.diff_text2(patch.diffs)
precontext = precontext[-self.Patch_Margin:]
# Append the end context for this patch.
postcontext = self.diff_text1(bigpatch.diffs)[:self.Patch_Margin]
if postcontext:
patch.length1 += len(postcontext)
patch.length2 += len(postcontext)
if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
patch.diffs[-1] = (self.DIFF_EQUAL, patch.diffs[-1][1] +
postcontext)
else:
patch.diffs.append((self.DIFF_EQUAL, postcontext))
if not empty:
x += 1
patches.insert(x, patch)
def patch_toText(self, patches):
"""Take a list of patches and return a textual representation.
Args:
patches: Array of Patch objects.
Returns:
Text representation of patches.
"""
text = []
for patch in patches:
text.append(str(patch))
return "".join(text)
def patch_fromText(self, textline):
"""Parse a textual representation of patches and return a list of patch
objects.
Args:
textline: Text representation of patches.
Returns:
Array of Patch objects.
Raises:
ValueError: If invalid input.
"""
patches = []
if not textline:
return patches
text = textline.split('\n')
while len(text) != 0:
m = re.match("^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
if not m:
raise ValueError("Invalid patch string: " + text[0])
patch = patch_obj()
patches.append(patch)
patch.start1 = int(m.group(1))
if m.group(2) == '':
patch.start1 -= 1
patch.length1 = 1
elif m.group(2) == '0':
patch.length1 = 0
else:
patch.start1 -= 1
patch.length1 = int(m.group(2))
patch.start2 = int(m.group(3))
if m.group(4) == '':
patch.start2 -= 1
patch.length2 = 1
elif m.group(4) == '0':
patch.length2 = 0
else:
patch.start2 -= 1
patch.length2 = int(m.group(4))
del text[0]
while len(text) != 0:
if text[0]:
sign = text[0][0]
else:
sign = ''
line = urllib.parse.unquote(text[0][1:])
if sign == '+':
# Insertion.
patch.diffs.append((self.DIFF_INSERT, line))
elif sign == '-':
# Deletion.
patch.diffs.append((self.DIFF_DELETE, line))
elif sign == ' ':
# Minor equality.
patch.diffs.append((self.DIFF_EQUAL, line))
elif sign == '@':
# Start of next patch.
break
elif sign == '':
# Blank line? Whatever.
pass
else:
# WTF?
raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
del text[0]
return patches
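# Illustrative sketch (not part of the original library): patch lists
# round-trip through their textual representation.
#
#   text = dmp.patch_toText(patches)
#   patches2 = dmp.patch_fromText(text)
#   dmp.patch_toText(patches2) == text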
class patch_obj:
"""Class representing one patch operation.
"""
def __init__(self):
"""Initializes with an empty list of diffs.
"""
self.diffs = []
self.start1 = None
self.start2 = None
self.length1 = 0
self.length2 = 0
def __str__(self):
"""Emmulate GNU diff's format.
Header: @@ -382,8 +481,9 @@
Indices are printed as 1-based, not 0-based.
Returns:
The GNU diff string.
"""
if self.length1 == 0:
coords1 = str(self.start1) + ",0"
elif self.length1 == 1:
coords1 = str(self.start1 + 1)
else:
coords1 = str(self.start1 + 1) + "," + str(self.length1)
if self.length2 == 0:
coords2 = str(self.start2) + ",0"
elif self.length2 == 1:
coords2 = str(self.start2 + 1)
else:
coords2 = str(self.start2 + 1) + "," + str(self.length2)
text = ["@@ -", coords1, " +", coords2, " @@\n"]
# Escape the body of the patch with %xx notation.
for (op, data) in self.diffs:
if op == diff_match_patch.DIFF_INSERT:
text.append("+")
elif op == diff_match_patch.DIFF_DELETE:
text.append("-")
elif op == diff_match_patch.DIFF_EQUAL:
text.append(" ")
# High ascii will raise UnicodeDecodeError. Use Unicode instead.
data = data.encode("utf-8")
text.append(urllib.parse.quote(data, "!~*'();/?:@&=+$,# ") + "\n")
return "".join(text)
|
apache-2.0
|
ovnicraft/suds
|
suds/mx/core.py
|
16
|
4746
|
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
Provides I{marshaller} core classes.
"""
from suds import *
from suds.mx import *
from suds.mx.appender import ContentAppender
from suds.sax.element import Element
from suds.sax.document import Document
from suds.sudsobject import Property
from logging import getLogger
log = getLogger(__name__)
class Core:
"""
An I{abstract} marshaller. This class implements the core
functionality of the marshaller.
@ivar appender: A content appender.
@type appender: L{ContentAppender}
"""
def __init__(self):
"""
"""
self.appender = ContentAppender(self)
def process(self, content):
"""
Process (marshal) the tag with the specified value using the
optional type information.
@param content: The content to process.
@type content: L{Object}
"""
log.debug('processing:\n%s', content)
self.reset()
if content.tag is None:
content.tag = content.value.__class__.__name__
document = Document()
if isinstance(content.value, Property):
root = self.node(content)
self.append(document, content)
return document.root()
def append(self, parent, content):
"""
Append the specified L{content} to the I{parent}.
@param parent: The parent node to append to.
@type parent: L{Element}
@param content: The content to append.
@type content: L{Object}
"""
log.debug('appending parent:\n%s\ncontent:\n%s', parent, content)
if self.start(content):
self.appender.append(parent, content)
self.end(parent, content)
def reset(self):
"""
Reset the marshaller.
"""
pass
def node(self, content):
"""
Create and return an XML node.
@param content: The content for which a node is being created.
@type content: L{Object}
@return: An element.
@rtype: L{Element}
"""
return Element(content.tag)
def start(self, content):
"""
Appending this content has started.
@param content: The content for which processing has started.
@type content: L{Content}
@return: True to continue appending
@rtype: boolean
"""
return True
def suspend(self, content):
"""
Appending this content has been suspended.
@param content: The content for which processing has been suspended.
@type content: L{Content}
"""
pass
def resume(self, content):
"""
Appending this content has resumed.
@param content: The content for which processing has been resumed.
@type content: L{Content}
"""
pass
def end(self, parent, content):
"""
Appending this content has ended.
@param parent: The parent node ending.
@type parent: L{Element}
@param content: The content for which processing has ended.
@type content: L{Content}
"""
pass
def setnil(self, node, content):
"""
Set the value of the I{node} to nil.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content to set nil.
@type content: L{Content}
"""
pass
def setdefault(self, node, content):
"""
Set the value of the I{node} to a default value.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content to set the default value.
@type content: L{Content}
@return: The default.
"""
pass
def optional(self, content):
"""
Get whether the specified content is optional.
@param content: The content which to check.
@type content: L{Content}
"""
return False
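# Illustrative sketch (not part of suds): a concrete marshaller is built by
# subclassing Core and overriding the hooks above. The Content constructor
# arguments shown below are an assumption for illustration only.
#
#   class Basic(Core):
#       def node(self, content):
#           return Element(content.tag)
#
#   # root = Basic().process(Content(tag='person', value=person))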
|
lgpl-3.0
|
UstadMobile/exelearning-ustadmobile-work
|
twisted/test/test_ftp_options.py
|
41
|
2685
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.tap.ftp}.
"""
from twisted.trial.unittest import TestCase
from twisted.cred import credentials, error
from twisted.tap.ftp import Options
from twisted.python import versions
from twisted.python.filepath import FilePath
class FTPOptionsTestCase(TestCase):
"""
Tests for the command line option parser used for C{twistd ftp}.
"""
usernamePassword = ('iamuser', 'thisispassword')
def setUp(self):
"""
Create a file with two users.
"""
self.filename = self.mktemp()
f = FilePath(self.filename)
f.setContent(':'.join(self.usernamePassword))
self.options = Options()
def test_passwordfileDeprecation(self):
"""
The C{--password-file} option will emit a warning stating that
said option is deprecated.
"""
self.callDeprecated(
versions.Version("Twisted", 11, 1, 0),
self.options.opt_password_file, self.filename)
def test_authAdded(self):
"""
The C{--auth} command-line option will add a checker to the list of
checkers
"""
numCheckers = len(self.options['credCheckers'])
self.options.parseOptions(['--auth', 'file:' + self.filename])
self.assertEqual(len(self.options['credCheckers']), numCheckers + 1)
def test_authFailure(self):
"""
The checker created by the C{--auth} command-line option returns a
L{Deferred} that fails with L{UnauthorizedLogin} when
presented with credentials that are unknown to that checker.
"""
self.options.parseOptions(['--auth', 'file:' + self.filename])
checker = self.options['credCheckers'][-1]
invalid = credentials.UsernamePassword(self.usernamePassword[0], 'fake')
return (checker.requestAvatarId(invalid)
.addCallbacks(
lambda ignore: self.fail("Wrong password should raise error"),
lambda err: err.trap(error.UnauthorizedLogin)))
def test_authSuccess(self):
"""
The checker created by the C{--auth} command-line option returns a
L{Deferred} that returns the avatar id when presented with credentials
that are known to that checker.
"""
self.options.parseOptions(['--auth', 'file:' + self.filename])
checker = self.options['credCheckers'][-1]
correct = credentials.UsernamePassword(*self.usernamePassword)
return checker.requestAvatarId(correct).addCallback(
lambda username: self.assertEqual(username, correct.username)
)
|
gpl-2.0
|
akuseru/zulip
|
zerver/forms.py
|
97
|
3911
|
from __future__ import absolute_import
from django import forms
from django.core.exceptions import ValidationError
from django.utils.safestring import mark_safe
from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm
from django.conf import settings
from zerver.models import Realm, get_user_profile_by_email, UserProfile, \
completely_open, resolve_email_to_domain, get_realm
from zerver.lib.actions import do_change_password, is_inactive
from zproject.backends import password_auth_enabled
import DNS
SIGNUP_STRING = u'Use a different e-mail address, or contact %s with questions.'%(settings.ZULIP_ADMINISTRATOR,)
def has_valid_realm(value):
# Checks if there is a realm without invite_required
# matching the domain of the input e-mail.
try:
realm = Realm.objects.get(domain=resolve_email_to_domain(value))
except Realm.DoesNotExist:
return False
return not realm.invite_required
def not_mit_mailing_list(value):
# I don't want ec-discuss signed up for Zulip
if "@mit.edu" in value:
username = value.rsplit("@", 1)[0]
# Check whether the user exists and can get mail.
try:
DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT)
return True
except DNS.Base.ServerError, e:
if e.rcode == DNS.Status.NXDOMAIN:
raise ValidationError(mark_safe(u'That user does not exist at MIT or is a <a href="https://ist.mit.edu/email-lists">mailing list</a>. If you want to sign up an alias for Zulip, <a href="mailto:[email protected]">contact us</a>.'))
else:
raise
return True
class RegistrationForm(forms.Form):
full_name = forms.CharField(max_length=100)
# The required-ness of the password field gets overridden if it isn't
# actually required for a realm
password = forms.CharField(widget=forms.PasswordInput, max_length=100,
required=False)
if not settings.VOYAGER:
terms = forms.BooleanField(required=True)
class ToSForm(forms.Form):
full_name = forms.CharField(max_length=100)
terms = forms.BooleanField(required=True)
class HomepageForm(forms.Form):
# This form is important because it determines whether users can
# register for our product. Be careful when modifying the
# validators.
email = forms.EmailField(validators=[is_inactive,])
def __init__(self, *args, **kwargs):
self.domain = kwargs.get("domain")
if kwargs.has_key("domain"):
del kwargs["domain"]
super(HomepageForm, self).__init__(*args, **kwargs)
def clean_email(self):
data = self.cleaned_data['email']
if completely_open(self.domain) or has_valid_realm(data) and not_mit_mailing_list(data):
return data
raise ValidationError(mark_safe(
u'Your e-mail does not match any existing open organization. ' \
+ SIGNUP_STRING))
class LoggingSetPasswordForm(SetPasswordForm):
def save(self, commit=True):
do_change_password(self.user, self.cleaned_data['new_password1'],
log=True, commit=commit)
return self.user
class CreateUserForm(forms.Form):
full_name = forms.CharField(max_length=100)
email = forms.EmailField()
class OurAuthenticationForm(AuthenticationForm):
def clean_username(self):
email = self.cleaned_data['username']
try:
user_profile = get_user_profile_by_email(email)
except UserProfile.DoesNotExist:
return email
if user_profile.realm.deactivated:
error_msg = u"""Sorry for the trouble, but %s has been deactivated.
Please contact [email protected] to reactivate this group.""" % (
user_profile.realm.name,)
raise ValidationError(mark_safe(error_msg))
return email
|
apache-2.0
|
rynomster/django
|
tests/annotations/tests.py
|
6
|
19583
|
from __future__ import unicode_literals
import datetime
from decimal import Decimal
from django.core.exceptions import FieldDoesNotExist, FieldError
from django.db.models import (
F, BooleanField, CharField, Count, DateTimeField, ExpressionWrapper, Func,
IntegerField, Sum, Value,
)
from django.db.models.functions import Lower
from django.test import TestCase, skipUnlessDBFeature
from django.utils import six
from .models import (
Author, Book, Company, DepartmentStore, Employee, Publisher, Store, Ticket,
)
def cxOracle_py3_bug(func):
"""
There's a bug in Django/cx_Oracle with respect to string handling under
Python 3 (essentially, they treat Python 3 strings as Python 2 strings
rather than unicode). This makes some tests here fail under Python 3, so
we mark them as expected failures until someone fixes them in #23843.
"""
from unittest import expectedFailure
from django.db import connection
return expectedFailure(func) if connection.vendor == 'oracle' and six.PY3 else func
class NonAggregateAnnotationTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_basic_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()))
for book in books:
self.assertEqual(book.is_book, 1)
def test_basic_f_annotation(self):
books = Book.objects.annotate(another_rating=F('rating'))
for book in books:
self.assertEqual(book.another_rating, book.rating)
def test_joined_annotation(self):
books = Book.objects.select_related('publisher').annotate(
num_awards=F('publisher__num_awards'))
for book in books:
self.assertEqual(book.num_awards, book.publisher.num_awards)
def test_mixed_type_annotation_date_interval(self):
active = datetime.datetime(2015, 3, 20, 14, 0, 0)
duration = datetime.timedelta(hours=1)
expires = datetime.datetime(2015, 3, 20, 14, 0, 0) + duration
Ticket.objects.create(active_at=active, duration=duration)
t = Ticket.objects.annotate(
expires=ExpressionWrapper(F('active_at') + F('duration'), output_field=DateTimeField())
).first()
self.assertEqual(t.expires, expires)
def test_mixed_type_annotation_numbers(self):
test = self.b1
b = Book.objects.annotate(
combined=ExpressionWrapper(F('pages') + F('rating'), output_field=IntegerField())
).get(isbn=test.isbn)
combined = int(test.pages + test.rating)
self.assertEqual(b.combined, combined)
def test_annotate_with_aggregation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField()),
rating_count=Count('rating'))
for book in books:
self.assertEqual(book.is_book, 1)
self.assertEqual(book.rating_count, 1)
def test_aggregate_over_annotation(self):
agg = Author.objects.annotate(other_age=F('age')).aggregate(otherage_sum=Sum('other_age'))
other_agg = Author.objects.aggregate(age_sum=Sum('age'))
self.assertEqual(agg['otherage_sum'], other_agg['age_sum'])
@skipUnlessDBFeature('can_distinct_on_fields')
def test_distinct_on_with_annotation(self):
store = Store.objects.create(
name='test store',
original_opening=datetime.datetime.now(),
friday_night_closing=datetime.time(21, 00, 00),
)
names = [
'Theodore Roosevelt',
'Eleanor Roosevelt',
'Franklin Roosevelt',
'Ned Stark',
'Catelyn Stark',
]
for name in names:
Employee.objects.create(
store=store,
first_name=name.split()[0],
last_name=name.split()[1],
age=30, salary=2000,
)
people = Employee.objects.annotate(
name_lower=Lower('last_name'),
).distinct('name_lower')
self.assertEqual(set(p.last_name for p in people), {'Stark', 'Roosevelt'})
self.assertEqual(len(people), 2)
people2 = Employee.objects.annotate(
test_alias=F('store__name'),
).distinct('test_alias')
self.assertEqual(len(people2), 1)
def test_filter_annotation(self):
books = Book.objects.annotate(
is_book=Value(1, output_field=IntegerField())
).filter(is_book=1)
for book in books:
self.assertEqual(book.is_book, 1)
def test_filter_annotation_with_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=3.5)
for book in books:
self.assertEqual(book.other_rating, 3.5)
def test_filter_annotation_with_double_f(self):
books = Book.objects.annotate(
other_rating=F('rating')
).filter(other_rating=F('rating'))
for book in books:
self.assertEqual(book.other_rating, book.rating)
def test_filter_agg_with_double_f(self):
books = Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('sum_rating'))
for book in books:
self.assertEqual(book.sum_rating, book.rating)
def test_filter_wrong_annotation(self):
with six.assertRaisesRegex(self, FieldError, "Cannot resolve keyword .*"):
list(Book.objects.annotate(
sum_rating=Sum('rating')
).filter(sum_rating=F('nope')))
def test_combined_annotation_commutative(self):
book1 = Book.objects.annotate(adjusted_rating=F('rating') + 2).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=2 + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
book1 = Book.objects.annotate(adjusted_rating=F('rating') + None).get(pk=self.b1.pk)
book2 = Book.objects.annotate(adjusted_rating=None + F('rating')).get(pk=self.b1.pk)
self.assertEqual(book1.adjusted_rating, book2.adjusted_rating)
def test_update_with_annotation(self):
book_preupdate = Book.objects.get(pk=self.b2.pk)
Book.objects.annotate(other_rating=F('rating') - 1).update(rating=F('other_rating'))
book_postupdate = Book.objects.get(pk=self.b2.pk)
self.assertEqual(book_preupdate.rating - 1, book_postupdate.rating)
def test_annotation_with_m2m(self):
books = Book.objects.annotate(author_age=F('authors__age')).filter(pk=self.b1.pk).order_by('author_age')
self.assertEqual(books[0].author_age, 34)
self.assertEqual(books[1].author_age, 35)
def test_annotation_reverse_m2m(self):
books = Book.objects.annotate(
store_name=F('store__name')).filter(
name='Practical Django Projects').order_by(
'store_name')
self.assertQuerysetEqual(
books, [
'Amazon.com',
'Books.com',
'Mamma and Pappa\'s Books'
],
lambda b: b.store_name
)
def test_values_annotation(self):
"""
Annotations can reference fields in a values clause,
and contribute to an existing values clause.
"""
# annotate references a field in values()
qs = Book.objects.values('rating').annotate(other_rating=F('rating') - 1)
book = qs.get(pk=self.b1.pk)
self.assertEqual(book['rating'] - 1, book['other_rating'])
# filter refs the annotated value
book = qs.get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
# can annotate an existing values with a new field
book = qs.annotate(other_isbn=F('isbn')).get(other_rating=4)
self.assertEqual(book['other_rating'], 4)
self.assertEqual(book['other_isbn'], '155860191')
def test_defer_annotation(self):
"""
Deferred attributes can be referenced by an annotation,
but they are not themselves deferred, and cannot be deferred.
"""
qs = Book.objects.defer('rating').annotate(other_rating=F('rating') - 1)
with self.assertNumQueries(2):
book = qs.get(other_rating=4)
self.assertEqual(book.rating, 5)
self.assertEqual(book.other_rating, 4)
with six.assertRaisesRegex(self, FieldDoesNotExist, "\w has no field named u?'other_rating'"):
book = qs.defer('other_rating').get(other_rating=4)
def test_mti_annotations(self):
"""
Fields on an inherited model can be referenced by an
annotated field.
"""
d = DepartmentStore.objects.create(
name='Angus & Robinson',
original_opening=datetime.date(2014, 3, 8),
friday_night_closing=datetime.time(21, 00, 00),
chain='Westfield'
)
books = Book.objects.filter(rating__gt=4)
for b in books:
d.books.add(b)
qs = DepartmentStore.objects.annotate(
other_name=F('name'),
other_chain=F('chain'),
is_open=Value(True, BooleanField()),
book_isbn=F('books__isbn')
).order_by('book_isbn').filter(chain='Westfield')
self.assertQuerysetEqual(
qs, [
('Angus & Robinson', 'Westfield', True, '155860191'),
('Angus & Robinson', 'Westfield', True, '159059725')
],
lambda d: (d.other_name, d.other_chain, d.is_open, d.book_isbn)
)
def test_null_annotation(self):
"""
Test that annotating None onto a model round-trips
"""
book = Book.objects.annotate(no_value=Value(None, output_field=IntegerField())).first()
self.assertIsNone(book.no_value)
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F('age')).order_by('other_age')
self.assertQuerysetEqual(
authors, [
25, 29, 29, 34, 35, 37, 45, 46, 57,
],
lambda a: a.other_age
)
def test_order_by_aggregate(self):
authors = Author.objects.values('age').annotate(age_count=Count('age')).order_by('age_count', 'age')
self.assertQuerysetEqual(
authors, [
(25, 1), (34, 1), (35, 1), (37, 1), (45, 1), (46, 1), (57, 1), (29, 2),
],
lambda a: (a['age'], a['age_count'])
)
def test_annotate_exists(self):
authors = Author.objects.annotate(c=Count('id')).filter(c__gt=1)
self.assertFalse(authors.exists())
def test_column_field_ordering(self):
"""
Test that columns are aligned in the correct order for
resolve_columns. This test will fail on mysql if column
ordering is out. Column fields should be aligned as:
1. extra_select
2. model_fields
3. annotation_fields
4. model_related_fields
"""
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
self.assertQuerysetEqual(
qs.order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
def test_column_field_ordering_with_deferred(self):
store = Store.objects.first()
Employee.objects.create(id=1, first_name='Max', manager=True, last_name='Paine',
store=store, age=23, salary=Decimal(50000.00))
Employee.objects.create(id=2, first_name='Buffy', manager=False, last_name='Summers',
store=store, age=18, salary=Decimal(40000.00))
qs = Employee.objects.extra(
select={'random_value': '42'}
).select_related('store').annotate(
annotated_value=Value(17, output_field=IntegerField())
)
rows = [
(1, 'Max', True, 42, 'Paine', 23, Decimal(50000.00), store.name, 17),
(2, 'Buffy', False, 42, 'Summers', 18, Decimal(40000.00), store.name, 17)
]
# and we respect deferred columns!
self.assertQuerysetEqual(
qs.defer('age').order_by('id'), rows,
lambda e: (
e.id, e.first_name, e.manager, e.random_value, e.last_name, e.age,
e.salary, e.store.name, e.annotated_value))
@cxOracle_py3_bug
def test_custom_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE'
)
).order_by('name')
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'),
('Django Software Foundation', 'No Tag'),
('Google', 'Do No Evil'),
('Yahoo', 'Internet Company')
],
lambda c: (c.name, c.tagline)
)
@cxOracle_py3_bug
def test_custom_functions_can_ref_other_functions(self):
Company(name='Apple', motto=None, ticker_name='APPL', description='Beautiful Devices').save()
Company(name='Django Software Foundation', motto=None, ticker_name=None, description=None).save()
Company(name='Google', motto='Do No Evil', ticker_name='GOOG', description='Internet Company').save()
Company(name='Yahoo', motto=None, ticker_name=None, description='Internet Company').save()
class Lower(Func):
function = 'LOWER'
qs = Company.objects.annotate(
tagline=Func(
F('motto'),
F('ticker_name'),
F('description'),
Value('No Tag'),
function='COALESCE')
).annotate(
tagline_lower=Lower(F('tagline'), output_field=CharField())
).order_by('name')
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
qs, [
('Apple', 'APPL'.lower()),
('Django Software Foundation', 'No Tag'.lower()),
('Google', 'Do No Evil'.lower()),
('Yahoo', 'Internet Company'.lower())
],
lambda c: (c.name, c.tagline_lower)
)
|
bsd-3-clause
|
mancoast/CPythonPyc_test
|
cpython/273_test_complex_args.py
|
136
|
3507
|
import unittest
from test import test_support
import textwrap
class ComplexArgsTestCase(unittest.TestCase):
def check(self, func, expected, *args):
self.assertEqual(func(*args), expected)
# These functions are tested below as lambdas too. If you add a
# function test, also add a similar lambda test.
# Functions are wrapped in "exec" statements in order to
# silence Py3k warnings.
def test_func_parens_no_unpacking(self):
exec textwrap.dedent("""
def f(((((x))))): return x
self.check(f, 1, 1)
# Inner parens are elided, same as: f(x,)
def f(((x)),): return x
self.check(f, 2, 2)
""")
def test_func_1(self):
exec textwrap.dedent("""
def f(((((x),)))): return x
self.check(f, 3, (3,))
def f(((((x)),))): return x
self.check(f, 4, (4,))
def f(((((x))),)): return x
self.check(f, 5, (5,))
def f(((x),)): return x
self.check(f, 6, (6,))
""")
def test_func_2(self):
exec textwrap.dedent("""
def f(((((x)),),)): return x
self.check(f, 2, ((2,),))
""")
def test_func_3(self):
exec textwrap.dedent("""
def f((((((x)),),),)): return x
self.check(f, 3, (((3,),),))
""")
def test_func_complex(self):
exec textwrap.dedent("""
def f((((((x)),),),), a, b, c): return x, a, b, c
self.check(f, (3, 9, 8, 7), (((3,),),), 9, 8, 7)
def f(((((((x)),)),),), a, b, c): return x, a, b, c
self.check(f, (3, 9, 8, 7), (((3,),),), 9, 8, 7)
def f(a, b, c, ((((((x)),)),),)): return a, b, c, x
self.check(f, (9, 8, 7, 3), 9, 8, 7, (((3,),),))
""")
# Duplicate the tests above, but for lambda. If you add a lambda test,
# also add a similar function test above.
def test_lambda_parens_no_unpacking(self):
exec textwrap.dedent("""
f = lambda (((((x))))): x
self.check(f, 1, 1)
# Inner parens are elided, same as: f(x,)
f = lambda ((x)),: x
self.check(f, 2, 2)
""")
def test_lambda_1(self):
exec textwrap.dedent("""
f = lambda (((((x),)))): x
self.check(f, 3, (3,))
f = lambda (((((x)),))): x
self.check(f, 4, (4,))
f = lambda (((((x))),)): x
self.check(f, 5, (5,))
f = lambda (((x),)): x
self.check(f, 6, (6,))
""")
def test_lambda_2(self):
exec textwrap.dedent("""
f = lambda (((((x)),),)): x
self.check(f, 2, ((2,),))
""")
def test_lambda_3(self):
exec textwrap.dedent("""
f = lambda ((((((x)),),),)): x
self.check(f, 3, (((3,),),))
""")
def test_lambda_complex(self):
exec textwrap.dedent("""
f = lambda (((((x)),),),), a, b, c: (x, a, b, c)
self.check(f, (3, 9, 8, 7), (((3,),),), 9, 8, 7)
f = lambda ((((((x)),)),),), a, b, c: (x, a, b, c)
self.check(f, (3, 9, 8, 7), (((3,),),), 9, 8, 7)
f = lambda a, b, c, ((((((x)),)),),): (a, b, c, x)
self.check(f, (9, 8, 7, 3), 9, 8, 7, (((3,),),))
""")
def test_main():
with test_support.check_py3k_warnings(
("tuple parameter unpacking has been removed", SyntaxWarning),
("parenthesized argument names are invalid", SyntaxWarning)):
test_support.run_unittest(ComplexArgsTestCase)
if __name__ == "__main__":
test_main()
|
gpl-3.0
|
russellfeeed/exmp-eet
|
.demeteorized/programs/ctl/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/__init__.py
|
81
|
21295
|
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
from gyp.common import GypError
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message, *args):
if 'all' in gyp.debug or mode in gyp.debug:
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
if args:
message %= args
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message)
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file.endswith(extension):
build_files.append(file)
return build_files
def Load(build_files, format, default_variables={},
includes=[], depth='.', params=None, check=False,
circular_check=True):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
if params is None:
params = {}
flavor = None
if '-' in format:
format, params['flavor'] = format.split('-', 1)
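    # e.g. (illustrative, not in the original source) format "ninja-linux"
    # becomes format "ninja" with params['flavor'] set to "linux".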
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
# Format can be a custom python file, or by default the name of a module
# within gyp.generator.
if format.endswith('.py'):
generator_name = os.path.splitext(format)[0]
path, generator_name = os.path.split(generator_name)
# Make sure the path to the custom generator is in sys.path
# Don't worry about removing it once we are done. Keeping the path
# to each generator that is used in sys.path is likely harmless and
# arguably a good idea.
path = os.path.abspath(path)
if path not in sys.path:
sys.path.insert(0, path)
else:
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
for (key, val) in generator.generator_default_variables.items():
default_variables.setdefault(key, val)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Give the generator the opportunity to set generator_input_info based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateGeneratorInputInfo', None):
generator.CalculateGeneratorInputInfo(params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'generator_wants_absolute_build_file_paths':
getattr(generator, 'generator_wants_absolute_build_file_paths', False),
'generator_handles_variants':
getattr(generator, 'generator_handles_variants', False),
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
'generator_wants_static_library_dependencies_adjusted':
getattr(generator,
'generator_wants_static_library_dependencies_adjusted', True),
'generator_wants_sorted_dependencies':
getattr(generator, 'generator_wants_sorted_dependencies', False),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check, circular_check,
params['parallel'])
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
value = FormatOpt(flag, predicate(flag_value))
if value in flags:
flags.remove(value)
flags.append(value)
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
      # The path type is needed for regenerating; for optparse we can just treat
      # it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('--msvs-version', dest='msvs_version',
regenerate=False,
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--parallel', action='store_true',
env_name='GYP_PARALLEL',
help='Use multiprocessing for speed (experimental)')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
parser.add_option('--build', dest='configs', action='append',
help='configuration for build after project generation')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# We read a few things from ~/.gyp, so set up a var for that.
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
home = None
home_dot_gyp = None
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
# TODO(thomasvl): add support for ~/.gyp/defaults
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
if not options.parallel and options.use_environment:
p = os.environ.get('GYP_PARALLEL')
options.parallel = bool(p and p != '0')
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value)
else:
DebugOutput(DEBUG_GENERAL, " %s: %s", option, value)
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
      raise GypError('Could not automatically locate src directory. This is '
                     'a temporary Chromium feature that will be removed. Use '
'--depth as a workaround.')
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
if DEBUG_GENERAL in gyp.debug:
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s", cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags)
# TODO: Remove this and the option after we've gotten folks to move to the
# generator flag.
if options.msvs_version:
print >>sys.stderr, \
'DEPRECATED: Use generator flag (-G msvs_version=' + \
options.msvs_version + ') instead of --msvs-version=' + \
options.msvs_version
generator_flags['msvs_version'] = options.msvs_version
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp,
'parallel': options.parallel}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(build_files, format,
cmdline_default_variables,
includes, options.depth,
params, options.check,
options.circular_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
if options.configs:
valid_configs = targets[flat_list[0]]['configurations'].keys()
for conf in options.configs:
if conf not in valid_configs:
raise GypError('Invalid config specified via --build: %s' % conf)
generator.PerformBuild(data, options.configs, params)
# Done
return 0
def main(args):
try:
return gyp_main(args)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
mit
|
Shaky156/TFP-Kernel-2.6.39
|
tools/perf/scripts/python/check-perf-trace.py
|
11214
|
2503
|
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
gpl-2.0
|
JimCircadian/ansible
|
test/units/modules/network/f5/test_bigip_device_connectivity.py
|
22
|
12724
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_device_connectivity import ApiParameters
from library.modules.bigip_device_connectivity import ModuleParameters
from library.modules.bigip_device_connectivity import ModuleManager
from library.modules.bigip_device_connectivity import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_device_connectivity import ApiParameters
from ansible.modules.network.f5.bigip_device_connectivity import ModuleParameters
from ansible.modules.network.f5.bigip_device_connectivity import ModuleManager
from ansible.modules.network.f5.bigip_device_connectivity import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
multicast_port='1010',
multicast_address='10.10.10.10',
multicast_interface='eth0',
failover_multicast=True,
unicast_failover=[
dict(
address='20.20.20.20',
port='1234'
)
],
mirror_primary_address='1.2.3.4',
mirror_secondary_address='5.6.7.8',
config_sync_ip='4.3.2.1',
state='present',
server='localhost',
user='admin',
password='password'
)
p = ModuleParameters(params=args)
assert p.multicast_port == 1010
assert p.multicast_address == '10.10.10.10'
assert p.multicast_interface == 'eth0'
assert p.failover_multicast is True
assert p.mirror_primary_address == '1.2.3.4'
assert p.mirror_secondary_address == '5.6.7.8'
assert p.config_sync_ip == '4.3.2.1'
assert len(p.unicast_failover) == 1
assert 'effectiveIp' in p.unicast_failover[0]
assert 'effectivePort' in p.unicast_failover[0]
assert 'port' in p.unicast_failover[0]
assert 'ip' in p.unicast_failover[0]
assert p.unicast_failover[0]['effectiveIp'] == '20.20.20.20'
assert p.unicast_failover[0]['ip'] == '20.20.20.20'
assert p.unicast_failover[0]['port'] == 1234
assert p.unicast_failover[0]['effectivePort'] == 1234
def test_api_parameters(self):
params = load_fixture('load_tm_cm_device.json')
p = ApiParameters(params=params)
assert p.multicast_port == 62960
assert p.multicast_address == '224.0.0.245'
assert p.multicast_interface == 'eth0'
assert p.mirror_primary_address == '10.2.2.2'
assert p.mirror_secondary_address == '10.2.3.2'
assert p.config_sync_ip == '10.2.2.2'
assert len(p.unicast_failover) == 2
assert 'effectiveIp' in p.unicast_failover[0]
assert 'effectivePort' in p.unicast_failover[0]
assert 'port' in p.unicast_failover[0]
assert 'ip' in p.unicast_failover[0]
assert p.unicast_failover[0]['effectiveIp'] == 'management-ip'
assert p.unicast_failover[0]['ip'] == 'management-ip'
assert p.unicast_failover[0]['port'] == 1026
assert p.unicast_failover[0]['effectivePort'] == 1026
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_update_settings(self, *args):
set_module_args(dict(
config_sync_ip="10.1.30.1",
mirror_primary_address="10.1.30.1",
unicast_failover=[
dict(
address="10.1.30.1"
)
],
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_tm_cm_device_default.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['config_sync_ip'] == '10.1.30.1'
assert results['mirror_primary_address'] == '10.1.30.1'
assert len(results.keys()) == 4
def test_set_primary_mirror_address_none(self, *args):
set_module_args(dict(
mirror_primary_address="none",
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_tm_cm_device.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['mirror_primary_address'] == 'none'
assert len(results.keys()) == 2
def test_set_secondary_mirror_address_none(self, *args):
set_module_args(dict(
mirror_secondary_address="none",
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_tm_cm_device.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['mirror_secondary_address'] == 'none'
assert len(results.keys()) == 2
def test_set_multicast_address_none(self, *args):
set_module_args(dict(
multicast_address="none",
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_tm_cm_device.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['multicast_address'] == 'none'
assert len(results.keys()) == 2
def test_set_multicast_port_negative(self, *args):
set_module_args(dict(
multicast_port=-1,
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_tm_cm_device.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
with pytest.raises(F5ModuleError) as ex:
mm.exec_module()
assert 'must be between' in str(ex)
def test_set_multicast_address(self, *args):
set_module_args(dict(
multicast_address="10.1.1.1",
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_tm_cm_device.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['multicast_address'] == '10.1.1.1'
assert len(results.keys()) == 2
def test_unset_unicast_failover(self, *args):
set_module_args(dict(
unicast_failover="none",
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_tm_cm_device.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['unicast_failover'] == 'none'
assert len(results.keys()) == 2
def test_unset_config_sync_ip(self, *args):
set_module_args(dict(
config_sync_ip="none",
server='localhost',
user='admin',
password='password'
))
# Configure the parameters that would be returned by querying the
# remote device
current = ApiParameters(params=load_fixture('load_tm_cm_device.json'))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.update_on_device = Mock(return_value=True)
mm.read_current_from_device = Mock(return_value=current)
results = mm.exec_module()
assert results['changed'] is True
assert results['config_sync_ip'] == 'none'
assert len(results.keys()) == 2
|
gpl-3.0
|
codemac/servo
|
tests/wpt/css-tests/tools/wptserve/tests/functional/test_request.py
|
299
|
2987
|
import os
import unittest
import urllib2
import json
import time
import wptserve
from base import TestUsingServer, doc_root
class TestInputFile(TestUsingServer):
def test_seek(self):
@wptserve.handlers.handler
def handler(request, response):
rv = []
f = request.raw_input
f.seek(5)
rv.append(f.read(2))
rv.append(f.tell())
f.seek(0)
rv.append(f.readline())
rv.append(f.tell())
rv.append(f.read(-1))
rv.append(f.tell())
f.seek(0)
rv.append(f.read())
f.seek(0)
rv.extend(f.readlines())
return " ".join(str(item) for item in rv)
route = ("POST", "/test/test_seek", handler)
self.server.router.register(*route)
resp = self.request(route[1], method="POST", body="12345ab\ncdef")
self.assertEquals(200, resp.getcode())
self.assertEquals(["ab", "7", "12345ab\n", "8", "cdef", "12",
"12345ab\ncdef", "12345ab\n", "cdef"],
resp.read().split(" "))
def test_iter(self):
@wptserve.handlers.handler
def handler(request, response):
f = request.raw_input
return " ".join(line for line in f)
route = ("POST", "/test/test_iter", handler)
self.server.router.register(*route)
resp = self.request(route[1], method="POST", body="12345\nabcdef\r\nzyxwv")
self.assertEquals(200, resp.getcode())
self.assertEquals(["12345\n", "abcdef\r\n", "zyxwv"], resp.read().split(" "))
class TestRequest(TestUsingServer):
def test_body(self):
@wptserve.handlers.handler
def handler(request, response):
request.raw_input.seek(5)
return request.body
route = ("POST", "/test/test_body", handler)
self.server.router.register(*route)
resp = self.request(route[1], method="POST", body="12345ab\ncdef")
self.assertEquals("12345ab\ncdef", resp.read())
def test_route_match(self):
@wptserve.handlers.handler
def handler(request, response):
return request.route_match["match"] + " " + request.route_match["*"]
route = ("GET", "/test/{match}_*", handler)
self.server.router.register(*route)
resp = self.request("/test/some_route")
self.assertEquals("some route", resp.read())
class TestAuth(TestUsingServer):
def test_auth(self):
@wptserve.handlers.handler
def handler(request, response):
return " ".join((request.auth.username, request.auth.password))
route = ("GET", "/test/test_auth", handler)
self.server.router.register(*route)
resp = self.request(route[1], auth=("test", "PASS"))
self.assertEquals(200, resp.getcode())
self.assertEquals(["test", "PASS"], resp.read().split(" "))
if __name__ == '__main__':
unittest.main()
|
mpl-2.0
|
nuuuboo/odoo
|
addons/base_import/models.py
|
222
|
14243
|
import csv
import itertools
import logging
import operator
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
import psycopg2
from openerp.osv import orm, fields
from openerp.tools.translate import _
FIELDS_RECURSION_LIMIT = 2
ERROR_PREVIEW_BYTES = 200
_logger = logging.getLogger(__name__)
class ir_import(orm.TransientModel):
_name = 'base_import.import'
# allow imports to survive for 12h in case user is slow
_transient_max_hours = 12.0
_columns = {
'res_model': fields.char('Model'),
'file': fields.binary(
'File', help="File to check and/or import, raw binary (not base64)"),
'file_name': fields.char('File Name'),
'file_type': fields.char('File Type'),
}
def get_fields(self, cr, uid, model, context=None,
depth=FIELDS_RECURSION_LIMIT):
""" Recursively get fields for the provided model (through
fields_get) and filter them according to importability
The output format is a list of ``Field``, with ``Field``
defined as:
.. class:: Field
.. attribute:: id (str)
A non-unique identifier for the field, used to compute
the span of the ``required`` attribute: if multiple
``required`` fields have the same id, only one of them
is necessary.
.. attribute:: name (str)
The field's logical (Odoo) name within the scope of
its parent.
.. attribute:: string (str)
The field's human-readable name (``@string``)
.. attribute:: required (bool)
Whether the field is marked as required in the
model. Clients must provide non-empty import values
for all required fields or the import will error out.
.. attribute:: fields (list(Field))
The current field's subfields. The database and
external identifiers for m2o and m2m fields; a
filtered and transformed fields_get for o2m fields (to
a variable depth defined by ``depth``).
Fields with no sub-fields will have an empty list of
sub-fields.
        :param str model: name of the model to get fields from
        :param int depth: depth of recursion into o2m fields
"""
model_obj = self.pool[model]
fields = [{
'id': 'id',
'name': 'id',
'string': _("External ID"),
'required': False,
'fields': [],
}]
fields_got = model_obj.fields_get(cr, uid, context=context)
blacklist = orm.MAGIC_COLUMNS + [model_obj.CONCURRENCY_CHECK_FIELD]
for name, field in fields_got.iteritems():
if name in blacklist:
continue
            # any value (even an empty string) means the field is deprecated;
            # @deprecated must be absent or False to mean not-deprecated
if field.get('deprecated', False) is not False:
continue
if field.get('readonly'):
states = field.get('states')
if not states:
continue
# states = {state: [(attr, value), (attr2, value2)], state2:...}
if not any(attr == 'readonly' and value is False
for attr, value in itertools.chain.from_iterable(
states.itervalues())):
continue
f = {
'id': name,
'name': name,
'string': field['string'],
                # 'required' may be missing from fields_get(), default to False
'required': bool(field.get('required')),
'fields': [],
}
if field['type'] in ('many2many', 'many2one'):
f['fields'] = [
dict(f, name='id', string=_("External ID")),
dict(f, name='.id', string=_("Database ID")),
]
elif field['type'] == 'one2many' and depth:
f['fields'] = self.get_fields(
cr, uid, field['relation'], context=context, depth=depth-1)
if self.pool['res.users'].has_group(cr, uid, 'base.group_no_one'):
f['fields'].append({'id' : '.id', 'name': '.id', 'string': _("Database ID"), 'required': False, 'fields': []})
fields.append(f)
# TODO: cache on model?
return fields
def _read_csv(self, record, options):
""" Returns a CSV-parsed iterator of all empty lines in the file
:throws csv.Error: if an error is detected during CSV parsing
:throws UnicodeDecodeError: if ``options.encoding`` is incorrect
"""
csv_iterator = csv.reader(
StringIO(record.file),
quotechar=str(options['quoting']),
delimiter=str(options['separator']))
def nonempty(row):
return any(x for x in row if x.strip())
csv_nonempty = itertools.ifilter(nonempty, csv_iterator)
# TODO: guess encoding with chardet? Or https://github.com/aadsm/jschardet
encoding = options.get('encoding', 'utf-8')
return itertools.imap(
lambda row: [item.decode(encoding) for item in row],
csv_nonempty)
def _match_header(self, header, fields, options):
""" Attempts to match a given header to a field of the
imported model.
:param str header: header name from the CSV file
:param fields:
:param dict options:
:returns: an empty list if the header couldn't be matched, or
all the fields to traverse
:rtype: list(Field)
"""
string_match = None
for field in fields:
# FIXME: should match all translations & original
# TODO: use string distance (levenshtein? hamming?)
if header.lower() == field['name'].lower():
return [field]
if header.lower() == field['string'].lower():
                # matching on the string is not a reliable way because
                # strings have no unique constraint
string_match = field
if string_match:
# this behavior is only applied if there is no matching field['name']
return [string_match]
if '/' not in header:
return []
# relational field path
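        # Illustrative example (not part of the original source): a hypothetical
        # header "partner_id/name" is split on '/'; "partner_id" is matched among
        # the top-level fields and "name" among its sub-fields, so the returned
        # traversal contains both field descriptors in order.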
traversal = []
subfields = fields
# Iteratively dive into fields tree
for section in header.split('/'):
# Strip section in case spaces are added around '/' for
# readability of paths
match = self._match_header(section.strip(), subfields, options)
# Any match failure, exit
if not match: return []
# prep subfields for next iteration within match[0]
field = match[0]
subfields = field['fields']
traversal.append(field)
return traversal
def _match_headers(self, rows, fields, options):
""" Attempts to match the imported model's fields to the
titles of the parsed CSV file, if the file is supposed to have
headers.
Will consume the first line of the ``rows`` iterator.
Returns a pair of (None, None) if headers were not requested
or the list of headers and a dict mapping cell indices
to key paths in the ``fields`` tree
:param Iterator rows:
:param dict fields:
:param dict options:
:rtype: (None, None) | (list(str), dict(int: list(str)))
"""
if not options.get('headers'):
return None, None
headers = next(rows)
return headers, dict(
(index, [field['name'] for field in self._match_header(header, fields, options)] or None)
for index, header in enumerate(headers)
)
def parse_preview(self, cr, uid, id, options, count=10, context=None):
""" Generates a preview of the uploaded files, and performs
fields-matching between the import's file data and the model's
columns.
If the headers are not requested (not options.headers),
``matches`` and ``headers`` are both ``False``.
:param id: identifier of the import
:param int count: number of preview lines to generate
:param options: format-specific options.
CSV: {encoding, quoting, separator, headers}
:type options: {str, str, str, bool}
:returns: {fields, matches, headers, preview} | {error, preview}
:rtype: {dict(str: dict(...)), dict(int, list(str)), list(str), list(list(str))} | {str, str}
"""
(record,) = self.browse(cr, uid, [id], context=context)
fields = self.get_fields(cr, uid, record.res_model, context=context)
try:
rows = self._read_csv(record, options)
headers, matches = self._match_headers(rows, fields, options)
# Match should have consumed the first row (iif headers), get
# the ``count`` next rows for preview
preview = list(itertools.islice(rows, count))
assert preview, "CSV file seems to have no content"
return {
'fields': fields,
'matches': matches or False,
'headers': headers or False,
'preview': preview,
}
except Exception, e:
# Due to lazy generators, UnicodeDecodeError (for
# instance) may only be raised when serializing the
# preview to a list in the return.
_logger.debug("Error during CSV parsing preview", exc_info=True)
return {
'error': str(e),
# iso-8859-1 ensures decoding will always succeed,
# even if it yields non-printable characters. This is
# in case of UnicodeDecodeError (or csv.Error
# compounded with UnicodeDecodeError)
'preview': record.file[:ERROR_PREVIEW_BYTES]
.decode( 'iso-8859-1'),
}
def _convert_import_data(self, record, fields, options, context=None):
""" Extracts the input browse_record and fields list (with
``False``-y placeholders for fields to *not* import) into a
format Model.import_data can use: a fields list without holes
and the precisely matching data matrix
:param browse_record record:
        :param list(str|bool) fields:
:returns: (data, fields)
:rtype: (list(list(str)), list(str))
:raises ValueError: in case the import data could not be converted
"""
# Get indices for non-empty fields
indices = [index for index, field in enumerate(fields) if field]
if not indices:
raise ValueError(_("You must configure at least one field to import"))
# If only one index, itemgetter will return an atom rather
# than a 1-tuple
if len(indices) == 1: mapper = lambda row: [row[indices[0]]]
else: mapper = operator.itemgetter(*indices)
# Get only list of actually imported fields
import_fields = filter(None, fields)
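        # Illustrative example (not part of the original source): with
        # hypothetical fields=['name', False, 'email'], indices is [0, 2],
        # import_fields is ['name', 'email'], and mapper keeps only columns
        # 0 and 2 of every CSV row.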
rows_to_import = self._read_csv(record, options)
if options.get('headers'):
rows_to_import = itertools.islice(
rows_to_import, 1, None)
data = [
row for row in itertools.imap(mapper, rows_to_import)
# don't try inserting completely empty rows (e.g. from
# filtering out o2m fields)
if any(row)
]
return data, import_fields
def do(self, cr, uid, id, fields, options, dryrun=False, context=None):
""" Actual execution of the import
:param fields: import mapping: maps each column to a field,
``False`` for the columns to ignore
:type fields: list(str|bool)
:param dict options:
:param bool dryrun: performs all import operations (and
validations) but rollbacks writes, allows
getting as much errors as possible without
the risk of clobbering the database.
:returns: A list of errors. If the list is empty the import
executed fully and correctly. If the list is
non-empty it contains dicts with 3 keys ``type`` the
type of error (``error|warning``); ``message`` the
error message associated with the error (a string)
and ``record`` the data which failed to import (or
``false`` if that data isn't available or provided)
:rtype: list({type, message, record})
"""
cr.execute('SAVEPOINT import')
(record,) = self.browse(cr, uid, [id], context=context)
try:
data, import_fields = self._convert_import_data(
record, fields, options, context=context)
except ValueError, e:
return [{
'type': 'error',
'message': unicode(e),
'record': False,
}]
_logger.info('importing %d rows...', len(data))
import_result = self.pool[record.res_model].load(
cr, uid, import_fields, data, context=context)
_logger.info('done')
# If transaction aborted, RELEASE SAVEPOINT is going to raise
# an InternalError (ROLLBACK should work, maybe). Ignore that.
# TODO: to handle multiple errors, create savepoint around
# write and release it in case of write error (after
# adding error to errors array) => can keep on trying to
# import stuff, and rollback at the end if there is any
# error in the results.
try:
if dryrun:
cr.execute('ROLLBACK TO SAVEPOINT import')
else:
cr.execute('RELEASE SAVEPOINT import')
except psycopg2.InternalError:
pass
return import_result['messages']
|
agpl-3.0
|
twiest/openshift-tools
|
openshift/installer/vendored/openshift-ansible-3.6.173.0.59/roles/openshift_health_checker/openshift_checks/disk_availability.py
|
5
|
5957
|
"""Check that there is enough disk space in predefined paths."""
import tempfile
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
class DiskAvailability(OpenShiftCheck):
"""Check that recommended disk space is available before a first-time install."""
name = "disk_availability"
tags = ["preflight"]
# Values taken from the official installation documentation:
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_disk_space_bytes = {
'/var': {
'masters': 40 * 10**9,
'nodes': 15 * 10**9,
'etcd': 20 * 10**9,
},
# Used to copy client binaries into,
# see roles/openshift_cli/library/openshift_container_binary_sync.py.
'/usr/local/bin': {
'masters': 1 * 10**9,
'nodes': 1 * 10**9,
'etcd': 1 * 10**9,
},
# Used as temporary storage in several cases.
tempfile.gettempdir(): {
'masters': 1 * 10**9,
'nodes': 1 * 10**9,
'etcd': 1 * 10**9,
},
}
# recommended disk space for each location under an upgrade context
recommended_disk_upgrade_bytes = {
'/var': {
'masters': 10 * 10**9,
'nodes': 5 * 10 ** 9,
'etcd': 5 * 10 ** 9,
},
}
def is_active(self):
"""Skip hosts that do not have recommended disk space requirements."""
group_names = self.get_var("group_names", default=[])
active_groups = set()
for recommendation in self.recommended_disk_space_bytes.values():
active_groups.update(recommendation.keys())
has_disk_space_recommendation = bool(active_groups.intersection(group_names))
return super(DiskAvailability, self).is_active() and has_disk_space_recommendation
def run(self):
group_names = self.get_var("group_names")
user_config = self.get_var("openshift_check_min_host_disk_gb", default={})
try:
# For backwards-compatibility, if openshift_check_min_host_disk_gb
# is a number, then it overrides the required config for '/var'.
number = float(user_config)
user_config = {
'/var': {
'masters': number,
'nodes': number,
'etcd': number,
},
}
except TypeError:
# If it is not a number, then it should be a nested dict.
pass
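        # Illustrative example (not part of the original source): setting
        # openshift_check_min_host_disk_gb=50 makes 50 GB the '/var' requirement
        # for masters, nodes and etcd, while a nested dict such as
        # {'/var': {'masters': 60}} overrides only that path/group.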
# TODO: as suggested in
# https://github.com/openshift/openshift-ansible/pull/4436#discussion_r122180021,
# maybe we could support checking disk availability in paths that are
# not part of the official recommendation but present in the user
# configuration.
for path, recommendation in self.recommended_disk_space_bytes.items():
free_bytes = self.free_bytes(path)
recommended_bytes = max(recommendation.get(name, 0) for name in group_names)
config = user_config.get(path, {})
# NOTE: the user config is in GB, but we compare bytes, thus the
# conversion.
config_bytes = max(config.get(name, 0) for name in group_names) * 10**9
recommended_bytes = config_bytes or recommended_bytes
# if an "upgrade" context is set, update the minimum disk requirement
# as this signifies an in-place upgrade - the node might have the
# required total disk space, but some of that space may already be
# in use by the existing OpenShift deployment.
context = self.get_var("r_openshift_health_checker_playbook_context", default="")
if context == "upgrade":
recommended_upgrade_paths = self.recommended_disk_upgrade_bytes.get(path, {})
if recommended_upgrade_paths:
recommended_bytes = config_bytes or max(recommended_upgrade_paths.get(name, 0)
for name in group_names)
if free_bytes < recommended_bytes:
free_gb = float(free_bytes) / 10**9
recommended_gb = float(recommended_bytes) / 10**9
msg = (
'Available disk space in "{}" ({:.1f} GB) '
'is below minimum recommended ({:.1f} GB)'
).format(path, free_gb, recommended_gb)
# warn if check failed under an "upgrade" context
# due to limits imposed by the user config
if config_bytes and context == "upgrade":
msg += ('\n\nMake sure to account for decreased disk space during an upgrade\n'
'due to an existing OpenShift deployment. Please check the value of\n'
' openshift_check_min_host_disk_gb={}\n'
'in your Ansible inventory, and lower the recommended disk space availability\n'
'if necessary for this upgrade.').format(config_bytes)
return {
'failed': True,
'msg': msg,
}
return {}
def free_bytes(self, path):
"""Return the size available in path based on ansible_mounts."""
mount = self.find_ansible_mount(path)
try:
return mount['size_available']
except KeyError:
raise OpenShiftCheckException(
'Unable to retrieve disk availability for "{path}".\n'
'Ansible facts included a matching mount point for this path:\n'
' {mount}\n'
'however it is missing the size_available field.\n'
'To investigate, you can inspect the output of `ansible -m setup <host>`'
''.format(path=path, mount=mount)
)
|
apache-2.0
|
turbomanage/training-data-analyst
|
courses/developingapps/python/kubernetesengine/end/backend/start/frontend/quiz/gcp/spanner.py
|
33
|
2582
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
# TODO: Import the spanner module
from google.cloud import spanner
# END TODO
"""
Get spanner management objects
"""
# TODO: Create a spanner Client
spanner_client = spanner.Client()
# END TODO
# TODO: Get a reference to the Cloud Spanner quiz-instance
instance = spanner_client.instance('quiz-instance')
# END TODO
# TODO: Get a reference to the Cloud Spanner quiz-database
database = instance.database('quiz-database')
# END TODO
"""
Takes an email address and reverses it (to be used as primary key)
"""
def reverse_email(email):
return '_'.join(list(reversed(email.replace('@','_').
replace('.','_').
split('_'))))
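# Illustrative example (not part of the original source):
#   reverse_email('user@example.com') returns 'com_example_user'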
"""
Persists feedback data into Spanner
- create primary key value
- do a batch insert (even though it's a single record)
"""
def save_feedback(data):
# TODO: Create a batch object for database operations
with database.batch() as batch:
# TODO: Create a key for the record
# from the email, quiz and timestamp
feedback_id = '{}_{}_{}'.format(reverse_email(data['email']),
data['quiz'],
data['timestamp'])
# END TODO
# TODO: Use the batch to insert a record
# into the feedback table
# This needs the columns and values
batch.insert(
table='feedback',
columns=(
'feedbackId',
'email',
'quiz',
'timestamp',
'rating',
'score',
'feedback'
),
values=[
(
feedback_id,
data['email'],
data['quiz'],
data['timestamp'],
data['rating'],
data['score'],
data['feedback']
)
]
)
# END TODO
# END TODO
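# Illustrative usage (not part of the original source; field values are
# hypothetical):
#   save_feedback({'email': 'user@example.com', 'quiz': 'gcp',
#                  'timestamp': 1509988800, 'rating': 4, 'score': 80,
#                  'feedback': 'Nice quiz'})
# inserts one row into the feedback table keyed by
# 'com_example_user_gcp_1509988800'.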
|
apache-2.0
|
2014cdbg13/cdbg13
|
wsgi/static/Brython2.1.0-20140419-113919/Lib/colorsys.py
|
1066
|
3691
|
"""Conversion functions between RGB and other color systems.
This module provides two functions for each color system ABC:
rgb_to_abc(r, g, b) --> a, b, c
abc_to_rgb(a, b, c) --> r, g, b
All inputs and outputs are triples of floats in the range [0.0...1.0]
(with the exception of I and Q, which cover a slightly larger range).
Inputs outside the valid range may cause exceptions or invalid outputs.
Supported color systems:
RGB: Red, Green, Blue components
YIQ: Luminance, Chrominance (used by composite video signals)
HLS: Hue, Luminance, Saturation
HSV: Hue, Saturation, Value
"""
# References:
# http://en.wikipedia.org/wiki/YIQ
# http://en.wikipedia.org/wiki/HLS_color_space
# http://en.wikipedia.org/wiki/HSV_color_space
__all__ = ["rgb_to_yiq","yiq_to_rgb","rgb_to_hls","hls_to_rgb",
"rgb_to_hsv","hsv_to_rgb"]
# Some floating point constants
ONE_THIRD = 1.0/3.0
ONE_SIXTH = 1.0/6.0
TWO_THIRD = 2.0/3.0
# YIQ: used by composite video signals (linear combinations of RGB)
# Y: perceived grey level (0.0 == black, 1.0 == white)
# I, Q: color components
def rgb_to_yiq(r, g, b):
y = 0.30*r + 0.59*g + 0.11*b
i = 0.60*r - 0.28*g - 0.32*b
q = 0.21*r - 0.52*g + 0.31*b
return (y, i, q)
def yiq_to_rgb(y, i, q):
r = y + 0.948262*i + 0.624013*q
g = y - 0.276066*i - 0.639810*q
b = y - 1.105450*i + 1.729860*q
if r < 0.0:
r = 0.0
if g < 0.0:
g = 0.0
if b < 0.0:
b = 0.0
if r > 1.0:
r = 1.0
if g > 1.0:
g = 1.0
if b > 1.0:
b = 1.0
return (r, g, b)
# HLS: Hue, Luminance, Saturation
# H: position in the spectrum
# L: color lightness
# S: color saturation
def rgb_to_hls(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
# XXX Can optimize (maxc+minc) and (maxc-minc)
l = (minc+maxc)/2.0
if minc == maxc:
return 0.0, l, 0.0
if l <= 0.5:
s = (maxc-minc) / (maxc+minc)
else:
s = (maxc-minc) / (2.0-maxc-minc)
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, l, s
def hls_to_rgb(h, l, s):
if s == 0.0:
return l, l, l
if l <= 0.5:
m2 = l * (1.0+s)
else:
m2 = l+s-(l*s)
m1 = 2.0*l - m2
return (_v(m1, m2, h+ONE_THIRD), _v(m1, m2, h), _v(m1, m2, h-ONE_THIRD))
def _v(m1, m2, hue):
hue = hue % 1.0
if hue < ONE_SIXTH:
return m1 + (m2-m1)*hue*6.0
if hue < 0.5:
return m2
if hue < TWO_THIRD:
return m1 + (m2-m1)*(TWO_THIRD-hue)*6.0
return m1
# HSV: Hue, Saturation, Value
# H: position in the spectrum
# S: color saturation ("purity")
# V: color brightness
def rgb_to_hsv(r, g, b):
maxc = max(r, g, b)
minc = min(r, g, b)
v = maxc
if minc == maxc:
return 0.0, 0.0, v
s = (maxc-minc) / maxc
rc = (maxc-r) / (maxc-minc)
gc = (maxc-g) / (maxc-minc)
bc = (maxc-b) / (maxc-minc)
if r == maxc:
h = bc-gc
elif g == maxc:
h = 2.0+rc-bc
else:
h = 4.0+gc-rc
h = (h/6.0) % 1.0
return h, s, v
def hsv_to_rgb(h, s, v):
if s == 0.0:
return v, v, v
i = int(h*6.0) # XXX assume int() truncates!
f = (h*6.0) - i
p = v*(1.0 - s)
q = v*(1.0 - s*f)
t = v*(1.0 - s*(1.0-f))
i = i%6
if i == 0:
return v, t, p
if i == 1:
return q, v, p
if i == 2:
return p, v, t
if i == 3:
return p, q, v
if i == 4:
return t, p, v
if i == 5:
return v, p, q
# Cannot get here
|
gpl-2.0
|
wiso/dask
|
dask/base.py
|
1
|
3934
|
import warnings
from operator import attrgetter
from hashlib import md5
from functools import partial
from toolz import merge, groupby, curry
from toolz.functoolz import Compose
from .context import _globals
from .utils import Dispatch, ignoring
__all__ = ("Base", "compute", "normalize_token", "tokenize", "visualize")
class Base(object):
"""Base class for dask collections"""
def visualize(self, filename='mydask', optimize_graph=False):
return visualize(self, filename=filename, optimize_graph=optimize_graph)
def _visualize(self, filename='mydask', optimize_graph=False):
warn = DeprecationWarning("``_visualize`` is deprecated, use "
"``visualize`` instead.")
warnings.warn(warn)
return self.visualize(optimize_graph)
def compute(self, **kwargs):
return compute(self, **kwargs)[0]
@classmethod
def _get(cls, dsk, keys, get=None, **kwargs):
get = get or _globals['get'] or cls._default_get
dsk2 = cls._optimize(dsk, keys)
return get(dsk2, keys, **kwargs)
def compute(*args, **kwargs):
"""Compute several dask collections at once.
Examples
--------
>>> import dask.array as da
>>> a = da.arange(10, chunks=2).sum()
>>> b = da.arange(10, chunks=2).mean()
>>> compute(a, b)
(45, 4.5)
"""
groups = groupby(attrgetter('_optimize'), args)
get = kwargs.pop('get', None) or _globals['get']
if not get:
get = args[0]._default_get
if not all(a._default_get == get for a in args):
raise ValueError("Compute called on multiple collections with "
"differing default schedulers. Please specify a "
"scheduler `get` function using either "
"the `get` kwarg or globally with `set_options`.")
dsk = merge([opt(merge([v.dask for v in val]), [v._keys() for v in val])
for opt, val in groups.items()])
keys = [arg._keys() for arg in args]
results = get(dsk, keys, **kwargs)
return tuple(a._finalize(a, r) for a, r in zip(args, results))
def visualize(*args, **kwargs):
filename = kwargs.get('filename', 'mydask')
optimize_graph = kwargs.get('optimize_graph', False)
from dask.dot import dot_graph
if optimize_graph:
dsks = [arg._optimize(arg.dask, arg._keys()) for arg in args]
else:
dsks = [arg.dask for arg in args]
dsk = merge(dsks)
return dot_graph(dsk, filename=filename)
def normalize_function(func):
if isinstance(func, curry):
func = func._partial
if isinstance(func, Compose):
first = getattr(func, 'first', None)
funcs = reversed((first,) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, partial):
kws = tuple(sorted(func.keywords.items())) if func.keywords else ()
return (normalize_function(func.func), func.args, kws)
else:
return str(func)
normalize_token = Dispatch()
normalize_token.register((int, float, str, tuple, list), lambda a: a)
normalize_token.register(object,
lambda a: normalize_function(a) if callable(a) else a)
normalize_token.register(dict, lambda a: tuple(sorted(a.items())))
with ignoring(ImportError):
import pandas as pd
normalize_token.register(pd.DataFrame,
lambda a: (id(a), len(a), list(a.columns)))
normalize_token.register(pd.Series, lambda a: (id(a), len(a), a.name))
with ignoring(ImportError):
import numpy as np
normalize_token.register(np.ndarray, lambda a: (id(a), a.dtype, a.shape))
def tokenize(*args):
""" Deterministic token
>>> tokenize([1, 2, '3'])
'9d71491b50023b06fc76928e6eddb952'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
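# Illustration only (added; ``Point`` and the helper below are hypothetical,
# not part of dask): because ``normalize_token`` is a Dispatch, callers can
# teach ``tokenize`` to build deterministic tokens for their own types.
def _tokenize_custom_type_example():
    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    # tokens now depend only on the coordinate values, not on object identity
    normalize_token.register(Point, lambda p: ('Point', p.x, p.y))
    assert tokenize(Point(1, 2)) == tokenize(Point(1, 2))
    assert tokenize(Point(1, 2)) != tokenize(Point(3, 4))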
|
bsd-3-clause
|
trietptm/volatility
|
volatility/win32/hashdump.py
|
44
|
10456
|
# Volatility
# Copyright (c) 2008-2013 Volatility Foundation
# Copyright (c) 2008 Brendan Dolan-Gavitt <[email protected]>
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
#pylint: disable-msg=C0111
"""
@author: Brendan Dolan-Gavitt
@license: GNU General Public License 2.0
@contact: [email protected]
"""
import volatility.obj as obj
import volatility.win32.rawreg as rawreg
import volatility.win32.hive as hive
from Crypto.Hash import MD5, MD4
from Crypto.Cipher import ARC4, DES
from struct import unpack, pack
odd_parity = [
1, 1, 2, 2, 4, 4, 7, 7, 8, 8, 11, 11, 13, 13, 14, 14,
16, 16, 19, 19, 21, 21, 22, 22, 25, 25, 26, 26, 28, 28, 31, 31,
32, 32, 35, 35, 37, 37, 38, 38, 41, 41, 42, 42, 44, 44, 47, 47,
49, 49, 50, 50, 52, 52, 55, 55, 56, 56, 59, 59, 61, 61, 62, 62,
64, 64, 67, 67, 69, 69, 70, 70, 73, 73, 74, 74, 76, 76, 79, 79,
81, 81, 82, 82, 84, 84, 87, 87, 88, 88, 91, 91, 93, 93, 94, 94,
97, 97, 98, 98, 100, 100, 103, 103, 104, 104, 107, 107, 109, 109, 110, 110,
112, 112, 115, 115, 117, 117, 118, 118, 121, 121, 122, 122, 124, 124, 127, 127,
128, 128, 131, 131, 133, 133, 134, 134, 137, 137, 138, 138, 140, 140, 143, 143,
145, 145, 146, 146, 148, 148, 151, 151, 152, 152, 155, 155, 157, 157, 158, 158,
161, 161, 162, 162, 164, 164, 167, 167, 168, 168, 171, 171, 173, 173, 174, 174,
176, 176, 179, 179, 181, 181, 182, 182, 185, 185, 186, 186, 188, 188, 191, 191,
193, 193, 194, 194, 196, 196, 199, 199, 200, 200, 203, 203, 205, 205, 206, 206,
208, 208, 211, 211, 213, 213, 214, 214, 217, 217, 218, 218, 220, 220, 223, 223,
224, 224, 227, 227, 229, 229, 230, 230, 233, 233, 234, 234, 236, 236, 239, 239,
241, 241, 242, 242, 244, 244, 247, 247, 248, 248, 251, 251, 253, 253, 254, 254
]
# Permutation matrix for boot key
p = [ 0x8, 0x5, 0x4, 0x2, 0xb, 0x9, 0xd, 0x3,
0x0, 0x6, 0x1, 0xc, 0xe, 0xa, 0xf, 0x7 ]
# Constants for SAM decrypt algorithm
aqwerty = "!@#$%^&*()qwertyUIOPAzxcvbnmQQQQQQQQQQQQ)(*@&%\0"
anum = "0123456789012345678901234567890123456789\0"
antpassword = "NTPASSWORD\0"
almpassword = "LMPASSWORD\0"
lmkey = "KGS!@#$%"
empty_lm = "aad3b435b51404eeaad3b435b51404ee".decode('hex')
empty_nt = "31d6cfe0d16ae931b73c59d7e0c089c0".decode('hex')
def str_to_key(s):
key = []
key.append(ord(s[0]) >> 1)
key.append(((ord(s[0]) & 0x01) << 6) | (ord(s[1]) >> 2))
key.append(((ord(s[1]) & 0x03) << 5) | (ord(s[2]) >> 3))
key.append(((ord(s[2]) & 0x07) << 4) | (ord(s[3]) >> 4))
key.append(((ord(s[3]) & 0x0F) << 3) | (ord(s[4]) >> 5))
key.append(((ord(s[4]) & 0x1F) << 2) | (ord(s[5]) >> 6))
key.append(((ord(s[5]) & 0x3F) << 1) | (ord(s[6]) >> 7))
key.append(ord(s[6]) & 0x7F)
for i in range(8):
key[i] = (key[i] << 1)
key[i] = odd_parity[key[i]]
return "".join(chr(k) for k in key)
def sid_to_key(sid):
s1 = ""
s1 += chr(sid & 0xFF)
s1 += chr((sid >> 8) & 0xFF)
s1 += chr((sid >> 16) & 0xFF)
s1 += chr((sid >> 24) & 0xFF)
s1 += s1[0]
s1 += s1[1]
s1 += s1[2]
s2 = s1[3] + s1[0] + s1[1] + s1[2]
s2 += s2[0] + s2[1] + s2[2]
return str_to_key(s1), str_to_key(s2)
def hash_lm(pw):
pw = pw[:14].upper()
pw = pw + ('\0' * (14 - len(pw)))
d1 = DES.new(str_to_key(pw[:7]), DES.MODE_ECB)
d2 = DES.new(str_to_key(pw[7:]), DES.MODE_ECB)
return d1.encrypt(lmkey) + d2.encrypt(lmkey)
def hash_nt(pw):
return MD4.new(pw.encode('utf-16-le')).digest()
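# Sanity-check sketch (added for illustration; the helper below is not part of
# the original module): hashing the empty password with the routines above
# should reproduce the well-known "empty" LM/NT hashes defined earlier.
def _self_test_empty_hashes():
    assert hash_lm("") == empty_lm
    assert hash_nt("") == empty_nt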
def find_control_set(sysaddr):
root = rawreg.get_root(sysaddr)
if not root:
return 1
csselect = rawreg.open_key(root, ["Select"])
if not csselect:
return 1
for v in rawreg.values(csselect):
if v.Name == "Current":
return v.Data
def get_bootkey(sysaddr):
cs = find_control_set(sysaddr)
lsa_base = ["ControlSet{0:03}".format(cs), "Control", "Lsa"]
lsa_keys = ["JD", "Skew1", "GBG", "Data"]
root = rawreg.get_root(sysaddr)
if not root:
return None
lsa = rawreg.open_key(root, lsa_base)
if not lsa:
return None
bootkey = ""
for lk in lsa_keys:
key = rawreg.open_key(lsa, [lk])
class_data = sysaddr.read(key.Class, key.ClassLength)
bootkey += class_data.decode('utf-16-le').decode('hex')
bootkey_scrambled = ""
for i in range(len(bootkey)):
bootkey_scrambled += bootkey[p[i]]
return bootkey_scrambled
def get_hbootkey(samaddr, bootkey):
sam_account_path = ["SAM", "Domains", "Account"]
if not bootkey:
return None
root = rawreg.get_root(samaddr)
if not root:
return None
sam_account_key = rawreg.open_key(root, sam_account_path)
if not sam_account_key:
return None
F = None
for v in rawreg.values(sam_account_key):
if v.Name == 'F':
F = samaddr.read(v.Data, v.DataLength)
if not F:
return None
md5 = MD5.new()
md5.update(F[0x70:0x80] + aqwerty + bootkey + anum)
rc4_key = md5.digest()
rc4 = ARC4.new(rc4_key)
hbootkey = rc4.encrypt(F[0x80:0xA0])
return hbootkey
def get_user_keys(samaddr):
user_key_path = ["SAM", "Domains", "Account", "Users"]
root = rawreg.get_root(samaddr)
if not root:
return []
user_key = rawreg.open_key(root, user_key_path)
if not user_key:
return []
return [k for k in rawreg.subkeys(user_key) if k.Name != "Names"]
def decrypt_single_hash(rid, hbootkey, enc_hash, lmntstr):
(des_k1, des_k2) = sid_to_key(rid)
d1 = DES.new(des_k1, DES.MODE_ECB)
d2 = DES.new(des_k2, DES.MODE_ECB)
md5 = MD5.new()
md5.update(hbootkey[:0x10] + pack("<L", rid) + lmntstr)
rc4_key = md5.digest()
rc4 = ARC4.new(rc4_key)
obfkey = rc4.encrypt(enc_hash)
hash = d1.decrypt(obfkey[:8]) + d2.decrypt(obfkey[8:])
return hash
def decrypt_hashes(rid, enc_lm_hash, enc_nt_hash, hbootkey):
# LM Hash
if enc_lm_hash:
lmhash = decrypt_single_hash(rid, hbootkey, enc_lm_hash, almpassword)
else:
lmhash = ""
# NT Hash
if enc_nt_hash:
nthash = decrypt_single_hash(rid, hbootkey, enc_nt_hash, antpassword)
else:
nthash = ""
return lmhash, nthash
def encrypt_single_hash(rid, hbootkey, hash, lmntstr):
(des_k1, des_k2) = sid_to_key(rid)
d1 = DES.new(des_k1, DES.MODE_ECB)
d2 = DES.new(des_k2, DES.MODE_ECB)
enc_hash = d1.encrypt(hash[:8]) + d2.encrypt(hash[8:])
md5 = MD5.new()
md5.update(hbootkey[:0x10] + pack("<L", rid) + lmntstr)
rc4_key = md5.digest()
rc4 = ARC4.new(rc4_key)
obfkey = rc4.encrypt(enc_hash)
return obfkey
def encrypt_hashes(rid, lm_hash, nt_hash, hbootkey):
# LM Hash
if lm_hash:
enc_lmhash = encrypt_single_hash(rid, hbootkey, lm_hash, almpassword)
else:
enc_lmhash = ""
# NT Hash
if nt_hash:
enc_nthash = encrypt_single_hash(rid, hbootkey, nt_hash, antpassword)
else:
enc_nthash = ""
return enc_lmhash, enc_nthash
def get_user_hashes(user_key, hbootkey):
samaddr = user_key.obj_vm
rid = int(str(user_key.Name), 16)
V = None
for v in rawreg.values(user_key):
if v.Name == 'V':
V = samaddr.read(v.Data, v.DataLength)
if not V:
return None
lm_offset = unpack("<L", V[0x9c:0xa0])[0] + 0xCC + 4
lm_len = unpack("<L", V[0xa0:0xa4])[0] - 4
nt_offset = unpack("<L", V[0xa8:0xac])[0] + 0xCC + 4
nt_len = unpack("<L", V[0xac:0xb0])[0] - 4
if lm_len:
enc_lm_hash = V[lm_offset:lm_offset + 0x10]
else:
enc_lm_hash = ""
if nt_len:
enc_nt_hash = V[nt_offset:nt_offset + 0x10]
else:
enc_nt_hash = ""
return decrypt_hashes(rid, enc_lm_hash, enc_nt_hash, hbootkey)
def get_user_name(user_key):
samaddr = user_key.obj_vm
V = None
for v in rawreg.values(user_key):
if v.Name == 'V':
V = samaddr.read(v.Data, v.DataLength)
if not V:
return None
name_offset = unpack("<L", V[0x0c:0x10])[0] + 0xCC
name_length = unpack("<L", V[0x10:0x14])[0]
username = V[name_offset:name_offset + name_length].decode('utf-16-le')
return username
def get_user_desc(user_key):
samaddr = user_key.obj_vm
V = None
for v in rawreg.values(user_key):
if v.Name == 'V':
V = samaddr.read(v.Data, v.DataLength)
if not V:
return None
desc_offset = unpack("<L", V[0x24:0x28])[0] + 0xCC
desc_length = unpack("<L", V[0x28:0x2c])[0]
desc = V[desc_offset:desc_offset + desc_length].decode('utf-16-le')
return desc
def dump_hashes(sysaddr, samaddr):
bootkey = get_bootkey(sysaddr)
hbootkey = get_hbootkey(samaddr, bootkey)
if hbootkey:
for user in get_user_keys(samaddr):
ret = get_user_hashes(user, hbootkey)
if not ret:
yield obj.NoneObject("Cannot get user hashes for {0}".format(user))
else:
lmhash, nthash = ret
if not lmhash:
lmhash = empty_lm
if not nthash:
nthash = empty_nt
yield "{0}:{1}:{2}:{3}:::".format(get_user_name(user), int(str(user.Name), 16),
lmhash.encode('hex'), nthash.encode('hex'))
else:
yield obj.NoneObject("Hbootkey is not valid")
def dump_memory_hashes(addr_space, config, syshive, samhive):
sysaddr = hive.HiveAddressSpace(addr_space, config, syshive)
samaddr = hive.HiveAddressSpace(addr_space, config, samhive)
return dump_hashes(sysaddr, samaddr)
def dump_file_hashes(syshive_fname, samhive_fname):
sysaddr = hive.HiveFileAddressSpace(syshive_fname)
samaddr = hive.HiveFileAddressSpace(samhive_fname)
return dump_hashes(sysaddr, samaddr)
|
gpl-2.0
|
alcides/rdflib
|
rdflib/journal.py
|
1
|
2342
|
import logging
_logger = logging.getLogger(__name__)
from rdflib.graph import QuotedGraph
from rdflib.events import Event, Dispatcher
from rdflib.store import TripleAddedEvent, TripleRemovedEvent, StoreCreatedEvent
class JournalWriter(object):
"""
Writes a journal of the store events.
"""
def __init__(self, store, stream=None, filename=None):
if stream is None:
assert filename, "Must specify either stream or filename"
stream = file(filename, "ab")
dispatcher = store.dispatcher
dispatcher.subscribe(TripleAddedEvent, self.journal_event)
dispatcher.subscribe(TripleRemovedEvent, self.journal_event)
dispatcher.subscribe(StoreCreatedEvent, self.journal_event)
self._dumps = store.node_pickler.dumps
self._write = stream.write
def journal_event(self, event):
self._write(self._dumps(event))
self._write("\n\n")
class JournalReader(object):
"""
Reads a journal of store events into a store.
"""
def __init__(self, store, filename):
self.stream = file(filename, "rb")
self.store = store
dispatcher = Dispatcher()
dispatcher.subscribe(TripleAddedEvent, self.add)
dispatcher.subscribe(TripleRemovedEvent, self.remove)
dispatcher.subscribe(StoreCreatedEvent, self.store_created)
loads = store.node_pickler.loads
dispatch = dispatcher.dispatch
lines = []
for line in self.stream:
if line=="\n":
try:
event = loads("".join(lines))
dispatch(event)
lines = []
except Exception, e:
_logger.exception(e)
_logger.debug("lines: '%s'" % lines)
lines = []
else:
lines.append(line)
def add(self, event):
context = event.context
quoted = isinstance(context, QuotedGraph)
self.store.add(event.triple, context, quoted)
def remove(self, event):
self.store.remove(event.triple, event.context)
def store_created(self, event):
n = len(self.store)
if n>0:
_logger.warning("Store not empty for 'store created'. Contains '%s' assertions" % n)
# TODO: clear store
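# Usage sketch (comments only; the graph/store wiring below is an assumption
# for illustration, not taken from this module, and presumes the store
# dispatches TripleAddedEvent/TripleRemovedEvent):
#
#   from rdflib.graph import ConjunctiveGraph
#   g = ConjunctiveGraph()
#   JournalWriter(g.store, filename="/tmp/store.journal")
#   # ... triple additions/removals are pickled to the journal, one per event ...
#
#   # later, replay the journal into a fresh store
#   g2 = ConjunctiveGraph()
#   JournalReader(g2.store, "/tmp/store.journal")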
|
bsd-3-clause
|
jasalt/spotify-ripper
|
spotify_ripper/main.py
|
2
|
15457
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from colorama import init, Fore, AnsiToWin32
from spotify_ripper.ripper import Ripper
from spotify_ripper.utils import *
import os
import sys
import codecs
import time
import argparse
import pkg_resources
if sys.version_info >= (3, 0):
import configparser as ConfigParser
else:
import ConfigParser
import schedule
import signal
def load_config(args, defaults):
_settings_dir = settings_dir(args)
config_file = os.path.join(_settings_dir, "config.ini")
if os.path.exists(config_file):
try:
config = ConfigParser.SafeConfigParser()
config.read(config_file)
if not config.has_section("main"):
return defaults
config_items = dict(config.items("main"))
to_array_options = [
"directory", "key", "user", "password", "log",
"genres", "format"]
# coerce boolean and none types
for _key in config_items:
item = config_items[_key]
if item == 'True':
config_items[_key] = True
elif item == 'False':
config_items[_key] = False
elif item == 'None':
config_items[_key] = None
else:
config_items[_key] = item.strip("'\"")
# certain options need to be in array (nargs=1)
if _key in to_array_options:
item = config_items[_key]
if item is not None:
config_items[_key] = [item]
# overwrite any existing defaults
defaults.update(config_items)
except ConfigParser.Error as e:
print("\nError parsing config file: " + config_file)
print(str(e))
return defaults
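# Illustrative config.ini (an assumption about typical contents, not shipped
# defaults): load_config() reads the [main] section, coerces the literals
# True/False/None, and wraps options listed in to_array_options in one-element
# lists so they look like nargs=1 argparse values.
#
#   [main]
#   user = alice
#   directory = /home/alice/music
#   vbr = 2
#   ascii = True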
def patch_bug_in_mutagen():
from mutagen.mp4 import MP4Tags, MP4Cover
from mutagen.mp4._atom import Atoms, Atom, AtomError
import struct
def _key2name(key):
if sys.version_info >= (3, 0):
return key.encode("latin-1")
else:
return key
def __fixed_render_cover(self, key, value):
atom_data = []
for cover in value:
try:
imageformat = cover.imageformat
except AttributeError:
imageformat = MP4Cover.FORMAT_JPEG
atom_data.append(Atom.render(
b"data", struct.pack(">2I", imageformat, 0) + bytes(cover)))
return Atom.render(_key2name(key), b"".join(atom_data))
print(
Fore.RED + "Monkey-patching MP4/Python 3.x bug in Mutagen" +
Fore.RESET)
MP4Tags.__fixed_render_cover = __fixed_render_cover
MP4Tags._MP4Tags__atoms[b"covr"] = (
MP4Tags._MP4Tags__parse_cover, MP4Tags.__fixed_render_cover)
def main(prog_args=sys.argv[1:]):
# in case we changed the location of the settings directory where the
# config file lives, we need to parse this argument before we parse
# the rest of the arguments (which can overwrite the options in the
# config file)
settings_parser = argparse.ArgumentParser(add_help=False)
settings_parser.add_argument(
'-S', '--settings', nargs=1,
help='Path to settings, config and temp files directory '
'[Default=~/.spotify-ripper]')
args, remaining_argv = settings_parser.parse_known_args(prog_args)
# load config file, overwriting any defaults
defaults = {
"bitrate": "320",
"quality": "320",
"comp": "10",
"vbr": "0",
}
defaults = load_config(args, defaults)
parser = argparse.ArgumentParser(
prog='spotify-ripper',
description='Rips Spotify URIs to MP3s with ID3 tags and album covers',
parents=[settings_parser],
formatter_class=argparse.RawTextHelpFormatter,
epilog='''Example usage:
rip a single file: spotify-ripper -u user -p password spotify:track:52xaypL0Kjzk0ngwv3oBPR
rip entire playlist: spotify-ripper -u user -p password spotify:user:username:playlist:4vkGNcsS8lRXj4q945NIA4
rip a list of URIs: spotify-ripper -u user -p password list_of_uris.txt
search for tracks to rip: spotify-ripper -l -b 160 -o "album:Rumours track:'the chain'"
''')
# create group to prevent user from using both the -l and -u option
is_user_set = defaults.get('user') is not None
is_last_set = defaults.get('last') is True
if is_user_set or is_last_set:
if is_user_set and is_last_set:
print("spotify-ripper: error: one of the arguments -u/--user "
"-l/--last is required")
sys.exit(1)
else:
group = parser.add_mutually_exclusive_group(required=False)
else:
group = parser.add_mutually_exclusive_group(required=True)
encoding_group = parser.add_mutually_exclusive_group(required=False)
# set defaults
parser.set_defaults(**defaults)
prog_version = pkg_resources.require("spotify-ripper")[0].version
parser.add_argument(
'-a', '--ascii', action='store_true',
help='Convert the file name and the metadata tags to ASCII '
'encoding [Default=utf-8]')
encoding_group.add_argument(
'--aac', action='store_true',
help='Rip songs to AAC format with FreeAAC instead of MP3')
parser.add_argument(
'-A', '--ascii-path-only', action='store_true',
help='Convert the file name (but not the metadata tags) to ASCII '
'encoding [Default=utf-8]')
parser.add_argument(
'-b', '--bitrate', help='CBR bitrate [Default=320]')
parser.add_argument(
'-c', '--cbr', action='store_true', help='CBR encoding [Default=VBR]')
parser.add_argument(
'--comp', default="10",
help='compression complexity for FLAC and Opus [Default=Max]')
parser.add_argument(
'--comment', nargs=1,
help='Add custom metadata comment to all songs')
parser.add_argument(
'--cover-file', nargs=1,
help='Save album cover image to file name (e.g "cover.jpg") [Default=embed]')
parser.add_argument(
'-d', '--directory', nargs=1,
help='Base directory where ripped MP3s are saved [Default=cwd]')
parser.add_argument(
'--fail-log', nargs=1,
help="Logs the list of track URIs that failed to rip"
)
encoding_group.add_argument(
'--flac', action='store_true',
help='Rip songs to lossless FLAC encoding instead of MP3')
parser.add_argument(
'-f', '--format', nargs=1,
help='Save songs using this path and filename structure (see README)')
parser.add_argument(
'--flat', action='store_true',
help='Save all songs to a single directory '
'(overrides --format option)')
parser.add_argument(
'--flat-with-index', action='store_true',
help='Similar to --flat [-f] but includes the playlist index at '
'the start of the song file')
parser.add_argument(
'-g', '--genres', nargs=1,
choices=['artist', 'album'],
help='Attempt to retrieve genre information from Spotify\'s '
'Web API [Default=skip]')
parser.add_argument(
'-k', '--key', nargs=1,
help='Path to Spotify application key file [Default=Settings Directory]')
group.add_argument(
'-u', '--user', nargs=1,
help='Spotify username')
parser.add_argument(
'-p', '--password', nargs=1,
help='Spotify password [Default=ask interactively]')
group.add_argument(
'-l', '--last', action='store_true',
help='Use last login credentials')
parser.add_argument(
'-L', '--log', nargs=1,
help='Log in a log-friendly format to a file (use - to log to stdout)')
encoding_group.add_argument(
'--pcm', action='store_true',
help='Saves a .pcm file with the raw PCM data instead of MP3')
encoding_group.add_argument(
'--mp4', action='store_true',
help='Rip songs to MP4/M4A format with Fraunhofer FDK AAC codec '
'instead of MP3')
parser.add_argument(
'--normalize', action='store_true',
help='Normalize volume levels of tracks')
parser.add_argument(
'-o', '--overwrite', action='store_true',
help='Overwrite existing MP3 files [Default=skip]')
encoding_group.add_argument(
'--opus', action='store_true',
help='Rip songs to Opus encoding instead of MP3')
parser.add_argument(
'-q', '--vbr',
help='VBR quality setting or target bitrate for Opus [Default=0]')
parser.add_argument(
'-Q', '--quality', choices=['160', '320', '96'],
help='Spotify stream bitrate preference [Default=320]')
parser.add_argument(
'-s', '--strip-colors', action='store_true',
help='Strip coloring from output [Default=colors]')
parser.add_argument(
'-V', '--version', action='version', version=prog_version)
encoding_group.add_argument(
'--wav', action='store_true',
help='Rip songs to uncompressed WAV file instead of MP3')
encoding_group.add_argument(
'--vorbis', action='store_true',
help='Rip songs to Ogg Vorbis encoding instead of MP3')
parser.add_argument(
'-r', '--remove-from-playlist', action='store_true',
help='Delete tracks from playlist after successful '
'ripping [Default=no]')
parser.add_argument(
'-x', '--exclude-appears-on', action='store_true',
help='Exclude albums that an artist \'appears on\' when passing '
'a Spotify artist URI')
parser.add_argument(
'uri', nargs="+",
help='One or more Spotify URI(s) (either URI, a file of URIs or a '
'search query)')
args = parser.parse_args(remaining_argv)
# kind of a hack to get colorama stripping to work when outputting
# to a file instead of stdout. Taken from initialise.py in colorama
def wrap_stream(stream, convert, strip, autoreset, wrap):
if wrap:
wrapper = AnsiToWin32(stream, convert=convert,
strip=strip, autoreset=autoreset)
if wrapper.should_wrap():
stream = wrapper.stream
return stream
args.has_log = args.log is not None
if args.has_log:
if args.log[0] == "-":
init(strip=True)
else:
log_file = open(args.log[0], 'a')
sys.stdout = wrap_stream(log_file, None, True, False, True)
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
else:
init(strip=True if args.strip_colors else None)
if args.ascii_path_only is True:
args.ascii = True
if args.wav:
args.output_type = "wav"
elif args.pcm:
args.output_type = "pcm"
elif args.flac:
args.output_type = "flac"
if args.comp == "10":
args.comp = "8"
elif args.vorbis:
args.output_type = "ogg"
if args.vbr == "0":
args.vbr = "10"
elif args.opus:
args.output_type = "opus"
if args.vbr == "0":
args.vbr = "320"
elif args.aac:
args.output_type = "aac"
if args.vbr == "0":
args.vbr = "500"
elif args.mp4:
args.output_type = "m4a"
if args.vbr == "0":
args.vbr = "5"
else:
args.output_type = "mp3"
# check that encoder tool is available
encoders = {
"flac": ("flac", "flac"),
"aac": ("faac", "faac"),
"ogg": ("oggenc", "vorbis-tools"),
"opus": ("opusenc", "opus-tools"),
"mp3": ("lame", "lame"),
"m4a": ("fdkaac", "fdk-aac-encoder"),
}
if args.output_type in encoders.keys():
encoder = encoders[args.output_type][0]
if which(encoder) is None:
print(Fore.RED + "Missing dependency '" + encoder +
"'. Please install and add to path..." + Fore.RESET)
# assumes OS X or Ubuntu/Debian
command_help = ("brew install " if sys.platform == "darwin"
else "sudo apt-get install ")
print("...try " + Fore.YELLOW + command_help +
encoders[args.output_type][1] + Fore.RESET)
sys.exit(1)
# format string
if args.flat:
args.format = ["{artist} - {track_name}.{ext}"]
elif args.flat_with_index:
args.format = ["{idx:3} - {artist} - {track_name}.{ext}"]
elif args.format is None:
args.format = ["{album_artist}/{album}/{artist} - {track_name}.{ext}"]
# print some settings
print(Fore.GREEN + "Spotify Ripper - v" + prog_version + Fore.RESET)
def encoding_output_str():
if args.output_type == "wav":
return "WAV, Stereo 16bit 44100Hz"
elif args.output_type == "pcm":
return "Raw Headerless PCM, Stereo 16bit 44100Hz"
else:
if args.output_type == "flac":
return "FLAC, Compression Level: " + args.comp
elif args.output_type == "ogg":
codec = "Ogg Vorbis"
elif args.output_type == "opus":
codec = "Opus"
elif args.output_type == "mp3":
codec = "MP3"
elif args.output_type == "m4a":
codec = "MPEG4 AAC"
elif args.output_type == "aac":
codec = "AAC"
else:
codec = "Unknown"
if args.cbr:
return codec + ", CBR " + args.bitrate + " kbps"
else:
return codec + ", VBR " + args.vbr
print(Fore.YELLOW + " Encoding output:\t" +
Fore.RESET + encoding_output_str())
print(Fore.YELLOW + " Spotify bitrate:\t" +
Fore.RESET + args.quality + " kbps")
def unicode_support_str():
if args.ascii_path_only:
return "Unicode tags, ASCII file path"
elif args.ascii:
return "ASCII only"
else:
return "Yes"
print(Fore.YELLOW + " Unicode support:\t" +
Fore.RESET + unicode_support_str())
print(Fore.YELLOW + " Output directory:\t" + Fore.RESET +
base_dir(args))
print(Fore.YELLOW + " Settings directory:\t" + Fore.RESET +
settings_dir(args))
print(Fore.YELLOW + " Format String:\t" + Fore.RESET + args.format[0])
print(Fore.YELLOW + " Overwrite files:\t" +
Fore.RESET + ("Yes" if args.overwrite else "No"))
# patch a mutagen bug when using Python 3 with MP4 output
if sys.version_info >= (3, 0) and args.output_type == "m4a":
patch_bug_in_mutagen()
ripper = Ripper(args)
ripper.start()
# try to listen for terminal resize events
# (needs to be called on main thread)
if not args.has_log:
ripper.progress.handle_resize()
signal.signal(signal.SIGWINCH, ripper.progress.handle_resize)
# wait for ripping thread to finish
try:
while not ripper.finished:
schedule.run_pending()
time.sleep(0.1)
except (KeyboardInterrupt, Exception) as e:
if not isinstance(e, KeyboardInterrupt):
print(str(e))
print("\n" + Fore.RED + "Aborting..." + Fore.RESET)
ripper.abort()
sys.exit(1)
if __name__ == '__main__':
main()
|
mit
|
chokribr/invenioold
|
modules/bibexport/lib/bibexport_method_fieldexporter_dblayer.py
|
35
|
41803
|
# -*- coding: utf-8 -*-
##
## $Id: webmessage_dblayer.py,v 1.28 2008/08/08 13:28:15 cparker Exp $
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Every db-related function of plugin field exporter"""
__revision__ = "$Id: webmessage_dblayer.py,v 1.28 2008/08/08 13:28:15 cparker Exp $"
import os
import zipfile
import tempfile
import shutil
from time import localtime
from invenio.dbquery import run_sql
from invenio.dateutils import convert_datestruct_to_datetext, \
convert_datetext_to_datestruct
from invenio import bibrecord as bibrecord
from invenio import xmlmarc2textmarc as xmlmarc2textmarc
class Job:
"""Represents job that will run certain number of queries
and save the results in a given location"""
# Constants defining different output formats
OUTPUT_FORMAT_MISSING = -1
OUTPUT_FORMAT_MARCXML = 0
OUTPUT_FORMAT_MARC = 1
# Value indicating that the ID is not specified
ID_MISSING = -1
# ID of the job in the database
_id = ID_MISSING
# Name of the job, used to identify the job among the other jobs
_name = ""
# Frequence of execution of the job in hours
_frequency = 0
# Output format for displaying the results
_output_format = 0
# Last time when the job has run. If it is in the future the job will
# run in the specified time
_last_run = localtime()
# Directory where the output of the queries will be stored
_output_directory = ""
def __init__(self, job_id = ID_MISSING,
name = "",
frequency = 0,
output_format = OUTPUT_FORMAT_MARCXML,
last_run = localtime(),
output_directory="" ):
"""Initialize the state of the object
@param job_id: id of the job
@param name: name of the job
@param frequency: frequency of execution in hours
@param last_run: last time when job has run
@param output_directory: directory where the output of
the job will be stored
"""
self._id = job_id
self.set_name(name)
self.set_frequency(frequency)
self.set_output_format(output_format)
self.set_last_run(last_run)
self.set_output_directory(output_directory)
def get_name(self):
"""Returns the name of the job"""
return self._name
def set_name(self, value):
"""Sets the name of the job"""
self._name = value
def get_frequency(self):
"""Returns the freqency of execution in hours"""
return self._frequency
def set_frequency(self, value):
"""Sets the freqency of execution in hours
@param value: integer representing frequency of
execution in hours
"""
self._frequency = value
def get_output_format(self):
"""Returns value indicating the ouput format of the job.
@return: integer value representing output format"""
return self._output_format
def set_output_format(self, value):
"""Sets the output format of the job
@param value: integer indicating the output format"""
self._output_format = value
def get_last_run(self):
"""Returns the last run time of the job.
@return: datestruct representing last run"""
return self._last_run
def set_last_run(self, value):
"""Sets the last run time of the job.
@param value: datestruct representing last run"""
self._last_run = value
def get_output_directory(self):
"""Returns the output directory"""
return self._output_directory
def set_output_directory(self, value):
"""Sets the output directory"""
self._output_directory = value
def get_id(self):
"""Returns identifier of the job"""
return self._id
class Query:
"""Represents query that will return certain fields
from the records that match the criteria of the query."""
# Value indicating that the ID is not specified
ID_MISSING = -1
# ID of the query in the database
_id = ID_MISSING
# name of the query - it is defined by librarians and
# helps them to identify their queries
_name = ""
# combination of search terms written the same way we
# write them in the search box in Invenio
_search_criteria = ""
# free text that describes the query
_comment = ""
# list of fields that will be retrieved for every record
_output_fields = []
def __init__(self, query_id = ID_MISSING,
name = "",
search_criteria = "",
comment = "",
output_fields = []):
"""Initialize the state of the object
@param id: id of the query in database
@param search_criteria: criteria used for searching records
@param comment: text describing the query
@param output_fields: list containing the fields that will be written in the output
"""
self._id = query_id
self.set_name(name)
self.set_search_criteria(search_criteria)
self.set_comment(comment)
self.set_output_fields(output_fields)
def get_search_criteria(self):
"""Returns the search criteria of the query"""
return self._search_criteria
def set_search_criteria(self, value):
"""Sets the search criteria of the query"""
self._search_criteria = value
def get_output_fields(self):
"""Returns a list of the fields that will
be used to filter the output
"""
return self._output_fields
def set_output_fields(self, value):
"""
Sets the fields that will be used to filter the output.
Only these fields will be printed in the output
"""
self._output_fields = value
def get_name(self):
"""Returns the name of the query"""
return self._name
def set_name(self, value):
"""Sets the name of the query"""
self._name = value
def get_comment(self):
"""Returns description of the query"""
return self._comment
def set_comment(self, value):
"""Sets description of the query"""
self._comment = value
def get_id(self):
"""Returns identifier of the job"""
return self._id
class QueryResult:
"""Class containing the result of query execution."""
# Constants defining different kind of status of the query
STATUS_CODE_OK = 0
STATUS_CODE_ERROR = 1
# Value indicating that the ID is not specified
ID_MISSING = -1
# ID of the query in the database
_id = ID_MISSING
# Query object representing the query related to this query result
_query = None
# The result of execution of the query
_result = ""
# Status of execution of the query
# Contains information if the execution was successful or
# there are errors during execution
_status = STATUS_CODE_OK
# Contains additional information about the status
_status_message = ""
def __init__(self, query,
result,
id = ID_MISSING,
status = STATUS_CODE_OK,
status_message = ""):
"""Initialize the state of the object
@param id: identifier of the result in the database
@param query: Query object with information about the query causing the result
@param result: the result of query execution
@param status: status of execution
@param status_message: text containing additional information about
the status
"""
self._id = id
self.set_query(query)
self.set_result(result)
self.set_status(status)
self.set_status_message(status_message)
def get_result(self, output_format = Job.OUTPUT_FORMAT_MARCXML):
"""Returns MARC XML with the records that are
result of the query execution"""
# Originally the result is kept in MARCXML
result = self._result
if output_format == Job.OUTPUT_FORMAT_MARC:
result = self._create_marc(records_xml = result)
return result
def set_result(self, value):
"""Sets the result of execution
@param value: MARC XML containing information
for the records that are result of execution"""
self._result = value
def get_status(self):
"""Returns the status of the result
@return: Integer value representing the status of execution"""
return self._status
def set_status(self, value):
"""Sets the status of the result.
@param value: Integer value representing the status of execution"""
self._status = value
def get_status_message(self):
"""Sets the status message of the result
@return: string containing the message"""
return self._status_message
def set_status_message(self, value):
"""Returns the status message of the result
@param value: string containing the message"""
self._status_message = value
def get_query(self):
"""Returns the query causing the result"""
return self._query
def set_query(self, value):
"""Sets the query causing the result
@param value: Query object"""
self._query = value
def get_id(self):
"""Returns identifier of the query result"""
return self._id
def get_number_of_records_found(self):
"""Returns the number of records in the result"""
records = bibrecord.create_records(self._result)
records_count = len(records)
return records_count
def _create_marc(self, records_xml):
"""Creates MARC from MARCXML.
@param records_xml: MARCXML containing information about the records
@return: string containing information about the records
in MARC format
"""
aleph_marc_output = ""
records = bibrecord.create_records(records_xml)
for (record, status_code, list_of_errors) in records:
sysno_options = {"text-marc":1}
sysno = xmlmarc2textmarc.get_sysno_from_record(record,
sysno_options)
options = {"aleph-marc":0, "correct-mode":1, "append-mode":0,
"delete-mode":0, "insert-mode":0, "replace-mode":0,
"text-marc":1}
aleph_record = xmlmarc2textmarc.create_marc_record(record,
sysno,
options)
aleph_marc_output += aleph_record
return aleph_marc_output
class JobResult:
"""Class containing the result of job execution."""
# Constants defining different kind of status of the job
STATUS_CODE_OK = 0
STATUS_CODE_ERROR = 1
# Value indicating that the ID is not specified
ID_MISSING = -1
# ID of the query in the database
_id = ID_MISSING
# Query object representing the query related to this query result
_job = None
# List of query results (one result per query in the job)
_query_results = []
# Status of execution of the job
# Contains information if the execution was successful or
# there are errors during execution
_status = STATUS_CODE_OK
# Contains additional information about the status
_status_message = ""
# Date and time of job execution
_execution_date_time = localtime()
def __init__(self, job,
query_results = [],
execution_date_time = localtime(),
id = ID_MISSING,
status = STATUS_CODE_OK,
status_message = ""):
"""Initialize the state of the object
@param id: identifier of the job result in the database
@param query_results: List of query results
(one result per query in the job)
@param status: status of execution
@param status_message: text containing additional information about
the status
"""
self._id = id
self.set_job(job)
self.set_query_results(query_results)
self.set_execution_date_time(execution_date_time)
self.set_status(status)
self.set_status_message(status_message)
def get_query_results(self):
"""Returns list of results from the queries in the job
@return: List of QueryResult objects"""
return self._query_results
def set_query_results(self, value):
"""Sets the results of execution of the job queries.
@param value: list of QueryResult objects
"""
self._query_results = value
def add_query_result(self, query_result):
"""Adds a aquery result to the results
@param query_result: QueryResult object containing information
about the result
"""
self._query_results.append(query_result)
def get_status(self):
"""Returns the status of the execution
@return: Integer value representing the status of execution"""
return self._status
def set_status(self, value):
"""Sets the status of the execution.
@param value: Integer value representing the status of execution"""
self._status = value
def get_status_message(self):
"""Sets the status message of the result
@return: string containing the message"""
return self._status_message
def set_status_message(self, value):
"""Returns the status message of the result
@param value: string containing the message"""
self._status_message = value
def get_job(self):
"""Sets the job causing the result"""
return self._job
def add_status_message(self, message):
"""Adds additional message to status message field
@param message: string containing the additional message
"""
self._status_message += "\n"
self._status_message += message
def set_job(self, value):
"""Returns the job causing the result"""
self._job = value
def get_id(self):
"""Returns identifier of the job result"""
return self._id
def get_execution_date_time(self):
"""Returns the date and time of job execution.
@return: datestruct representing date and time of execution"""
return self._execution_date_time
def set_execution_date_time(self, value):
"""Sets the last run time of the job.
@param value: datestruct representing date and time of execution"""
self._execution_date_time = value
def get_number_of_records_found(self):
"""Returns the number of records in the job result"""
records_count = 0
for query_result in self.get_query_results():
records_count += query_result.get_number_of_records_found()
return records_count
class FieldExporterDBException(Exception):
"""
Exception indicating an error during
database operation in field exporter.
"""
_error_message = ""
_inner_exception = None
def __init__(self, error_message, inner_exception = None):
"""Constructor of the exception"""
Exception.__init__(self, error_message, inner_exception)
self._error_message = error_message
self._inner_exception = inner_exception
def get_error_message(self):
"""
Returns the error message that explains
the reason for the exception
"""
return self._error_message
def get_inner_exception(self):
"""
Returns the inner exception that is the
cause for the current exception
"""
return self._inner_exception
def save_job(user_id, job):
"""Saves job in the database. If the job already exists it will be updated.
@param user_id: identifier of the user owning the job
@param job: Object containing information about the job
@return: Returns the identifier of the job
"""
job_id = job.get_id()
if _exist_job(job_id):
return _update_job(job)
else:
return _insert_job(user_id, job)
def delete_job(job_id):
"""Deletes a job from the database
@param job_id: identifier of the job that has to be deleted
@return 1 if delete was successful
"""
query = """UPDATE expJOB SET deleted = 1 WHERE id=%s"""
query_parameters = (job_id, )
result = run_sql(query, query_parameters)
return int(result)
def _exist_job(job_id):
"""Checks if a job exist in the database
@param job_id: identifier of the job
@return: True if the job exists, otherwise return False
"""
query = """SELECT COUNT(id) FROM expJOB WHERE id=%s"""
result = run_sql(query, (job_id, ))
if 1 == result[0][0]:
return True
return False
def get_job(job_id):
"""Loads job from the database.
@param job_id: identifier of the job
@return: Job object containing information about the job
or None if the job does not exist"""
if not _exist_job(job_id):
return None
query = """SELECT id,
jobname,
jobfreq,
output_format,
DATE_FORMAT(lastrun,'%%Y-%%m-%%d %%H:%%i:%%s'),
output_directory
FROM expJOB WHERE id=%s"""
query_result = run_sql(query, (job_id,))
(id, name, frequency, output_format, last_run, output_directory) = query_result[0]
job = Job(id,
name,
frequency,
output_format,
convert_datetext_to_datestruct(last_run),
output_directory)
return job
def get_job_by_name(job_name):
"""Loads the first job with the given name found in database.
@param job_name: name of the job
@return: Job object containing information about the job
or None if the job does not exist"""
query = """SELECT id,
jobname,
jobfreq,
output_format,
DATE_FORMAT(lastrun,'%%Y-%%m-%%d %%H:%%i:%%s'),
output_directory
FROM expJOB WHERE jobname=%s"""
query_result = run_sql(query, (job_name,))
if 0 == len(query_result):
return None
(id, name, frequency, output_format, last_run, output_directory) = query_result[0]
job = Job(id,
name,
frequency,
output_format,
convert_datetext_to_datestruct(last_run),
output_directory)
return job
def get_all_jobs(user_id):
"""Loads all jobs from the database.
@param user_id: identifier of the user owning the jobs
@return: list of Job objects containing all the jobs
owned by the user given as a parameter"""
query = """SELECT expJOB.id,
expJOB.jobname,
expJOB.jobfreq,
expJOB.output_format,
DATE_FORMAT(expJOB.lastrun,'%%Y-%%m-%%d %%H:%%i:%%s'),
expJOB.output_directory
FROM expJOB
INNER JOIN user_expJOB
ON expJOB.id = user_expJOB.id_expJOB
WHERE user_expJOB.id_user = %s
AND expJOB.deleted = 0
"""
query_parameters = (user_id, )
query_result = run_sql(query, query_parameters)
all_jobs = []
for (job_id, name, frequency, output_format, last_run, output_directory) in query_result:
job = Job(job_id,
name,
frequency,
output_format,
convert_datetext_to_datestruct(last_run),
output_directory)
all_jobs.append(job)
return all_jobs
def _insert_job(user_id, job):
"""Inserts new job into database.
@param user_id: identifier of the user owning the job
@param job: Job object containing information about the job
@return: Returns the identifier of the job"""
job_id = run_sql("""INSERT INTO expJOB(jobname,
jobfreq,
output_format,
lastrun,
output_directory)
VALUES(%s, %s, %s, %s, %s)""",
(job.get_name(),
job.get_frequency(),
job.get_output_format(),
convert_datestruct_to_datetext(job.get_last_run()),
job.get_output_directory()
))
# create relation between job and user
run_sql("""INSERT INTO user_expJOB(id_user,
id_expJOB)
VALUES(%s, %s)""",
(user_id,
job_id
))
return job_id
def _update_job(job):
"""Updates data about existing job in the database.
@param job: Object containing information about the job.
"""
run_sql("""UPDATE expJOB SET jobname = %s,
jobfreq = %s,
output_format = %s,
lastrun = %s,
output_directory = %s
WHERE id=%s""",
(job.get_name(),
job.get_frequency(),
job.get_output_format(),
convert_datestruct_to_datetext(job.get_last_run()),
job.get_output_directory(),
job.get_id()
))
return job.get_id()
def save_query(query, job_id):
"""Saves query in database. If the query already exists it will be updated.
@param query: Object containing information about the query
@param job_id: identifier of the job, containing the query
@return: Returns the identifier of the query
"""
query_id = query.get_id()
if _exist_query(query_id):
return _update_query(query)
else:
return _insert_query(query, job_id)
def _exist_query(query_id):
"""Checks if a query exist in the database
@param query_id: identifier of the query
@return: True if the query exists, otherwise return False
"""
query = """SELECT COUNT(id) FROM expQUERY WHERE id=%s"""
query_parameters = (query_id, )
query_result = run_sql(query, query_parameters)
if 1 == query_result[0][0]:
return True
return False
def _insert_query(query, job_id):
"""Inserts new query into database.
@param query: Object containing information about the query
@param job_id: Identifier of the job owning the query
@return: Returns the identifier of the query"""
# We always attach a query to a job. If the given job id
# does not exist, it is an error
if not _exist_job(job_id):
raise FieldExporterDBException("There is no job with id %s" %(job_id,))
output_fields = ",".join(query.get_output_fields())
query_id = run_sql("""INSERT INTO expQUERY(name,
search_criteria,
output_fields,
notes)
VALUES(%s, %s, %s, %s)""",
(query.get_name(),
query.get_search_criteria(),
output_fields,
query.get_comment()
))
run_sql("""INSERT INTO expJOB_expQUERY(id_expJOB,
id_expQUERY)
VALUES(%s, %s)""",
(job_id,
query_id
))
return query_id
def _update_query(query):
"""Updates data about existing query in the database.
@param query: Object containing information about the query.
"""
output_fields = ",".join(query.get_output_fields())
run_sql("""UPDATE expQUERY SET name = %s,
search_criteria = %s,
output_fields = %s,
notes = %s
WHERE id=%s""",
(query.get_name(),
query.get_search_criteria(),
output_fields,
query.get_comment(),
query.get_id()
))
return query.get_id()
def get_job_queries(job_id):
"""Returns a list of all job queries
@param job_id: identifier of the job
@return: list of Query objects"""
query = """SELECT id,
expQUERY.name,
expQUERY.search_criteria,
expQUERY.output_fields,
expQUERY.notes
FROM expQUERY
INNER JOIN expJOB_expQUERY
ON expQUERY.id = expJOB_expQUERY.id_expQUERY
WHERE expJOB_expQUERY.id_expJOB = %s
AND expQUERY.deleted = 0
"""
query_parameters = (job_id, )
query_result = run_sql(query, query_parameters)
all_queries = []
for (query_id, name, search_criteria, output_fields, comment) in query_result:
output_fields_list = output_fields.split(",")
query = Query(query_id,
name,
search_criteria,
comment,
output_fields_list)
all_queries.append(query)
return all_queries
def get_query(query_id):
"""Loads query from the database.
@param query_id: identifier of the query
@return: Query object containing information about the query
or None if the query does not exist"""
if not _exist_query(query_id):
return None
query = """SELECT id,
name,
search_criteria,
output_fields,
notes
FROM expQUERY WHERE id=%s"""
query_parameters = (query_id, )
query_result = run_sql(query, query_parameters)
(id, name, search_criteria, output_fields_text, comment) = query_result[0]
output_fields = output_fields_text.split(",")
job_query = Query(id, name, search_criteria, comment, output_fields)
return job_query
def delete_query(query_id):
"""Deletes a query from the database
@param query_id: identifier of the query that has to be deleted
@return 1 if deletion was successful
"""
query = """UPDATE expQUERY SET deleted = 1 WHERE id=%s"""
query_parameters = (query_id, )
result = run_sql(query, query_parameters)
return int(result)
def save_job_result(job_result):
"""Saves a job result
@param job_result: JobResult object containing information about
the job and its result
@return: Returns the identifier of the job result
"""
#Save results in output directory
_save_job_result_in_output_directory(job_result)
# insert information about the job result in
# expJOBRESULT table
job_id = job_result.get_job().get_id()
execution_time = convert_datestruct_to_datetext(job_result.get_execution_date_time())
status = job_result.get_status()
status_message = job_result.get_status_message()
job_result_id = run_sql("""INSERT INTO expJOBRESULT(id_expJOB,
execution_time,
status,
status_message)
VALUES(%s, %s, %s, %s)""",
(job_id,
execution_time,
status,
status_message
))
query_results = job_result.get_query_results()
for current_query_result in query_results:
_insert_query_result(current_query_result, job_result_id)
return job_result_id
def _save_job_result_in_output_directory(job_result):
"""Saves a job result to the output directory of the job
if it is specified
@param job_result: JobResult object containing information about
the job and its result
"""
output_directory = job_result.get_job().get_output_directory()
if "" == output_directory or None == output_directory:
return
# remove the slash from the end of the path if it exists
if output_directory[-1] == os.sep:
output_directory = output_directory[:-1]
# if the directory does not exist then create it
if not os.path.exists(output_directory):
try:
os.makedirs(output_directory)
except(IOError, OSError), exception:
job_result.set_status(job_result.STATUS_CODE_ERROR)
job_result.set_status_message("Output directory %s does not exist and cannot be ctreated."
% (output_directory, ))
return
# if it is not a path to a directory, report an error
if not os.path.isdir(output_directory):
job_result.set_status(job_result.STATUS_CODE_ERROR)
job_result.add_status_message("%s is not a directory."
% (output_directory, ))
return
query_results = job_result.get_query_results()
output_format = job_result.get_job().get_output_format()
for current_query_result in query_results:
try:
_save_query_result_in_file(current_query_result, output_directory, output_format)
except (IOError, OSError), exception:
job_result.set_status(job_result.STATUS_CODE_ERROR)
job_result.add_status_message("Failed to write result in file for query " +
current_query_result.get_query().get_name())
def _save_query_result_in_file(query_result, output_directory, output_format):
"""Saves query result in a file in a specified directory
@param query_result: QueryResult object containing information about the query result
@param output_directory: path to a directory where the new file will be placed
"""
file_name = query_result.get_query().get_name()
path = output_directory + os.sep + file_name
print path
output_file = None
try:
output_file = open(path, "w")
text_to_write = query_result.get_result(output_format)
output_file.write(text_to_write)
finally:
if not output_file is None:
output_file.close()
def create_temporary_zip_file_with_job_result(job_result):
"""Creates temporary file containing the zipped content
of the job result and returns the path to the file.
The caller of the method is responsible for deleting the
temporary file when done with it.
@param job_result: job result that should be stored in the file
@return: the absolute path name to the temporary file where the information
is stored."""
# create temporary directory
path_to_temporary_directory = tempfile.mkdtemp()
# save the job result into the temporary directory
job_result.get_job().set_output_directory(path_to_temporary_directory)
_save_job_result_in_output_directory(job_result)
# create temporary file for zipping the content of the directory
(temp_zip_file, path_to_temp_zip_file) = tempfile.mkstemp(suffix = ".zip")
os.close(temp_zip_file)
# zip the content of the directory
_zip_directory_content_to_file(path_to_temporary_directory,
path_to_temp_zip_file)
# delete the temporary directory
shutil.rmtree(path_to_temporary_directory)
# return the path to the temporary file
return path_to_temp_zip_file
def _zip_directory_content_to_file(path_to_directory, file):
"""
Zips the whole content of a directory and adds it to
a file
@param path_to_directory: directory which content will be
added to the archive
@param file: path to a file (a string) or a file-like object
where content will be added.
"""
zip_file = zipfile.ZipFile(file = file, mode = "w")
_write_directory_to_zip_file(path_to_directory, zip_file)
zip_file.close()
def _write_directory_to_zip_file(path_to_directory, zip_file, arcname = ""):
"""Writes content of a directory to a zip file. If the directory
contains subdirectories they are also added in the archive
@param path_to_directory: directory which content will be
added to the archive
@param zip_file: ZipFile object where directory content will be written
@param arcname: archive name of the directory
"""
file_list = os.listdir(path_to_directory)
for current_file_name in file_list:
path_to_current_file = path_to_directory + os.sep + current_file_name
# add directly the files
if os.path.isfile(path_to_current_file):
zip_file.write(path_to_current_file, arcname + current_file_name)
# traverse recursively the directories and add their content
if os.path.isdir(path_to_current_file):
current_arcname = arcname + current_file_name + os.sep
current_path_to_directory = path_to_directory + os.sep + current_file_name
_write_directory_to_zip_file(current_path_to_directory, zip_file, current_arcname)
def get_all_job_result_ids(user_id):
"""Return list of the identifieres of all job reults.
The list is sorted in descending according to execution date of the jobs.
@param user_id: identifier of the user owning the jobs,
that produce the results
@return: list of identifiers (integer numbers)
of the results owned by the given user
"""
query = """SELECT expJOBRESULT.id
FROM expJOBRESULT
INNER JOIN user_expJOB
ON expJOBRESULT.id_expJOB = user_expJOB.id_expJOB
WHERE id_user = %s
ORDER BY execution_time DESC
"""
query_parameters = (user_id, )
query_result = run_sql(query, query_parameters)
all_job_ids = []
for (current_job_result_id, ) in query_result:
all_job_ids.append(current_job_result_id)
return all_job_ids
def get_job_results(result_identifiers):
"""Return a list of JobResult objects corresponding to identifiers
given as a parameter
@return: List of JobResult objects
The order of the results in the list is the same as their
corresponding identifiers in the input list
"""
job_results = []
for job_result_id in result_identifiers:
current_result = get_job_result(job_result_id)
job_results.append(current_result)
return job_results
def get_job_result(job_result_id):
"""Loads job result from the database.
@param job_result_id: identifier of the job result
@return: JobResult object containing information about the job result
or None if job result with this identifier does not exist"""
if not _exist_job_result(job_result_id):
return None
query = """SELECT id,
id_expJOB,
DATE_FORMAT(execution_time,'%%Y-%%m-%%d %%H:%%i:%%s'),
status,
status_message
FROM expJOBRESULT WHERE id=%s"""
query_result = run_sql(query, (job_result_id,))
(id, job_id, execution_date_time, status, status_message) = query_result[0]
job = get_job(job_id)
query_results = _get_query_results_for_job_result(id)
job_result = JobResult(job,
query_results,
convert_datetext_to_datestruct(execution_date_time),
id,
status,
status_message)
return job_result
def _get_query_results_for_job_result(job_result_id):
"""Retrieves all query results owned by a given job result.
@param job_result_id: identifier of the job result that owns the queries.
@return: list of QueryResult objects
containing information about the query results
"""
query = """SELECT expQUERYRESULT.id,
expQUERYRESULT.id_expQUERY,
expQUERYRESULT.result,
expQUERYRESULT.status,
expQUERYRESULT.status_message
FROM expQUERYRESULT
INNER JOIN expJOBRESULT_expQUERYRESULT
ON expQUERYRESULT.id = expJOBRESULT_expQUERYRESULT.id_expQUERYRESULT
WHERE expJOBRESULT_expQUERYRESULT.id_expJOBRESULT = %s
"""
query_parameters = (job_result_id, )
query_result = run_sql(query, query_parameters)
print query_result
all_query_results = []
for (query_result_id, query_id, result, status, status_message) in query_result:
current_query = get_query(query_id)
current_result = QueryResult(current_query,
result,
query_result_id,
status,
status_message)
all_query_results.append(current_result)
return all_query_results
def is_user_owner_of_job(user_id, job_id):
"""Checks if a user is owner of a job
@param user_id: identifier of the user
@param job_id: identifier of the job
@return: True if user is owner of the job, otherwise return False
"""
query = """SELECT COUNT(id_user)
FROM user_expJOB
WHERE id_user=%s and id_expJOB=%s"""
result = run_sql(query, (user_id, job_id))
if 1 == result[0][0]:
return True
return False
def is_user_owner_of_job_result(user_id, job_result_id):
"""Checks if a user is owner of a job result
@param user_id: identifier of the user
@param job_result_id: identifier of the job result
@return: True if user is owner of the job result, otherwise return False
"""
job_result = get_job_result(job_result_id)
if None == job_result:
return False
job_id = job_result.get_job().get_id()
return is_user_owner_of_job(user_id, job_id)
def is_user_owner_of_query(user_id, query_id):
"""Checks if a user is owner of a query
@param user_id: identifier of the user
@param job_query_id: identifier of the query
@return: True if user is owner of the query, otherwise return False
"""
query = """SELECT COUNT(user_expJOB.id_user)
FROM user_expJOB
INNER JOIN expJOB_expQUERY
ON user_expJOB.id_expJOB = expJOB_expQUERY.id_expJOB
WHERE id_user = %s AND expJOB_expQUERY.id_expQUERY = %s
"""
result = run_sql(query, (user_id, query_id))
if 1 == result[0][0]:
return True
return False
def _insert_query_result(query_result, job_result_id):
"""Inserts new query result into database.
@param query_result: QueryResult object containing
information about the query result
@param job_result_id: Identifier of the job result owning the query result
@return: Returns the identifier of the query result"""
# We always attach a query result to a job result. If the given
# job result id does not exist, it is an error
if not _exist_job_result(job_result_id):
raise FieldExporterDBException("There is no job result with id %s"
%(job_result_id,))
query_id = query_result.get_query().get_id()
result = query_result.get_result()
status = query_result.get_status()
status_message = query_result.get_status_message()
query_result_id = run_sql("""INSERT INTO expQUERYRESULT(id_expQUERY,
result,
status,
status_message)
VALUES(%s, %s, %s, %s)""",
(query_id,
result,
status,
status_message
))
run_sql("""INSERT INTO expJOBRESULT_expQUERYRESULT(id_expJOBRESULT,
id_expQUERYRESULT)
VALUES(%s, %s)""",
(job_result_id,
query_result_id
))
return query_result_id
def _exist_job_result(job_result_id):
"""Checks if a job result exist in the database
@param job_result_id: identifier of the job result
@return: True if the job result exists, otherwise return False
"""
query = """SELECT COUNT(id) FROM expJOBRESULT WHERE id=%s"""
result = run_sql(query, (job_result_id, ))
if 1 == result[0][0]:
return True
return False
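# Illustrative usage sketch (not part of the original module): a caller in the
# web layer might combine the ownership check and the (module-private) result
# retrieval above roughly like this. The helper name below is a placeholder.
#
# def get_results_for_user(user_id, job_result_id):
#     """Return query results only if the user owns the corresponding job result."""
#     if not is_user_owner_of_job_result(user_id, job_result_id):
#         return []
#     return _get_query_results_for_job_result(job_result_id)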
|
gpl-2.0
|
vacancy/TensorArtist
|
docs/conf.py
|
1
|
9615
|
# -*- coding: utf-8 -*-
#
# TensorArtist documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 25 17:18:31 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
sys.path.insert(0, '../')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'TensorArtist'
copyright = u'2017, Jiayuan Mao, Honghua Dong'
author = u'Jiayuan Mao, Honghua Dong'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.4.1a'
# The full version, including alpha/beta/rc tags.
release = u'0.4.1a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'classic'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'TensorArtist v0.3.1a'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'TensorArtistdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'TensorArtist.tex', u'TensorArtist Documentation',
u'Jiayuan Mao, Honghua Dong', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'tensorartist', u'TensorArtist Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'TensorArtist', u'TensorArtist Documentation',
author, 'TensorArtist', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
mit
|
tlksio/tlksio
|
env/lib/python3.4/site-packages/django/contrib/auth/handlers/modwsgi.py
|
54
|
1303
|
from django import db
from django.contrib import auth
from django.utils.encoding import force_bytes
UserModel = auth.get_user_model()
def check_password(environ, username, password):
"""
Authenticates against Django's auth database
mod_wsgi docs specify None, True, False as return value depending
on whether the user exists and authenticates.
"""
# db connection state is managed similarly to the wsgi handler
# as mod_wsgi may call these functions outside of a request/response cycle
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return None
if not user.is_active:
return None
return user.check_password(password)
finally:
db.close_old_connections()
def groups_for_user(environ, username):
"""
Authorizes a user based on groups
"""
db.reset_queries()
try:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
return []
if not user.is_active:
return []
return [force_bytes(group.name) for group in user.groups.all()]
finally:
db.close_old_connections()
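# Illustrative sketch (an assumption, not part of this module): mod_wsgi can be
# pointed at a script that exposes check_password()/groups_for_user(), e.g. a
# project wsgi.py that imports them, via directives roughly like the following.
# Paths and names below are placeholders.
#
# <Location "/secret">
#     AuthType Basic
#     AuthName "Restricted"
#     Require valid-user
#     AuthBasicProvider wsgi
#     WSGIAuthUserScript /path/to/mysite/wsgi.py
# </Location>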
|
mit
|
chhans/tor-automation
|
capture.py
|
1
|
3679
|
from selenium import webdriver
from torProfile import TorProfile
from pyvirtualdisplay import Display
import selenium
from random import randint
import subprocess
import os
import signal
import sys
import time
training_data = "Dumps/training/"
experiment_data = "Dumps/experiment/"
iface = "eth1"
sleep_time = 2.0
load_timeout = 120.0
def createPageList(in_file, n, random):
with open(in_file, "r") as f:
if random:
sites = [""]*n
all_sites = [next(f).split(",")[1].rstrip() for x in xrange(1000)]
for i in range(n):
sites[i] = all_sites[randint(0, n-1)]
else:
sites = [next(f).split(",")[1].rstrip() for x in xrange(n)]
f.close()
return sites
def mkdir(dir):
if not os.path.exists(dir):
os.makedirs(dir)
def getFilename(dir):
max_i = -1
try:
max_i = max([int(x.split(".")[0]) for x in os.listdir(dir)])
except:
pass
return "%d.cap" % (max_i + 1)
def startTshark(f_path):
command = "tshark -f tcp -i %s -w %s" % (iface, f_path)
FNULL = open(os.devnull, 'w')
tshark_proc = subprocess.Popen(command, stdout=FNULL, close_fds=True, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
return tshark_proc.pid
def stopTshark(pid):
try:
os.killpg(pid, signal.SIGTERM)
except:
print "Could not stop tshark process"
FNULL = open(os.devnull, 'w')
subprocess.Popen("killall tshark", stdout=FNULL, close_fds=True, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
def loadPage(url):
driver = webdriver.Firefox(firefox_profile=TorProfile().p)
driver.set_page_load_timeout(load_timeout)
try:
driver.get("http://"+url)
driver.close()
time.sleep(sleep_time)
except selenium.common.exceptions.TimeoutException:
print "Error lading page: timed out"
time.sleep(sleep_time)
driver.close()
return -1
except (KeyboardInterrupt, SystemExit):
driver.close()
raise
except:
print "Unexpected error when loading page:", sys.exc_info()[0]
time.sleep(sleep_time)
driver.close()
raise
def removeFile(f_path):
os.remove(f_path)
def capturePage(folder, page):
# Create directory for page
folder = folder + page.split("/")[0]
mkdir(folder)
f_path = "%s/%s" % (folder, getFilename(folder))
tshark_pid = startTshark(f_path)
try:
s = loadPage(page)
stopTshark(tshark_pid)
if s == -1:
removeFile(f_path)
except (KeyboardInterrupt, SystemExit):
stopTshark(tshark_pid)
removeFile(f_path)
sys.exit()
except:
print "Unexpected error when capturing website:", sys.exc_info()[0]
stopTshark(tshark_pid)
removeFile(f_path)
raise
if __name__ == "__main__":
manual = False
try:
plist = sys.argv[1]
if plist == "manual":
manual = True
elif not os.path.isfile(plist):
print "ERROR: File %s not found" % plist
raise
else:
n = int(sys.argv[2])
iface = sys.argv[3]
t = int(sys.argv[4])
except:
print "Usage:\tpython %s <web page list> <number of pages to visit> <network interface> <training data (0/1)> OR" % sys.argv[0]
print "\tpython %s manual <network interface> <training_data (0/1)> <web page(s)>" % sys.argv[0]
print "Example: python %s alexa.csv 100 eth1 1 (capture training data from the first 100 pages of list alexa.csv on eth1)" % sys.argv[0]
sys.exit()
if manual:
iface = sys.argv[2]
t = int(sys.argv[3])
page_list = []
cnt = 4
while True:
try:
page_list.append(sys.argv[cnt])
cnt += 1
except:
break
else:
page_list = createPageList(plist, n, False)
display = Display(visible=0, size=(800, 600))
display.start()
p = training_data if t else experiment_data
for i,page in enumerate(page_list):
print "Capturing web page %d/%d: %s" % (i+1, len(page_list), page)
capturePage(p, page)
display.stop()
|
mit
|
MSOpenTech/edx-platform
|
common/djangoapps/student/management/commands/create_user.py
|
167
|
4361
|
from optparse import make_option
from django.conf import settings
from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from django.utils import translation
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.forms import AccountCreationForm
from student.models import CourseEnrollment, create_comments_service_user
from student.views import _do_create_account, AccountValidationError
from track.management.tracked_command import TrackedCommand
class Command(TrackedCommand):
help = """
This command creates and registers a user in a given course
as "audit", "verified" or "honor".
example:
# Enroll a user test@example.com into the demo course
# The username and name will default to "test"
manage.py ... create_user -e test@example.com -p insecure -c edX/Open_DemoX/edx_demo_course -m verified
"""
option_list = BaseCommand.option_list + (
make_option('-m', '--mode',
metavar='ENROLLMENT_MODE',
dest='mode',
default='honor',
choices=('audit', 'verified', 'honor'),
help='Enrollment type for user for a specific course'),
make_option('-u', '--username',
metavar='USERNAME',
dest='username',
default=None,
help='Username, defaults to "user" in the email'),
make_option('-n', '--name',
metavar='NAME',
dest='name',
default=None,
help='Name, defaults to "user" in the email'),
make_option('-p', '--password',
metavar='PASSWORD',
dest='password',
default=None,
help='Password for user'),
make_option('-e', '--email',
metavar='EMAIL',
dest='email',
default=None,
help='Email for user'),
make_option('-c', '--course',
metavar='COURSE_ID',
dest='course',
default=None,
help='course to enroll the user in (optional)'),
make_option('-s', '--staff',
dest='staff',
default=False,
action='store_true',
help='give user the staff bit'),
)
def handle(self, *args, **options):
username = options['username']
name = options['name']
if not username:
username = options['email'].split('@')[0]
if not name:
name = options['email'].split('@')[0]
# parse out the course into a coursekey
if options['course']:
try:
course = CourseKey.from_string(options['course'])
# if it's not a new-style course key, parse it from an old-style
# course key
except InvalidKeyError:
course = SlashSeparatedCourseKey.from_deprecated_string(options['course'])
form = AccountCreationForm(
data={
'username': username,
'email': options['email'],
'password': options['password'],
'name': name,
},
tos_required=False
)
# django.utils.translation.get_language() will be used to set the new
# user's preferred language. This line ensures that the result will
# match this installation's default locale. Otherwise, inside a
# management command, it will always return "en-us".
translation.activate(settings.LANGUAGE_CODE)
try:
user, _, reg = _do_create_account(form)
if options['staff']:
user.is_staff = True
user.save()
reg.activate()
reg.save()
create_comments_service_user(user)
except AccountValidationError as e:
print e.message
user = User.objects.get(email=options['email'])
if options['course']:
CourseEnrollment.enroll(user, course, mode=options['mode'])
translation.deactivate()
|
agpl-3.0
|
adammaikai/OmicsPipe2.0
|
build/lib.linux-x86_64-2.7/omics_pipe/modules/htseq.py
|
2
|
1361
|
#!/usr/bin/env python
from omics_pipe.parameters.default_parameters import default_parameters
from omics_pipe.utils import *
p = Bunch(default_parameters)
def htseq(sample, htseq_flag):
'''Runs htseq-count to get raw count data from alignments.
input:
Aligned.out.sort.bam
output:
counts.txt
citation:
Simon Anders, EMBL
link:
http://www-huber.embl.de/users/anders/HTSeq/doc/overview.html
parameters from parameters file:
STAR_RESULTS:
HTSEQ_OPTIONS:
REF_GENES:
HTSEQ_RESULTS:
TEMP_DIR:
SAMTOOLS_VERSION:
BAM_FILE_NAME:
PYTHON_VERSION:
'''
spawn_job(jobname = 'htseq', SAMPLE = sample, LOG_PATH = p.LOG_PATH, RESULTS_EMAIL = p.RESULTS_EMAIL, SCHEDULER = p.SCHEDULER, walltime = "120:00:00", queue = p.QUEUE, nodes = 1, ppn = 32, memory = "40gb", script = "/htseq_drmaa.sh", args_list = [sample,p.STAR_RESULTS,p.HTSEQ_OPTIONS,p.REF_GENES,p.HTSEQ_RESULTS,p.TEMP_DIR,p.SAMTOOLS_VERSION, p.BAM_FILE_NAME, p.PYTHON_VERSION])
job_status(jobname = 'htseq', resultspath = p.HTSEQ_RESULTS, SAMPLE = sample, outputfilename = sample + "_counts.txt", FLAG_PATH = p.FLAG_PATH)
return
if __name__ == '__main__':
htseq(sample, htseq_flag)
sys.exit(0)
|
mit
|
sam-tsai/django-old
|
django/core/validators.py
|
158
|
6583
|
import re
import urlparse
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode
# These values, if given to validate(), will trigger the self.required check.
EMPTY_VALUES = (None, '', [], (), {})
try:
from django.conf import settings
URL_VALIDATOR_USER_AGENT = settings.URL_VALIDATOR_USER_AGENT
except ImportError:
# It's OK if Django settings aren't configured.
URL_VALIDATOR_USER_AGENT = 'Django (http://www.djangoproject.com/)'
class RegexValidator(object):
regex = ''
message = _(u'Enter a valid value.')
code = 'invalid'
def __init__(self, regex=None, message=None, code=None):
if regex is not None:
self.regex = regex
if message is not None:
self.message = message
if code is not None:
self.code = code
if isinstance(self.regex, basestring):
self.regex = re.compile(regex)
def __call__(self, value):
"""
Validates that the input matches the regular expression.
"""
if not self.regex.search(smart_unicode(value)):
raise ValidationError(self.message, code=self.code)
class URLValidator(RegexValidator):
regex = re.compile(
r'^https?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' #domain...
r'localhost|' #localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def __init__(self, verify_exists=False, validator_user_agent=URL_VALIDATOR_USER_AGENT):
super(URLValidator, self).__init__()
self.verify_exists = verify_exists
self.user_agent = validator_user_agent
def __call__(self, value):
try:
super(URLValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain
if value:
value = smart_unicode(value)
scheme, netloc, path, query, fragment = urlparse.urlsplit(value)
try:
netloc = netloc.encode('idna') # IDN -> ACE
except UnicodeError: # invalid domain part
raise e
url = urlparse.urlunsplit((scheme, netloc, path, query, fragment))
super(URLValidator, self).__call__(url)
else:
raise
else:
url = value
if self.verify_exists:
import urllib2
headers = {
"Accept": "text/xml,application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5",
"Accept-Language": "en-us,en;q=0.5",
"Accept-Charset": "ISO-8859-1,utf-8;q=0.7,*;q=0.7",
"Connection": "close",
"User-Agent": self.user_agent,
}
try:
req = urllib2.Request(url, None, headers)
u = urllib2.urlopen(req)
except ValueError:
raise ValidationError(_(u'Enter a valid URL.'), code='invalid')
except: # urllib2.URLError, httplib.InvalidURL, etc.
raise ValidationError(_(u'This URL appears to be a broken link.'), code='invalid_link')
def validate_integer(value):
try:
int(value)
except (ValueError, TypeError), e:
raise ValidationError('')
class EmailValidator(RegexValidator):
def __call__(self, value):
try:
super(EmailValidator, self).__call__(value)
except ValidationError, e:
# Trivial case failed. Try for possible IDN domain-part
if value and u'@' in value:
parts = value.split(u'@')
domain_part = parts[-1]
try:
parts[-1] = parts[-1].encode('idna')
except UnicodeError:
raise e
super(EmailValidator, self).__call__(u'@'.join(parts))
else:
raise
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?$', re.IGNORECASE) # domain
validate_email = EmailValidator(email_re, _(u'Enter a valid e-mail address.'), 'invalid')
slug_re = re.compile(r'^[-\w]+$')
validate_slug = RegexValidator(slug_re, _(u"Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens."), 'invalid')
ipv4_re = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
validate_ipv4_address = RegexValidator(ipv4_re, _(u'Enter a valid IPv4 address.'), 'invalid')
comma_separated_int_list_re = re.compile('^[\d,]+$')
validate_comma_separated_integer_list = RegexValidator(comma_separated_int_list_re, _(u'Enter only digits separated by commas.'), 'invalid')
class BaseValidator(object):
compare = lambda self, a, b: a is not b
clean = lambda self, x: x
message = _(u'Ensure this value is %(limit_value)s (it is %(show_value)s).')
code = 'limit_value'
def __init__(self, limit_value):
self.limit_value = limit_value
def __call__(self, value):
cleaned = self.clean(value)
params = {'limit_value': self.limit_value, 'show_value': cleaned}
if self.compare(cleaned, self.limit_value):
raise ValidationError(
self.message % params,
code=self.code,
params=params,
)
class MaxValueValidator(BaseValidator):
compare = lambda self, a, b: a > b
message = _(u'Ensure this value is less than or equal to %(limit_value)s.')
code = 'max_value'
class MinValueValidator(BaseValidator):
compare = lambda self, a, b: a < b
message = _(u'Ensure this value is greater than or equal to %(limit_value)s.')
code = 'min_value'
class MinLengthValidator(BaseValidator):
compare = lambda self, a, b: a < b
clean = lambda self, x: len(x)
message = _(u'Ensure this value has at least %(limit_value)d characters (it has %(show_value)d).')
code = 'min_length'
class MaxLengthValidator(BaseValidator):
compare = lambda self, a, b: a > b
clean = lambda self, x: len(x)
message = _(u'Ensure this value has at most %(limit_value)d characters (it has %(show_value)d).')
code = 'max_length'
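# Illustrative usage sketch (not part of the original module): inside a
# configured Django project, validators are plain callables that raise
# ValidationError on bad input instead of returning a value.
#
# from django.core.exceptions import ValidationError
# from django.core.validators import validate_email, MaxLengthValidator
#
# try:
#     validate_email(u'not-an-address')
# except ValidationError:
#     pass                      # invalid input is signalled by the exception
#
# check_length = MaxLengthValidator(10)
# check_length(u'short')        # passes silently; longer input would raise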
|
bsd-3-clause
|
rhndg/openedx
|
cms/djangoapps/contentstore/tests/test_orphan.py
|
77
|
4598
|
"""
Test finding orphans via the view and django config
"""
import json
from contentstore.tests.utils import CourseTestCase
from student.models import CourseEnrollment
from contentstore.utils import reverse_course_url
class TestOrphanBase(CourseTestCase):
"""
Base class for Studio tests that require orphaned modules
"""
def setUp(self):
super(TestOrphanBase, self).setUp()
# create chapters and add them to course tree
chapter1 = self.store.create_child(self.user.id, self.course.location, 'chapter', "Chapter1")
self.store.publish(chapter1.location, self.user.id)
chapter2 = self.store.create_child(self.user.id, self.course.location, 'chapter', "Chapter2")
self.store.publish(chapter2.location, self.user.id)
# orphan chapter
orphan_chapter = self.store.create_item(self.user.id, self.course.id, 'chapter', "OrphanChapter")
self.store.publish(orphan_chapter.location, self.user.id)
# create vertical and add it as child to chapter1
vertical1 = self.store.create_child(self.user.id, chapter1.location, 'vertical', "Vertical1")
self.store.publish(vertical1.location, self.user.id)
# create orphan vertical
orphan_vertical = self.store.create_item(self.user.id, self.course.id, 'vertical', "OrphanVert")
self.store.publish(orphan_vertical.location, self.user.id)
# create component and add it to vertical1
html1 = self.store.create_child(self.user.id, vertical1.location, 'html', "Html1")
self.store.publish(html1.location, self.user.id)
# create component and add it as a child to vertical1 and orphan_vertical
multi_parent_html = self.store.create_child(self.user.id, vertical1.location, 'html', "multi_parent_html")
self.store.publish(multi_parent_html.location, self.user.id)
orphan_vertical.children.append(multi_parent_html.location)
self.store.update_item(orphan_vertical, self.user.id)
# create an orphaned html module
orphan_html = self.store.create_item(self.user.id, self.course.id, 'html', "OrphanHtml")
self.store.publish(orphan_html.location, self.user.id)
self.store.create_child(self.user.id, self.course.location, 'static_tab', "staticuno")
self.store.create_child(self.user.id, self.course.location, 'about', "overview")
self.store.create_child(self.user.id, self.course.location, 'course_info', "updates")
class TestOrphan(TestOrphanBase):
"""
Test finding orphans via view and django config
"""
def setUp(self):
super(TestOrphan, self).setUp()
self.orphan_url = reverse_course_url('orphan_handler', self.course.id)
def test_mongo_orphan(self):
"""
Test that old mongo finds the orphans
"""
orphans = json.loads(
self.client.get(
self.orphan_url,
HTTP_ACCEPT='application/json'
).content
)
self.assertEqual(len(orphans), 3, "Wrong # {}".format(orphans))
location = self.course.location.replace(category='chapter', name='OrphanChapter')
self.assertIn(location.to_deprecated_string(), orphans)
location = self.course.location.replace(category='vertical', name='OrphanVert')
self.assertIn(location.to_deprecated_string(), orphans)
location = self.course.location.replace(category='html', name='OrphanHtml')
self.assertIn(location.to_deprecated_string(), orphans)
def test_mongo_orphan_delete(self):
"""
Test that old mongo deletes the orphans
"""
self.client.delete(self.orphan_url)
orphans = json.loads(
self.client.get(self.orphan_url, HTTP_ACCEPT='application/json').content
)
self.assertEqual(len(orphans), 0, "Orphans not deleted {}".format(orphans))
# make sure that any children with one orphan parent and one non-orphan
# parent are not deleted
self.assertTrue(self.store.has_item(self.course.id.make_usage_key('html', "multi_parent_html")))
def test_not_permitted(self):
"""
Test that auth restricts get and delete appropriately
"""
test_user_client, test_user = self.create_non_staff_authed_user_client()
CourseEnrollment.enroll(test_user, self.course.id)
response = test_user_client.get(self.orphan_url)
self.assertEqual(response.status_code, 403)
response = test_user_client.delete(self.orphan_url)
self.assertEqual(response.status_code, 403)
|
agpl-3.0
|
toontownfunserver/Panda3D-1.9.0
|
python/Tools/Scripts/h2py.py
|
21
|
5962
|
#! /usr/bin/env python
# Read #define's and translate to Python code.
# Handle #include statements.
# Handle #define macros with one argument.
# Anything that isn't recognized or doesn't translate into valid
# Python is ignored.
# Without filename arguments, acts as a filter.
# If one or more filenames are given, output is written to corresponding
# filenames in the local directory, translated to all uppercase, with
# the extension replaced by ".py".
# By passing one or more options of the form "-i regular_expression"
# you can specify additional strings to be ignored. This is useful
# e.g. to ignore casts to u_long: simply specify "-i '(u_long)'".
# XXX To do:
# - turn trailing C comments into Python comments
# - turn C Boolean operators "&& || !" into Python "and or not"
# - what to do about #if(def)?
# - what to do about macros with multiple parameters?
import sys, re, getopt, os
p_define = re.compile('^[\t ]*#[\t ]*define[\t ]+([a-zA-Z0-9_]+)[\t ]+')
p_macro = re.compile(
'^[\t ]*#[\t ]*define[\t ]+'
'([a-zA-Z0-9_]+)\(([_a-zA-Z][_a-zA-Z0-9]*)\)[\t ]+')
p_include = re.compile('^[\t ]*#[\t ]*include[\t ]+<([a-zA-Z0-9_/\.]+)')
p_comment = re.compile(r'/\*([^*]+|\*+[^/])*(\*+/)?')
p_cpp_comment = re.compile('//.*')
ignores = [p_comment, p_cpp_comment]
p_char = re.compile(r"'(\\.[^\\]*|[^\\])'")
p_hex = re.compile(r"0x([0-9a-fA-F]+)L?")
filedict = {}
importable = {}
try:
searchdirs=os.environ['include'].split(';')
except KeyError:
try:
searchdirs=os.environ['INCLUDE'].split(';')
except KeyError:
try:
if sys.platform.find("beos") == 0:
searchdirs=os.environ['BEINCLUDES'].split(';')
elif sys.platform.startswith("atheos"):
searchdirs=os.environ['C_INCLUDE_PATH'].split(':')
else:
raise KeyError
except KeyError:
searchdirs=['/usr/include']
try:
searchdirs.insert(0, os.path.join('/usr/include',
os.environ['MULTIARCH']))
except KeyError:
pass
def main():
global filedict
opts, args = getopt.getopt(sys.argv[1:], 'i:')
for o, a in opts:
if o == '-i':
ignores.append(re.compile(a))
if not args:
args = ['-']
for filename in args:
if filename == '-':
sys.stdout.write('# Generated by h2py from stdin\n')
process(sys.stdin, sys.stdout)
else:
fp = open(filename, 'r')
outfile = os.path.basename(filename)
i = outfile.rfind('.')
if i > 0: outfile = outfile[:i]
modname = outfile.upper()
outfile = modname + '.py'
outfp = open(outfile, 'w')
outfp.write('# Generated by h2py from %s\n' % filename)
filedict = {}
for dir in searchdirs:
if filename[:len(dir)] == dir:
filedict[filename[len(dir)+1:]] = None # no '/' trailing
importable[filename[len(dir)+1:]] = modname
break
process(fp, outfp)
outfp.close()
fp.close()
def pytify(body):
# replace ignored patterns by spaces
for p in ignores:
body = p.sub(' ', body)
# replace char literals by ord(...)
body = p_char.sub("ord('\\1')", body)
# Compute negative hexadecimal constants
start = 0
UMAX = 2*(sys.maxint+1)
while 1:
m = p_hex.search(body, start)
if not m: break
s,e = m.span()
val = long(body[slice(*m.span(1))], 16)
if val > sys.maxint:
val -= UMAX
body = body[:s] + "(" + str(val) + ")" + body[e:]
start = s + 1
return body
def process(fp, outfp, env = {}):
lineno = 0
while 1:
line = fp.readline()
if not line: break
lineno = lineno + 1
match = p_define.match(line)
if match:
# gobble up continuation lines
while line[-2:] == '\\\n':
nextline = fp.readline()
if not nextline: break
lineno = lineno + 1
line = line + nextline
name = match.group(1)
body = line[match.end():]
body = pytify(body)
ok = 0
stmt = '%s = %s\n' % (name, body.strip())
try:
exec stmt in env
except:
sys.stderr.write('Skipping: %s' % stmt)
else:
outfp.write(stmt)
match = p_macro.match(line)
if match:
macro, arg = match.group(1, 2)
body = line[match.end():]
body = pytify(body)
stmt = 'def %s(%s): return %s\n' % (macro, arg, body)
try:
exec stmt in env
except:
sys.stderr.write('Skipping: %s' % stmt)
else:
outfp.write(stmt)
match = p_include.match(line)
if match:
regs = match.regs
a, b = regs[1]
filename = line[a:b]
if importable.has_key(filename):
outfp.write('from %s import *\n' % importable[filename])
elif not filedict.has_key(filename):
filedict[filename] = None
inclfp = None
for dir in searchdirs:
try:
inclfp = open(dir + '/' + filename)
break
except IOError:
pass
if inclfp:
outfp.write(
'\n# Included from %s\n' % filename)
process(inclfp, outfp, env)
else:
sys.stderr.write('Warning - could not find file %s\n' %
filename)
if __name__ == '__main__':
main()
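# Illustrative example (not part of the original script): given a header file
# containing
#
#     #define MAX_LEN 0x40
#     #define SQUARE(x) ((x)*(x))
#
# process() would emit roughly
#
#     MAX_LEN = (64)
#     def SQUARE(x): return ((x)*(x))
#
# into the generated .py module; definitions that do not translate to valid
# Python are skipped with a message on stderr, and other lines are ignored.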
|
bsd-3-clause
|
fritzstauff/trigger
|
trigger/contrib/xmlrpc/server.py
|
13
|
7760
|
"""
Trigger Twisted XMLRPC server with an SSH manhole. Supports SSL.
This provides a daemonized Twisted reactor loop so that Trigger and client
applications do not have to co-habitate. Using the XMLRPC server model, all
Trigger compatibility tasks can be executed using simple XMLRPC clients that
call the appropriate method with arguments on the local XMLRPC server instance.
New methods can be added by way of plugins.
See ``examples/xmlrpc_server`` in the Trigger source distribution for a simple
usage example.
"""
import os
import sys
import types
from trigger.contrib.commando import CommandoApplication
from trigger.utils import importlib
from twisted.internet import defer
from twisted.python import log
from twisted.web import xmlrpc, server
# Enable Deferred debugging if ``DEBUG`` is set.
if os.getenv('DEBUG'):
defer.setDebugging(True)
class TriggerXMLRPCServer(xmlrpc.XMLRPC):
"""
Twisted XMLRPC server front-end for Commando
"""
def __init__(self, *args, **kwargs):
xmlrpc.XMLRPC.__init__(self, *args, **kwargs)
self.allowNone = True
self.useDateTime = True
self._handlers = []
self._procedure_map = {}
self.addHandlers(self._handlers)
def lookupProcedure(self, procedurePath):
"""
Lookup a method dynamically.
1. First, see if it's provided by a sub-handler.
2. Or try a self-defined method (prefixed with `xmlrpc_`)
3. Lastly, try dynamically mapped methods.
4. Or fail loudly.
"""
log.msg("LOOKING UP:", procedurePath)
if procedurePath.find(self.separator) != -1:
prefix, procedurePath = procedurePath.split(self.separator, 1)
handler = self.getSubHandler(prefix)
if handler is None:
raise xmlrpc.NoSuchFunction(self.NOT_FOUND,
"no such subHandler %s" % prefix)
return handler.lookupProcedure(procedurePath)
# Try self-defined methods first...
f = getattr(self, "xmlrpc_%s" % procedurePath, None)
# Try mapped methods second...
if f is None:
f = self._procedure_map.get(procedurePath, None)
if not f:
raise xmlrpc.NoSuchFunction(self.NOT_FOUND,
"procedure %s not found" % procedurePath)
elif not callable(f):
raise xmlrpc.NoSuchFunction(self.NOT_FOUND,
"procedure %s not callable" % procedurePath)
else:
return f
def addHandlers(self, handlers):
"""Add multiple handlers"""
for handler in handlers:
self.addHandler(handler)
def addHandler(self, handler):
"""
Add a handler and bind it to an XMLRPC procedure.
Handler must a be a function or an instance of an object with handler
methods.
"""
# Register it
log.msg("Adding handler: %s" % handler)
self._handlers.append(handler)
# If it's a function, bind it as its own internal name.
if type(handler) in (types.BuiltinFunctionType, types.FunctionType):
name = handler.__name__
if name.startswith('xmlrpc_'):
name = name[7:] # If it starts w/ 'xmlrpc_', slice it out!
log.msg("Mapping function %s..." % name)
self._procedure_map[name] = handler
return None
# Otherwise, walk the methods on any class objects and bind them by
# their attribute name.
for method in dir(handler):
if not method.startswith('_'):
log.msg("Mapping method %s..." % method)
self._procedure_map[method] = getattr(handler, method)
def listProcedures(self):
"""Return a list of the registered procedures"""
return self._procedure_map.keys()
def xmlrpc_add_handler(self, mod_name, task_name, force=False):
"""
Add a handler object from a remote call.
"""
module = None
if mod_name in sys.modules:
# Check if module is already loaded
if force:
log.msg("Forcing reload of handler: %r" % task_name)
# Allow user to force reload of module
module = reload(sys.modules[mod_name])
else:
# If not forcing reload, don't bother with the rest
log.msg("%r already loaded" % mod_name)
return None
else:
log.msg("Trying to add handler: %r" % task_name)
try:
module = importlib.import_module(mod_name, __name__)
except NameError as msg:
log.msg('NameError: %s' % msg)
except:
pass
if not module:
log.msg(" Unable to load module: %s" % mod_name)
return None
else:
handler = getattr(module, 'xmlrpc_' + task_name)
# XMLRPC methods will not accept kwargs. Instead, we pass 2 positional
# args: args and kwargs, to a shell method (dummy) that will explode
# them when sending to the user defined method (handler).
def dummy(self, args, kwargs):
return handler(*args, **kwargs)
# TODO (jathan): Make this work!!
# This just simply does not work. I am not sure why, but it results in a
# "<Fault 8001: 'procedure config_device not found'>" error!
# # Bind the dummy shell method to TriggerXMLRPCServer. The function's
# # name will be used to map it to the "dummy" handler object.
# dummy.__name__ = task_name
# self.addHandler(dummy)
# This does work.
# Bind the dummy shell method to TriggerXMLRPCServer as 'xmlrpc_' + task_name
setattr(TriggerXMLRPCServer, 'xmlrpc_' + task_name, dummy)
def xmlrpc_list_subhandlers(self):
return list(self.subHandlers)
def xmlrpc_execute_commands(self, args, kwargs):
"""Execute ``commands`` on ``devices``"""
c = CommandoApplication(*args, **kwargs)
d = c.run()
return d
def xmlrpc_add(self, x, y):
"""Adds x and y"""
return x + y
def xmlrpc_fault(self):
"""
Raise a Fault indicating that the procedure should not be used.
"""
raise xmlrpc.Fault(123, "The fault procedure is faulty.")
def _ebRender(self, failure):
"""
Custom exception rendering.
Ref: https://netzguerilla.net/iro/dev/_modules/iro/view/xmlrpc.html
"""
if isinstance(failure.value, Exception):
msg = """%s: %s""" % (failure.type.__name__, failure.value.args[0])
return xmlrpc.Fault(400, msg)
return super(TriggerXMLRPCServer, self)._ebRender(failure)
# XXX (jathan): Note that this is out-of-sync w/ the twistd plugin and is
# probably broken.
def main():
"""To daemonize as a twistd plugin! Except this doesn't work and these"""
from twisted.application.internet import TCPServer, SSLServer
from twisted.application.service import Application
from twisted.internet import ssl
rpc = TriggerXMLRPCServer()
xmlrpc.addIntrospection(rpc)
server_factory = server.Site(rpc)
application = Application('trigger_xmlrpc')
#xmlrpc_service = TCPServer(8000, server_factory)
ctx = ssl.DefaultOpenSSLContextFactory('server.key', 'cacert.pem')
xmlrpc_service = SSLServer(8000, server_factory, ctx)
xmlrpc_service.setServiceParent(application)
return application
if __name__ == '__main__':
# To run me as a daemon:
# twistd -l server.log --pidfile server.pid -y server.py
application = main()
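# Illustrative client sketch (an assumption, not part of the original module):
# once the server is listening, any XMLRPC client can invoke the procedures
# registered above, e.g. xmlrpc_add() and xmlrpc_execute_commands(). The URL
# and the device/command values are placeholders.
#
# import xmlrpclib
# proxy = xmlrpclib.ServerProxy('https://localhost:8000/')
# print proxy.add(2, 3)                                    # -> 5
# print proxy.list_subhandlers()
# result = proxy.execute_commands([], {'devices': ['dev1'], 'commands': ['show clock']})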
|
bsd-3-clause
|
ya7lelkom/googleads-python-lib
|
examples/dfp/v201411/proposal_line_item_service/get_all_proposal_line_items.py
|
4
|
1989
|
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example gets all proposal line items.
To create proposal line items, run create_proposal_line_items.py.
"""
__author__ = 'Nicholas Chen'
# Import appropriate modules from the client library.
from googleads import dfp
def main(client):
# Initialize appropriate service.
proposal_line_item_service = client.GetService(
'ProposalLineItemService', version='v201411')
# Create a filter statement.
statement = dfp.FilterStatement('ORDER BY id ASC')
# Get proposal line items by statement.
while True:
response = proposal_line_item_service.getProposalLineItemsByStatement(
statement.ToStatement())
if 'results' in response:
# Display results.
for idx, proposal_line_item in enumerate(response['results'],
start=statement.offset):
print ('%s) Proposal line item with id \'%s\', belonging to proposal id'
' \'%s\', and named \'%s\' was found.' %
(idx, proposal_line_item['id'], proposal_line_item['proposalId'],
proposal_line_item['name']))
statement.offset += dfp.SUGGESTED_PAGE_LIMIT
else:
break
print '\nNumber of results found: %s' % response['totalResultSetSize']
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client)
|
apache-2.0
|
hbtech-ai/ARPS
|
report_spider/report_spider/spiders/THU004.py
|
3
|
2439
|
# -*- coding:utf-8 -*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import time
import scrapy
from Global_function import get_localtime, print_new_number, save_messages
now_time = get_localtime(time.strftime("%Y-%m-%d", time.localtime()))
# now_time = 20170401
class THU004_Spider(scrapy.Spider):
name = 'THU004'
start_urls = ['http://www.chemeng.tsinghua.edu.cn/podcast.do?method=news&cid=34']
domain = 'http://www.chemeng.tsinghua.edu.cn/'
counts = 0
def parse(self, response):
messages = response.xpath("//div[@class='employlist']/ul/li")
for i in xrange(len(messages)):
report_url = self.domain + messages[i].xpath(".//a/@href").extract()[0]
report_time = get_localtime(messages[i].xpath(".//cite/text()").extract()[0].strip().strip('[]'))
yield scrapy.Request(report_url, callback=self.parse_pages, meta={'link': report_url, 'number': i + 1})
def parse_pages(self, response):
messages = response.xpath("//td[@height='400']/p")
title = response.xpath("//h4/text()").extract()[0].strip()
time, address, speaker, img_url = '', '', '', ''
for message in messages:
text = self.get_messages(message)
if u'时间:' in text or u'时间:' in text:
time = self.connect_messages(text, ':') if u'时间:' in text else self.connect_messages(text, ':')
if u'地点:' in text or u'地点:' in text:
address = self.connect_messages(text, ':') if u'地点:' in text else self.connect_messages(text, ':')
if u'报告人:' in text or u'报告人:' in text:
speaker = self.connect_messages(text, ':') if u'报告人:' in text else self.connect_messages(text, ':')
img = message.xpath(".//img/@src")
img_url = (self.domain + img.extract()[0][1:]) if len(img) > 0 else ''
if title != '':
self.counts += 1
print_new_number(self.counts, 'THU', self.name)
all_messages = save_messages('THU', self.name, title, time, address, speaker, '',
'', img_url, response.meta['link'], response.meta['number'], u'清华大学', u'化学工程系')
return all_messages
def get_messages(self, messages):
text = ''
message = messages.xpath(".//text()").extract()
for each in message:
text += each.strip()
return text
def connect_messages(self, messages, sign):
message = messages.split(sign)[1:]
text = ''
for i in xrange(len(message)):
if i > 0:
text += ':'
text += message[i].strip()
return text
|
mit
|
dfang/odoo
|
addons/point_of_sale/wizard/pos_payment.py
|
20
|
2299
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from odoo import api, fields, models, _
class PosMakePayment(models.TransientModel):
_name = 'pos.make.payment'
_description = 'Point of Sale Payment'
def _default_journal(self):
active_id = self.env.context.get('active_id')
if active_id:
session = self.env['pos.order'].browse(active_id).session_id
return session.config_id.journal_ids and session.config_id.journal_ids.ids[0] or False
return False
def _default_amount(self):
active_id = self.env.context.get('active_id')
if active_id:
order = self.env['pos.order'].browse(active_id)
return (order.amount_total - order.amount_paid)
return False
journal_id = fields.Many2one('account.journal', string='Payment Mode', required=True, default=_default_journal)
amount = fields.Float(digits=(16, 2), required=True, default=_default_amount)
payment_name = fields.Char(string='Payment Reference')
payment_date = fields.Date(string='Payment Date', required=True, default=lambda *a: fields.Datetime.now())
@api.multi
def check(self):
"""Check the order:
if the order is not paid: continue payment,
if the order is paid print ticket.
"""
self.ensure_one()
order = self.env['pos.order'].browse(self.env.context.get('active_id', False))
amount = order.amount_total - order.amount_paid
data = self.read()[0]
# this is probably a problem of osv_memory as it's not compatible with normal OSV's
data['journal'] = data['journal_id'][0]
if amount != 0.0:
order.add_payment(data)
if order.test_paid():
order.action_pos_order_paid()
return {'type': 'ir.actions.act_window_close'}
return self.launch_payment()
def launch_payment(self):
return {
'name': _('Payment'),
'view_type': 'form',
'view_mode': 'form',
'res_model': 'pos.make.payment',
'view_id': False,
'target': 'new',
'views': False,
'type': 'ir.actions.act_window',
'context': self.env.context,
}
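# Illustrative usage sketch (an assumption, not part of the original module):
# the wizard reads the order from ``active_id`` in the context, so server-side
# code could register a payment roughly like this. ``order_id`` and
# ``journal_id`` are placeholders.
#
# order = env['pos.order'].browse(order_id)
# wizard = env['pos.make.payment'].with_context(active_id=order.id).create({
#     'journal_id': journal_id,
#     'amount': order.amount_total - order.amount_paid,
# })
# wizard.check()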
|
agpl-3.0
|
befelix/scipy
|
scipy/optimize/tests/test__spectral.py
|
135
|
6585
|
from __future__ import division, absolute_import, print_function
import itertools
import numpy as np
from numpy import exp
from numpy.testing import assert_, assert_equal
from scipy.optimize import root
def test_performance():
# Compare performance results to those listed in
# [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
# and
# [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
# and those produced by dfsane.f from M. Raydan's website.
#
# Where the results disagree, the largest limits are taken.
e_a = 1e-5
e_r = 1e-4
table_1 = [
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors
dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12
]
# Check also scaling invariance
for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
['cruz', 'cheng']):
for problem in table_1:
n = problem['n']
func = lambda x, n: yscale*problem['F'](x/xscale, n)
args = (n,)
x0 = problem['x0'](n) * xscale
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
sigma_0 = xscale/yscale
with np.errstate(over='ignore'):
sol = root(func, x0, args=args,
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
sigma_0=sigma_0, sigma_eps=sigma_eps,
line_search=line_search),
method='DF-SANE')
err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
fatol, sol.success, sol.nit, sol.nfev])
assert_(sol.success, err_msg)
assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval
assert_(sol.nit <= problem['nit'], err_msg)
assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)
def test_complex():
def func(z):
return z**2 - 1 + 2j
x0 = 2.0j
ftol = 1e-4
sol = root(func, x0, tol=ftol, method='DF-SANE')
assert_(sol.success)
f0 = np.linalg.norm(func(x0))
fx = np.linalg.norm(func(sol.x))
assert_(fx <= ftol*f0)
def test_linear_definite():
# The DF-SANE paper proves convergence for "strongly isolated"
# solutions.
#
# For linear systems F(x) = A x - b = 0, with A positive or
# negative definite, the solution is strongly isolated.
def check_solvability(A, b, line_search='cruz'):
func = lambda x: A.dot(x) - b
xp = np.linalg.solve(A, b)
eps = np.linalg.norm(func(xp)) * 1e3
sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
method='DF-SANE')
assert_(sol.success)
assert_(np.linalg.norm(func(sol.x)) <= eps)
n = 90
# Test linear pos.def. system
np.random.seed(1234)
A = np.arange(n*n).reshape(n, n)
A = A + n*n * np.diag(1 + np.arange(n))
assert_(np.linalg.eigvals(A).min() > 0)
b = np.arange(n) * 1.0
check_solvability(A, b, 'cruz')
check_solvability(A, b, 'cheng')
# Test linear neg.def. system
check_solvability(-A, b, 'cruz')
check_solvability(-A, b, 'cheng')
def test_shape():
def f(x, arg):
return x - arg
for dt in [float, complex]:
x = np.zeros([2,2])
arg = np.ones([2,2], dtype=dt)
sol = root(f, x, args=(arg,), method='DF-SANE')
assert_(sol.success)
assert_equal(sol.x.shape, x.shape)
# Some of the test functions and initial guesses listed in
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
def F_1(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0] - 1) - 1
g[1:] = i*(exp(x[1:] - 1) - x[1:])
return g
def x0_1(n):
x0 = np.empty([n])
x0.fill(n/(n-1))
return x0
def F_2(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0]) - 1
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
return g
def x0_2(n):
x0 = np.empty([n])
x0.fill(1/n**2)
return x0
def F_4(x, n):
assert_equal(n % 3, 0)
g = np.zeros([n])
# Note: the first line is typoed in some of the references;
# correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
return g
def x0_4(n):
assert_equal(n % 3, 0)
x0 = np.array([-1, 1/2, -1] * (n//3))
return x0
def F_6(x, n):
c = 0.9
mu = (np.arange(1, n+1) - 0.5)/n
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
def x0_6(n):
return np.ones([n])
def F_7(x, n):
assert_equal(n % 3, 0)
def phi(t):
v = 0.5*t - 2
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
v[t >= 2] = (0.5*t + 2)[t >= 2]
return v
g = np.zeros([n])
g[::3] = 1e4 * x[1::3]**2 - 1
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
g[2::3] = phi(x[2::3])
return g
def x0_7(n):
assert_equal(n % 3, 0)
return np.array([1e-3, 18, 1] * (n//3))
def F_9(x, n):
g = np.zeros([n])
i = np.arange(2, n)
g[0] = x[0]**3/3 + x[1]**2/2
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
return g
def x0_9(n):
return np.ones([n])
def F_10(x, n):
return np.log(1 + x) - x/n
def x0_10(n):
return np.ones([n])
|
bsd-3-clause
|
poffuomo/spark
|
python/pyspark/mllib/feature.py
|
52
|
25634
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for feature in MLlib.
"""
from __future__ import absolute_import
import sys
import warnings
import random
import binascii
if sys.version >= '3':
basestring = str
unicode = str
from py4j.protocol import Py4JJavaError
from pyspark import since
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import (
Vector, Vectors, DenseVector, SparseVector, _convert_to_vector)
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.util import JavaLoader, JavaSaveable
__all__ = ['Normalizer', 'StandardScalerModel', 'StandardScaler',
'HashingTF', 'IDFModel', 'IDF', 'Word2Vec', 'Word2VecModel',
'ChiSqSelector', 'ChiSqSelectorModel', 'ElementwiseProduct']
class VectorTransformer(object):
"""
.. note:: DeveloperApi
Base class for transformation of a vector or RDD of vector
"""
def transform(self, vector):
"""
Applies transformation on a vector.
:param vector: vector to be transformed.
"""
raise NotImplementedError
class Normalizer(VectorTransformer):
"""
Normalizes samples individually to unit L\ :sup:`p`\ norm
For any 1 <= `p` < float('inf'), normalizes samples using
sum(abs(vector) :sup:`p`) :sup:`(1/p)` as norm.
For `p` = float('inf'), max(abs(vector)) will be used as norm for
normalization.
:param p: Normalization in L^p^ space, p = 2 by default.
>>> v = Vectors.dense(range(3))
>>> nor = Normalizer(1)
>>> nor.transform(v)
DenseVector([0.0, 0.3333, 0.6667])
>>> rdd = sc.parallelize([v])
>>> nor.transform(rdd).collect()
[DenseVector([0.0, 0.3333, 0.6667])]
>>> nor2 = Normalizer(float("inf"))
>>> nor2.transform(v)
DenseVector([0.0, 0.5, 1.0])
.. versionadded:: 1.2.0
"""
def __init__(self, p=2.0):
assert p >= 1.0, "p should be greater than or equal to 1.0"
self.p = float(p)
@since('1.2.0')
def transform(self, vector):
"""
Applies unit length normalization on a vector.
:param vector: vector or RDD of vector to be normalized.
:return: normalized vector. If the norm of the input is zero, it
will return the input vector.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return callMLlibFunc("normalizeVector", self.p, vector)
class JavaVectorTransformer(JavaModelWrapper, VectorTransformer):
"""
Wrapper for the model in JVM
"""
def transform(self, vector):
"""
Applies transformation on a vector or an RDD[Vector].
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param vector: Vector or RDD of Vector to be transformed.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return self.call("transform", vector)
class StandardScalerModel(JavaVectorTransformer):
"""
Represents a StandardScaler model that can transform vectors.
.. versionadded:: 1.2.0
"""
@since('1.2.0')
def transform(self, vector):
"""
Applies standardization transformation on a vector.
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param vector: Vector or RDD of Vector to be standardized.
:return: Standardized vector. If the variance of a column is
zero, it will return default `0.0` for the column with
zero variance.
"""
return JavaVectorTransformer.transform(self, vector)
@since('1.4.0')
def setWithMean(self, withMean):
"""
Setter of the boolean which decides
whether it uses mean or not
"""
self.call("setWithMean", withMean)
return self
@since('1.4.0')
def setWithStd(self, withStd):
"""
Setter of the boolean which decides
whether it uses std or not
"""
self.call("setWithStd", withStd)
return self
@property
@since('2.0.0')
def withStd(self):
"""
Returns if the model scales the data to unit standard deviation.
"""
return self.call("withStd")
@property
@since('2.0.0')
def withMean(self):
"""
Returns if the model centers the data before scaling.
"""
return self.call("withMean")
@property
@since('2.0.0')
def std(self):
"""
Return the column standard deviation values.
"""
return self.call("std")
@property
@since('2.0.0')
def mean(self):
"""
Return the column mean values.
"""
return self.call("mean")
class StandardScaler(object):
"""
Standardizes features by removing the mean and scaling to unit
variance using column summary statistics on the samples in the
training set.
:param withMean: False by default. Centers the data with mean
before scaling. It will build a dense output, so take
care when applying to sparse input.
:param withStd: True by default. Scales the data to unit
standard deviation.
>>> vs = [Vectors.dense([-2.0, 2.3, 0]), Vectors.dense([3.8, 0.0, 1.9])]
>>> dataset = sc.parallelize(vs)
>>> standardizer = StandardScaler(True, True)
>>> model = standardizer.fit(dataset)
>>> result = model.transform(dataset)
>>> for r in result.collect(): r
DenseVector([-0.7071, 0.7071, -0.7071])
DenseVector([0.7071, -0.7071, 0.7071])
>>> int(model.std[0])
4
>>> int(model.mean[0]*10)
9
>>> model.withStd
True
>>> model.withMean
True
.. versionadded:: 1.2.0
"""
def __init__(self, withMean=False, withStd=True):
if not (withMean or withStd):
warnings.warn("Both withMean and withStd are false. The model does nothing.")
self.withMean = withMean
self.withStd = withStd
@since('1.2.0')
def fit(self, dataset):
"""
Computes the mean and variance and stores as a model to be used
for later scaling.
:param dataset: The data used to compute the mean and variance
to build the transformation model.
        :return: a StandardScalerModel
"""
dataset = dataset.map(_convert_to_vector)
jmodel = callMLlibFunc("fitStandardScaler", self.withMean, self.withStd, dataset)
return StandardScalerModel(jmodel)
class ChiSqSelectorModel(JavaVectorTransformer):
"""
Represents a Chi Squared selector model.
.. versionadded:: 1.4.0
"""
@since('1.4.0')
def transform(self, vector):
"""
Applies transformation on a vector.
:param vector: Vector or RDD of Vector to be transformed.
:return: transformed vector.
"""
return JavaVectorTransformer.transform(self, vector)
class ChiSqSelector(object):
"""
Creates a ChiSquared feature selector.
The selector supports different selection methods: `numTopFeatures`, `percentile`, `fpr`,
`fdr`, `fwe`.
* `numTopFeatures` chooses a fixed number of top features according to a chi-squared test.
* `percentile` is similar but chooses a fraction of all features
instead of a fixed number.
* `fpr` chooses all features whose p-values are below a threshold,
thus controlling the false positive rate of selection.
* `fdr` uses the `Benjamini-Hochberg procedure <https://en.wikipedia.org/wiki/
False_discovery_rate#Benjamini.E2.80.93Hochberg_procedure>`_
to choose all features whose false discovery rate is below a threshold.
* `fwe` chooses all features whose p-values are below a threshold. The threshold is scaled by
1/numFeatures, thus controlling the family-wise error rate of selection.
By default, the selection method is `numTopFeatures`, with the default number of top features
set to 50.
>>> data = sc.parallelize([
... LabeledPoint(0.0, SparseVector(3, {0: 8.0, 1: 7.0})),
... LabeledPoint(1.0, SparseVector(3, {1: 9.0, 2: 6.0})),
... LabeledPoint(1.0, [0.0, 9.0, 8.0]),
... LabeledPoint(2.0, [7.0, 9.0, 5.0]),
... LabeledPoint(2.0, [8.0, 7.0, 3.0])
... ])
>>> model = ChiSqSelector(numTopFeatures=1).fit(data)
>>> model.transform(SparseVector(3, {1: 9.0, 2: 6.0}))
SparseVector(1, {})
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
>>> model = ChiSqSelector(selectorType="fpr", fpr=0.2).fit(data)
>>> model.transform(SparseVector(3, {1: 9.0, 2: 6.0}))
SparseVector(1, {})
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
>>> model = ChiSqSelector(selectorType="percentile", percentile=0.34).fit(data)
>>> model.transform(DenseVector([7.0, 9.0, 5.0]))
DenseVector([7.0])
.. versionadded:: 1.4.0
"""
def __init__(self, numTopFeatures=50, selectorType="numTopFeatures", percentile=0.1, fpr=0.05,
fdr=0.05, fwe=0.05):
self.numTopFeatures = numTopFeatures
self.selectorType = selectorType
self.percentile = percentile
self.fpr = fpr
self.fdr = fdr
self.fwe = fwe
@since('2.1.0')
def setNumTopFeatures(self, numTopFeatures):
"""
        set numTopFeatures for feature selection by number of top features.
Only applicable when selectorType = "numTopFeatures".
"""
self.numTopFeatures = int(numTopFeatures)
return self
@since('2.1.0')
def setPercentile(self, percentile):
"""
set percentile [0.0, 1.0] for feature selection by percentile.
Only applicable when selectorType = "percentile".
"""
self.percentile = float(percentile)
return self
@since('2.1.0')
def setFpr(self, fpr):
"""
set FPR [0.0, 1.0] for feature selection by FPR.
Only applicable when selectorType = "fpr".
"""
self.fpr = float(fpr)
return self
@since('2.2.0')
def setFdr(self, fdr):
"""
set FDR [0.0, 1.0] for feature selection by FDR.
Only applicable when selectorType = "fdr".
"""
self.fdr = float(fdr)
return self
@since('2.2.0')
def setFwe(self, fwe):
"""
set FWE [0.0, 1.0] for feature selection by FWE.
Only applicable when selectorType = "fwe".
"""
self.fwe = float(fwe)
return self
@since('2.1.0')
def setSelectorType(self, selectorType):
"""
        set the selector type of the ChiSqSelector.
Supported options: "numTopFeatures" (default), "percentile", "fpr", "fdr", "fwe".
"""
self.selectorType = str(selectorType)
return self
@since('1.4.0')
def fit(self, data):
"""
Returns a ChiSquared feature selector.
:param data: an `RDD[LabeledPoint]` containing the labeled dataset
with categorical features. Real-valued features will be
treated as categorical for each distinct value.
Apply feature discretizer before using this function.
"""
jmodel = callMLlibFunc("fitChiSqSelector", self.selectorType, self.numTopFeatures,
self.percentile, self.fpr, self.fdr, self.fwe, data)
return ChiSqSelectorModel(jmodel)
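# Illustrative sketch, not part of the original module: the selection method can be
# switched on a single ChiSqSelector through the setters above before calling fit().
# ``labeled_points`` is assumed to be an RDD[LabeledPoint] with categorical features.
def _example_chisq_selector_types(labeled_points):
    selector = ChiSqSelector()
    top_model = selector.setSelectorType("numTopFeatures").setNumTopFeatures(1).fit(labeled_points)
    fpr_model = selector.setSelectorType("fpr").setFpr(0.2).fit(labeled_points)
    return top_model, fpr_model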
class PCAModel(JavaVectorTransformer):
"""
Model fitted by [[PCA]] that can project vectors to a low-dimensional space using PCA.
.. versionadded:: 1.5.0
"""
class PCA(object):
"""
A feature transformer that projects vectors to a low-dimensional space using PCA.
>>> data = [Vectors.sparse(5, [(1, 1.0), (3, 7.0)]),
... Vectors.dense([2.0, 0.0, 3.0, 4.0, 5.0]),
... Vectors.dense([4.0, 0.0, 0.0, 6.0, 7.0])]
>>> model = PCA(2).fit(sc.parallelize(data))
>>> pcArray = model.transform(Vectors.sparse(5, [(1, 1.0), (3, 7.0)])).toArray()
>>> pcArray[0]
1.648...
>>> pcArray[1]
-4.013...
.. versionadded:: 1.5.0
"""
def __init__(self, k):
"""
:param k: number of principal components.
"""
self.k = int(k)
@since('1.5.0')
def fit(self, data):
"""
Computes a [[PCAModel]] that contains the principal components of the input vectors.
:param data: source vectors
"""
jmodel = callMLlibFunc("fitPCA", self.k, data)
return PCAModel(jmodel)
class HashingTF(object):
"""
Maps a sequence of terms to their term frequencies using the hashing
trick.
.. note:: The terms must be hashable (can not be dict/set/list...).
:param numFeatures: number of features (default: 2^20)
>>> htf = HashingTF(100)
>>> doc = "a a b b c d".split(" ")
>>> htf.transform(doc)
SparseVector(100, {...})
.. versionadded:: 1.2.0
"""
def __init__(self, numFeatures=1 << 20):
self.numFeatures = numFeatures
self.binary = False
@since("2.0.0")
def setBinary(self, value):
"""
If True, term frequency vector will be binary such that non-zero
term counts will be set to 1
(default: False)
"""
self.binary = value
return self
@since('1.2.0')
def indexOf(self, term):
""" Returns the index of the input term. """
return hash(term) % self.numFeatures
@since('1.2.0')
def transform(self, document):
"""
Transforms the input document (list of terms) to term frequency
vectors, or transform the RDD of document to RDD of term
frequency vectors.
"""
if isinstance(document, RDD):
return document.map(self.transform)
freq = {}
for term in document:
i = self.indexOf(term)
freq[i] = 1.0 if self.binary else freq.get(i, 0) + 1.0
return Vectors.sparse(self.numFeatures, freq.items())
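# Illustrative sketch, not part of the original module: with setBinary(True) the
# term-frequency vector records only presence/absence, while the default keeps counts.
def _example_hashing_tf_binary():
    doc = "a a b".split(" ")
    counts = HashingTF(numFeatures=16).transform(doc)                    # term "a" maps to 2.0
    presence = HashingTF(numFeatures=16).setBinary(True).transform(doc)  # term "a" maps to 1.0
    return counts, presence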
class IDFModel(JavaVectorTransformer):
"""
Represents an IDF model that can transform term frequency vectors.
.. versionadded:: 1.2.0
"""
@since('1.2.0')
def transform(self, x):
"""
Transforms term frequency (TF) vectors to TF-IDF vectors.
If `minDocFreq` was set for the IDF calculation,
the terms which occur in fewer than `minDocFreq`
documents will have an entry of 0.
.. note:: In Python, transform cannot currently be used within
an RDD transformation or action.
Call transform directly on the RDD instead.
:param x: an RDD of term frequency vectors or a term frequency
vector
:return: an RDD of TF-IDF vectors or a TF-IDF vector
"""
return JavaVectorTransformer.transform(self, x)
@since('1.4.0')
def idf(self):
"""
Returns the current IDF vector.
"""
return self.call('idf')
class IDF(object):
"""
Inverse document frequency (IDF).
The standard formulation is used: `idf = log((m + 1) / (d(t) + 1))`,
where `m` is the total number of documents and `d(t)` is the number
of documents that contain term `t`.
This implementation supports filtering out terms which do not appear
in a minimum number of documents (controlled by the variable
`minDocFreq`). For terms that are not in at least `minDocFreq`
documents, the IDF is found as 0, resulting in TF-IDFs of 0.
    :param minDocFreq: minimum number of documents in which a term
      should appear for filtering
>>> n = 4
>>> freqs = [Vectors.sparse(n, (1, 3), (1.0, 2.0)),
... Vectors.dense([0.0, 1.0, 2.0, 3.0]),
... Vectors.sparse(n, [1], [1.0])]
>>> data = sc.parallelize(freqs)
>>> idf = IDF()
>>> model = idf.fit(data)
>>> tfidf = model.transform(data)
>>> for r in tfidf.collect(): r
SparseVector(4, {1: 0.0, 3: 0.5754})
DenseVector([0.0, 0.0, 1.3863, 0.863])
SparseVector(4, {1: 0.0})
>>> model.transform(Vectors.dense([0.0, 1.0, 2.0, 3.0]))
DenseVector([0.0, 0.0, 1.3863, 0.863])
>>> model.transform([0.0, 1.0, 2.0, 3.0])
DenseVector([0.0, 0.0, 1.3863, 0.863])
>>> model.transform(Vectors.sparse(n, (1, 3), (1.0, 2.0)))
SparseVector(4, {1: 0.0, 3: 0.5754})
.. versionadded:: 1.2.0
"""
def __init__(self, minDocFreq=0):
self.minDocFreq = minDocFreq
@since('1.2.0')
def fit(self, dataset):
"""
Computes the inverse document frequency.
:param dataset: an RDD of term frequency vectors
"""
if not isinstance(dataset, RDD):
raise TypeError("dataset should be an RDD of term frequency vectors")
jmodel = callMLlibFunc("fitIDF", self.minDocFreq, dataset.map(_convert_to_vector))
return IDFModel(jmodel)
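# Illustrative sketch, not part of the original module: recomputes the documented
# formula idf = log((m + 1) / (d(t) + 1)) by hand and compares it with the fitted
# model.  Assumes a live SparkContext ``sc`` like the one used in the doctests.
def _example_idf_formula(sc):
    import math
    freqs = [Vectors.dense([0.0, 1.0]), Vectors.dense([1.0, 1.0])]  # m = 2 documents
    model = IDF().fit(sc.parallelize(freqs))
    # Term 0 occurs in d(t) = 1 document, term 1 in d(t) = 2 documents.
    expected = [math.log((2 + 1) / (1 + 1.0)), math.log((2 + 1) / (2 + 1.0))]
    return model.idf(), expected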
class Word2VecModel(JavaVectorTransformer, JavaSaveable, JavaLoader):
"""
class for Word2Vec model
.. versionadded:: 1.2.0
"""
@since('1.2.0')
def transform(self, word):
"""
Transforms a word to its vector representation
.. note:: Local use only
:param word: a word
:return: vector representation of word(s)
"""
try:
return self.call("transform", word)
except Py4JJavaError:
raise ValueError("%s not found" % word)
@since('1.2.0')
def findSynonyms(self, word, num):
"""
Find synonyms of a word
:param word: a word or a vector representation of word
:param num: number of synonyms to find
:return: array of (word, cosineSimilarity)
.. note:: Local use only
"""
if not isinstance(word, basestring):
word = _convert_to_vector(word)
words, similarity = self.call("findSynonyms", word, num)
return zip(words, similarity)
@since('1.4.0')
def getVectors(self):
"""
Returns a map of words to their vector representations.
"""
return self.call("getVectors")
@classmethod
@since('1.5.0')
def load(cls, sc, path):
"""
Load a model from the given path.
"""
jmodel = sc._jvm.org.apache.spark.mllib.feature \
.Word2VecModel.load(sc._jsc.sc(), path)
model = sc._jvm.org.apache.spark.mllib.api.python.Word2VecModelWrapper(jmodel)
return Word2VecModel(model)
@ignore_unicode_prefix
class Word2Vec(object):
"""Word2Vec creates vector representation of words in a text corpus.
The algorithm first constructs a vocabulary from the corpus
and then learns vector representation of words in the vocabulary.
The vector representation can be used as features in
natural language processing and machine learning algorithms.
    We use the skip-gram model in our implementation and the
    hierarchical softmax method to train the model. The variable names
    in the implementation match the original C implementation.
For original C implementation,
see https://code.google.com/p/word2vec/
For research papers, see
Efficient Estimation of Word Representations in Vector Space
and Distributed Representations of Words and Phrases and their
Compositionality.
>>> sentence = "a b " * 100 + "a c " * 10
>>> localDoc = [sentence, sentence]
>>> doc = sc.parallelize(localDoc).map(lambda line: line.split(" "))
>>> model = Word2Vec().setVectorSize(10).setSeed(42).fit(doc)
Querying for synonyms of a word will not return that word:
>>> syms = model.findSynonyms("a", 2)
>>> [s[0] for s in syms]
[u'b', u'c']
But querying for synonyms of a vector may return the word whose
representation is that vector:
>>> vec = model.transform("a")
>>> syms = model.findSynonyms(vec, 2)
>>> [s[0] for s in syms]
[u'a', u'b']
>>> import os, tempfile
>>> path = tempfile.mkdtemp()
>>> model.save(sc, path)
>>> sameModel = Word2VecModel.load(sc, path)
>>> model.transform("a") == sameModel.transform("a")
True
>>> syms = sameModel.findSynonyms("a", 2)
>>> [s[0] for s in syms]
[u'b', u'c']
>>> from shutil import rmtree
>>> try:
... rmtree(path)
... except OSError:
... pass
.. versionadded:: 1.2.0
"""
def __init__(self):
"""
Construct Word2Vec instance
"""
self.vectorSize = 100
self.learningRate = 0.025
self.numPartitions = 1
self.numIterations = 1
self.seed = None
self.minCount = 5
self.windowSize = 5
@since('1.2.0')
def setVectorSize(self, vectorSize):
"""
Sets vector size (default: 100).
"""
self.vectorSize = vectorSize
return self
@since('1.2.0')
def setLearningRate(self, learningRate):
"""
Sets initial learning rate (default: 0.025).
"""
self.learningRate = learningRate
return self
@since('1.2.0')
def setNumPartitions(self, numPartitions):
"""
Sets number of partitions (default: 1). Use a small number for
accuracy.
"""
self.numPartitions = numPartitions
return self
@since('1.2.0')
def setNumIterations(self, numIterations):
"""
Sets number of iterations (default: 1), which should be smaller
than or equal to number of partitions.
"""
self.numIterations = numIterations
return self
@since('1.2.0')
def setSeed(self, seed):
"""
Sets random seed.
"""
self.seed = seed
return self
@since('1.4.0')
def setMinCount(self, minCount):
"""
Sets minCount, the minimum number of times a token must appear
to be included in the word2vec model's vocabulary (default: 5).
"""
self.minCount = minCount
return self
@since('2.0.0')
def setWindowSize(self, windowSize):
"""
Sets window size (default: 5).
"""
self.windowSize = windowSize
return self
@since('1.2.0')
def fit(self, data):
"""
Computes the vector representation of each word in vocabulary.
:param data: training data. RDD of list of string
:return: Word2VecModel instance
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD of list of string")
jmodel = callMLlibFunc("trainWord2VecModel", data, int(self.vectorSize),
float(self.learningRate), int(self.numPartitions),
int(self.numIterations), self.seed,
int(self.minCount), int(self.windowSize))
return Word2VecModel(jmodel)
class ElementwiseProduct(VectorTransformer):
"""
    Scales each column of the vector with the supplied weight vector,
    i.e. the elementwise product.
>>> weight = Vectors.dense([1.0, 2.0, 3.0])
>>> eprod = ElementwiseProduct(weight)
>>> a = Vectors.dense([2.0, 1.0, 3.0])
>>> eprod.transform(a)
DenseVector([2.0, 2.0, 9.0])
>>> b = Vectors.dense([9.0, 3.0, 4.0])
>>> rdd = sc.parallelize([a, b])
>>> eprod.transform(rdd).collect()
[DenseVector([2.0, 2.0, 9.0]), DenseVector([9.0, 6.0, 12.0])]
.. versionadded:: 1.5.0
"""
def __init__(self, scalingVector):
self.scalingVector = _convert_to_vector(scalingVector)
@since('1.5.0')
def transform(self, vector):
"""
Computes the Hadamard product of the vector.
"""
if isinstance(vector, RDD):
vector = vector.map(_convert_to_vector)
else:
vector = _convert_to_vector(vector)
return callMLlibFunc("elementwiseProductVector", self.scalingVector, vector)
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.feature tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
sys.path.pop(0)
_test()
|
apache-2.0
|
firebitsbr/infernal-twin
|
build/reportlab/build/lib.linux-i686-2.7/reportlab/graphics/renderPS.py
|
29
|
36097
|
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/graphics/renderPS.py
__version__=''' $Id$ '''
__doc__="""Render drawing objects in Postscript"""
from reportlab.pdfbase.pdfmetrics import getFont, stringWidth, unicode2T1 # for font info
from reportlab.lib.utils import getBytesIO, getStringIO, asBytes, char2int, rawBytes, asNative, isUnicode
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.colors import black
from reportlab.graphics.renderbase import Renderer, StateTracker, getStateDelta, renderScaledDrawing
from reportlab.graphics.shapes import STATE_DEFAULTS
import math
from operator import getitem
from reportlab import rl_config
_ESCAPEDICT={}
for c in xrange(256):
if c<32 or c>=127:
_ESCAPEDICT[c]= '\\%03o' % c
elif c in (ord('\\'),ord('('),ord(')')):
_ESCAPEDICT[c] = '\\'+chr(c)
else:
_ESCAPEDICT[c] = chr(c)
del c
def _escape_and_limit(s):
s = asBytes(s)
R = []
aR = R.append
n = 0
for c in s:
c = _ESCAPEDICT[char2int(c)]
aR(c)
n += len(c)
if n>=200:
n = 0
aR('\\\n')
return ''.join(R)
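# Illustrative sketch, not part of the original module: parentheses, backslashes and
# non-printable bytes are escaped so a value can sit safely inside a PostScript
# (...) string literal, with a line break forced roughly every 200 characters.
def _example_escape_and_limit():
    return _escape_and_limit('50% off (today)\n')   # -> 50% off \(today\)\012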
# we need to create encoding vectors for each font we use, or they will
# come out in Adobe's old StandardEncoding, which NOBODY uses.
PS_WinAnsiEncoding="""
/RE { %def
findfont begin
currentdict dup length dict begin
{ %forall
1 index /FID ne { def } { pop pop } ifelse
} forall
/FontName exch def dup length 0 ne { %if
/Encoding Encoding 256 array copy def
0 exch { %forall
dup type /nametype eq { %ifelse
Encoding 2 index 2 index put
pop 1 add
}{ %else
exch pop
} ifelse
} forall
} if pop
currentdict dup end end
/FontName get exch definefont pop
} bind def
/WinAnsiEncoding [
39/quotesingle 96/grave 128/euro 130/quotesinglbase/florin/quotedblbase
/ellipsis/dagger/daggerdbl/circumflex/perthousand
/Scaron/guilsinglleft/OE 145/quoteleft/quoteright
/quotedblleft/quotedblright/bullet/endash/emdash
/tilde/trademark/scaron/guilsinglright/oe/dotlessi
159/Ydieresis 164/currency 166/brokenbar 168/dieresis/copyright
/ordfeminine 172/logicalnot 174/registered/macron/ring
177/plusminus/twosuperior/threesuperior/acute/mu
183/periodcentered/cedilla/onesuperior/ordmasculine
188/onequarter/onehalf/threequarters 192/Agrave/Aacute
/Acircumflex/Atilde/Adieresis/Aring/AE/Ccedilla
/Egrave/Eacute/Ecircumflex/Edieresis/Igrave/Iacute
/Icircumflex/Idieresis/Eth/Ntilde/Ograve/Oacute
/Ocircumflex/Otilde/Odieresis/multiply/Oslash
/Ugrave/Uacute/Ucircumflex/Udieresis/Yacute/Thorn
/germandbls/agrave/aacute/acircumflex/atilde/adieresis
/aring/ae/ccedilla/egrave/eacute/ecircumflex
/edieresis/igrave/iacute/icircumflex/idieresis
/eth/ntilde/ograve/oacute/ocircumflex/otilde
/odieresis/divide/oslash/ugrave/uacute/ucircumflex
/udieresis/yacute/thorn/ydieresis
] def
"""
class PSCanvas:
def __init__(self,size=(300,300), PostScriptLevel=2):
self.width, self.height = size
xtraState = []
self._xtraState_push = xtraState.append
self._xtraState_pop = xtraState.pop
self.comments = 0
self.code = []
self.code_append = self.code.append
self._sep = '\n'
self._strokeColor = self._fillColor = self._lineWidth = \
self._font = self._fontSize = self._lineCap = \
self._lineJoin = self._color = None
self._fontsUsed = [] # track them as we go
self.setFont(STATE_DEFAULTS['fontName'],STATE_DEFAULTS['fontSize'])
self.setStrokeColor(STATE_DEFAULTS['strokeColor'])
self.setLineCap(2)
self.setLineJoin(0)
self.setLineWidth(1)
self.PostScriptLevel=PostScriptLevel
def comment(self,msg):
if self.comments: self.code_append('%'+msg)
def drawImage(self, image, x1,y1, x2=None,y2=None): # Postscript Level2 version
# select between postscript level 1 or level 2
        if self.PostScriptLevel==1:
            self._drawImageLevel1(image, x1, y1, x2=x2, y2=y2)
        elif self.PostScriptLevel==2:
            self._drawImageLevel2(image, x1, y1, x2=x2, y2=y2)
else :
raise ValueError('Unsupported Postscript Level %s' % self.PostScriptLevel)
def clear(self):
self.code_append('showpage') # ugh, this makes no sense oh well.
def _t1_re_encode(self):
if not self._fontsUsed: return
# for each font used, reencode the vectors
C = []
for fontName in self._fontsUsed:
fontObj = getFont(fontName)
if not fontObj._dynamicFont and fontObj.encName=='WinAnsiEncoding':
C.append('WinAnsiEncoding /%s /%s RE' % (fontName, fontName))
if C:
C.insert(0,PS_WinAnsiEncoding)
self.code.insert(1, self._sep.join(C))
def save(self,f=None):
if not hasattr(f,'write'):
_f = open(f,'wb')
else:
_f = f
if self.code[-1]!='showpage': self.clear()
self.code.insert(0,'''\
%%!PS-Adobe-3.0 EPSF-3.0
%%%%BoundingBox: 0 0 %d %d
%%%% Initialization:
/m {moveto} bind def
/l {lineto} bind def
/c {curveto} bind def
''' % (self.width,self.height))
self._t1_re_encode()
_f.write(rawBytes(self._sep.join(self.code)))
if _f is not f:
_f.close()
from reportlab.lib.utils import markfilename
markfilename(f,creatorcode='XPR3',filetype='EPSF')
def saveState(self):
self._xtraState_push((self._fontCodeLoc,))
self.code_append('gsave')
def restoreState(self):
self.code_append('grestore')
self._fontCodeLoc, = self._xtraState_pop()
def stringWidth(self, s, font=None, fontSize=None):
"""Return the logical width of the string if it were drawn
in the current font (defaults to self.font)."""
font = font or self._font
fontSize = fontSize or self._fontSize
return stringWidth(s, font, fontSize)
def setLineCap(self,v):
if self._lineCap!=v:
self._lineCap = v
self.code_append('%d setlinecap'%v)
def setLineJoin(self,v):
if self._lineJoin!=v:
self._lineJoin = v
self.code_append('%d setlinejoin'%v)
def setDash(self, array=[], phase=0):
"""Two notations. pass two numbers, or an array and phase"""
# copied and modified from reportlab.canvas
psoperation = "setdash"
if isinstance(array,(float,int)):
self.code_append('[%s %s] 0 %s' % (array, phase, psoperation))
elif isinstance(array,(tuple,list)):
assert phase >= 0, "phase is a length in user space"
textarray = ' '.join(map(str, array))
self.code_append('[%s] %s %s' % (textarray, phase, psoperation))
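    # Illustrative sketch, not part of the original class: demonstrates the two
    # notations accepted by setDash above.
    def _example_setDash_notations(self):
        self.setDash(3, 2)          # two numbers   -> "[3 2] 0 setdash" (3 on, 2 off)
        self.setDash([1, 2, 3], 1)  # array + phase -> "[1 2 3] 1 setdash"
        return self.code[-2:]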
def setStrokeColor(self, color):
self._strokeColor = color
self.setColor(color)
def setColor(self, color):
if self._color!=color:
self._color = color
if color:
if hasattr(color, "cyan"):
self.code_append('%s setcmykcolor' % fp_str(color.cyan, color.magenta, color.yellow, color.black))
else:
self.code_append('%s setrgbcolor' % fp_str(color.red, color.green, color.blue))
def setFillColor(self, color):
self._fillColor = color
self.setColor(color)
def setLineWidth(self, width):
if width != self._lineWidth:
self._lineWidth = width
self.code_append('%s setlinewidth' % width)
def setFont(self,font,fontSize,leading=None):
if self._font!=font or self._fontSize!=fontSize:
self._fontCodeLoc = len(self.code)
self._font = font
self._fontSize = fontSize
self.code_append('')
def line(self, x1, y1, x2, y2):
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.code_append('%s m %s l stroke' % (fp_str(x1, y1), fp_str(x2, y2)))
def _escape(self, s):
'''
return a copy of string s with special characters in postscript strings
escaped with backslashes.
'''
try:
return _escape_and_limit(s)
except:
raise ValueError("cannot escape %s" % ascii(s))
def _issueT1String(self,fontObj,x,y,s):
fc = fontObj
code_append = self.code_append
fontSize = self._fontSize
fontsUsed = self._fontsUsed
escape = self._escape
if not isUnicode(s):
try:
s = s.decode('utf8')
except UnicodeDecodeError as e:
i,j = e.args[2:4]
raise UnicodeDecodeError(*(e.args[:4]+('%s\n%s-->%s<--%s' % (e.args[4],s[i-10:i],s[i:j],s[j:j+10]),)))
for f, t in unicode2T1(s,[fontObj]+fontObj.substitutionFonts):
if f!=fc:
psName = asNative(f.face.name)
code_append('(%s) findfont %s scalefont setfont' % (psName,fp_str(fontSize)))
if psName not in fontsUsed:
fontsUsed.append(psName)
fc = f
code_append('%s m (%s) show ' % (fp_str(x,y),escape(t)))
x += f.stringWidth(t.decode(f.encName),fontSize)
if fontObj!=fc:
self._font = None
self.setFont(fontObj.face.name,fontSize)
def drawString(self, x, y, s, angle=0):
if self._fillColor != None:
fontObj = getFont(self._font)
if not self.code[self._fontCodeLoc]:
psName = asNative(fontObj.face.name)
self.code[self._fontCodeLoc]='(%s) findfont %s scalefont setfont' % (psName,fp_str(self._fontSize))
if psName not in self._fontsUsed:
self._fontsUsed.append(psName)
self.setColor(self._fillColor)
if angle!=0:
self.code_append('gsave %s translate %s rotate' % (fp_str(x,y),fp_str(angle)))
x = y = 0
if fontObj._dynamicFont:
s = self._escape(s)
self.code_append('%s m (%s) show ' % (fp_str(x,y),s))
else:
self._issueT1String(fontObj,x,y,s)
if angle!=0:
self.code_append('grestore')
def drawCentredString(self, x, y, text, text_anchor='middle'):
if self._fillColor is not None:
textLen = stringWidth(text, self._font,self._fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2.
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,text,textLen,self._font,self._fontSize)
self.drawString(x,y,text)
def drawRightString(self, text, x, y):
self.drawCentredString(text,x,y,text_anchor='end')
def drawCurve(self, x1, y1, x2, y2, x3, y3, x4, y4, closed=0):
codeline = '%s m %s curveto'
data = (fp_str(x1, y1), fp_str(x2, y2, x3, y3, x4, y4))
if self._fillColor != None:
self.setColor(self._fillColor)
self.code_append((codeline % data) + ' eofill')
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.code_append((codeline % data)
+ ((closed and ' closepath') or '')
+ ' stroke')
########################################################################################
def rect(self, x1,y1, x2,y2, stroke=1, fill=1):
"Draw a rectangle between x1,y1, and x2,y2"
# Path is drawn in counter-clockwise direction"
x1, x2 = min(x1,x2), max(x1, x2) # from piddle.py
y1, y2 = min(y1,y2), max(y1, y2)
self.polygon(((x1,y1),(x2,y1),(x2,y2),(x1,y2)), closed=1, stroke=stroke, fill = fill)
def roundRect(self, x1,y1, x2,y2, rx=8, ry=8):
"""Draw a rounded rectangle between x1,y1, and x2,y2,
with corners inset as ellipses with x radius rx and y radius ry.
These should have x1<x2, y1<y2, rx>0, and ry>0."""
# Path is drawn in counter-clockwise direction
x1, x2 = min(x1,x2), max(x1, x2) # from piddle.py
y1, y2 = min(y1,y2), max(y1, y2)
# Note: arcto command draws a line from current point to beginning of arc
# save current matrix, translate to center of ellipse, scale by rx ry, and draw
# a circle of unit radius in counterclockwise dir, return to original matrix
# arguments are (cx, cy, rx, ry, startAngle, endAngle)
ellipsePath = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s arc setmatrix'
# choice between newpath and moveTo beginning of arc
# go with newpath for precision, does this violate any assumptions in code???
rr = ['newpath'] # Round Rect code path
a = rr.append
# upper left corner ellipse is first
a(ellipsePath % (x1+rx, y1+ry, rx, -ry, 90, 180))
a(ellipsePath % (x1+rx, y2-ry, rx, -ry, 180, 270))
a(ellipsePath % (x2-rx, y2-ry, rx, -ry, 270, 360))
a(ellipsePath % (x2-rx, y1+ry, rx, -ry, 0, 90) )
a('closepath')
self._fillAndStroke(rr)
def ellipse(self, x1,y1, x2,y2):
"""Draw an orthogonal ellipse inscribed within the rectangle x1,y1,x2,y2.
These should have x1<x2 and y1<y2."""
#Just invoke drawArc to actually draw the ellipse
self.drawArc(x1,y1, x2,y2)
def circle(self, xc, yc, r):
self.ellipse(xc-r,yc-r, xc+r,yc+r)
def drawArc(self, x1,y1, x2,y2, startAng=0, extent=360, fromcenter=0):
"""Draw a partial ellipse inscribed within the rectangle x1,y1,x2,y2,
starting at startAng degrees and covering extent degrees. Angles
start with 0 to the right (+x) and increase counter-clockwise.
These should have x1<x2 and y1<y2."""
#calculate centre of ellipse
#print "x1,y1,x2,y2,startAng,extent,fromcenter", x1,y1,x2,y2,startAng,extent,fromcenter
cx, cy = (x1+x2)/2.0, (y1+y2)/2.0
rx, ry = (x2-x1)/2.0, (y2-y1)/2.0
codeline = self._genArcCode(x1, y1, x2, y2, startAng, extent)
startAngleRadians = math.pi*startAng/180.0
extentRadians = math.pi*extent/180.0
endAngleRadians = startAngleRadians + extentRadians
codelineAppended = 0
# fill portion
if self._fillColor != None:
self.setColor(self._fillColor)
self.code_append(codeline)
codelineAppended = 1
if self._strokeColor!=None: self.code_append('gsave')
self.lineTo(cx,cy)
self.code_append('eofill')
if self._strokeColor!=None: self.code_append('grestore')
# stroke portion
if self._strokeColor != None:
# this is a bit hacked up. There is certainly a better way...
self.setColor(self._strokeColor)
(startx, starty) = (cx+rx*math.cos(startAngleRadians), cy+ry*math.sin(startAngleRadians))
if not codelineAppended:
self.code_append(codeline)
if fromcenter:
# move to center
self.lineTo(cx,cy)
self.lineTo(startx, starty)
self.code_append('closepath')
self.code_append('stroke')
def _genArcCode(self, x1, y1, x2, y2, startAng, extent):
"Calculate the path for an arc inscribed in rectangle defined by (x1,y1),(x2,y2)"
#calculate semi-minor and semi-major axes of ellipse
xScale = abs((x2-x1)/2.0)
yScale = abs((y2-y1)/2.0)
#calculate centre of ellipse
x, y = (x1+x2)/2.0, (y1+y2)/2.0
codeline = 'matrix currentmatrix %s %s translate %s %s scale 0 0 1 %s %s %s setmatrix'
if extent >= 0:
arc='arc'
else:
arc='arcn'
data = (x,y, xScale, yScale, startAng, startAng+extent, arc)
return codeline % data
def polygon(self, p, closed=0, stroke=1, fill=1):
assert len(p) >= 2, 'Polygon must have 2 or more points'
start = p[0]
p = p[1:]
poly = []
a = poly.append
a("%s m" % fp_str(start))
for point in p:
a("%s l" % fp_str(point))
if closed:
a("closepath")
self._fillAndStroke(poly,stroke=stroke,fill=fill)
def lines(self, lineList, color=None, width=None):
if self._strokeColor != None:
self._setColor(self._strokeColor)
codeline = '%s m %s l stroke'
for line in lineList:
self.code_append(codeline % (fp_str(line[0]),fp_str(line[1])))
def moveTo(self,x,y):
self.code_append('%s m' % fp_str(x, y))
def lineTo(self,x,y):
self.code_append('%s l' % fp_str(x, y))
def curveTo(self,x1,y1,x2,y2,x3,y3):
self.code_append('%s c' % fp_str(x1,y1,x2,y2,x3,y3))
def closePath(self):
self.code_append('closepath')
def polyLine(self, p):
assert len(p) >= 1, 'Polyline must have 1 or more points'
if self._strokeColor != None:
self.setColor(self._strokeColor)
self.moveTo(p[0][0], p[0][1])
for t in p[1:]:
self.lineTo(t[0], t[1])
self.code_append('stroke')
def drawFigure(self, partList, closed=0):
figureCode = []
a = figureCode.append
first = 1
for part in partList:
op = part[0]
args = list(part[1:])
if op == figureLine:
if first:
first = 0
a("%s m" % fp_str(args[:2]))
else:
a("%s l" % fp_str(args[:2]))
a("%s l" % fp_str(args[2:]))
elif op == figureArc:
first = 0
x1,y1,x2,y2,startAngle,extent = args[:6]
a(self._genArcCode(x1,y1,x2,y2,startAngle,extent))
elif op == figureCurve:
if first:
first = 0
a("%s m" % fp_str(args[:2]))
else:
a("%s l" % fp_str(args[:2]))
a("%s curveto" % fp_str(args[2:]))
else:
raise TypeError("unknown figure operator: "+op)
if closed:
a("closepath")
self._fillAndStroke(figureCode)
def _fillAndStroke(self,code,clip=0,fill=1,stroke=1):
fill = self._fillColor and fill
stroke = self._strokeColor and stroke
if fill or stroke or clip:
self.code.extend(code)
if fill:
if stroke or clip: self.code_append("gsave")
self.setColor(self._fillColor)
self.code_append("eofill")
if stroke or clip: self.code_append("grestore")
if stroke:
if clip: self.code_append("gsave")
self.setColor(self._strokeColor)
self.code_append("stroke")
if clip: self.code_append("grestore")
if clip:
self.code_append("clip")
self.code_append("newpath")
def translate(self,x,y):
self.code_append('%s translate' % fp_str(x,y))
def scale(self,x,y):
self.code_append('%s scale' % fp_str(x,y))
def transform(self,a,b,c,d,e,f):
self.code_append('[%s] concat' % fp_str(a,b,c,d,e,f))
def _drawTimeResize(self,w,h):
'''if this is used we're probably in the wrong world'''
self.width, self.height = w, h
############################################################################################
    # drawImage(self, image, x1, y1, x2=None, y2=None) is now defined by either _drawImageLevel1
    # or _drawImageLevel2; the choice is made in drawImage depending on self.PostScriptLevel
def _drawImageLevel1(self, image, x1, y1, x2=None,y2=None):
# Postscript Level1 version available for fallback mode when Level2 doesn't work
"""drawImage(self,image,x1,y1,x2=None,y2=None) : If x2 and y2 are ommitted, they are
calculated from image size. (x1,y1) is upper left of image, (x2,y2) is lower right of
image in piddle coordinates."""
# For now let's start with 24 bit RGB images (following piddlePDF again)
component_depth = 8
myimage = image.convert('RGB')
imgwidth, imgheight = myimage.size
if not x2:
x2 = imgwidth + x1
if not y2:
y2 = y1 + imgheight
drawwidth = x2 - x1
drawheight = y2 - y1
#print 'Image size (%d, %d); Draw size (%d, %d)' % (imgwidth, imgheight, drawwidth, drawheight)
# now I need to tell postscript how big image is
# "image operators assume that they receive sample data from
# their data source in x-axis major index order. The coordinate
# of the lower-left corner of the first sample is (0,0), of the
# second (1,0) and so on" -PS2 ref manual p. 215
#
        # The ImageMatrix maps unit square of user space to boundary of the source image
#
# The CurrentTransformationMatrix (CTM) maps the unit square of
# user space to the rect...on the page that is to receive the
# image. A common ImageMatrix is [width 0 0 -height 0 height]
# (for a left to right, top to bottom image )
# first let's map the user coordinates start at offset x1,y1 on page
self.code.extend([
'gsave',
            '%s %s translate' % (x1,-y1 - drawheight), # need to start at lower left of image
'%s %s scale' % (drawwidth,drawheight),
'/scanline %d 3 mul string def' % imgwidth # scanline by multiples of image width
])
# now push the dimensions and depth info onto the stack
# and push the ImageMatrix to map the source to the target rectangle (see above)
        # finally specify source (PS2 pp. 225) and by example
self.code.extend([
'%s %s %s' % (imgwidth, imgheight, component_depth),
'[%s %s %s %s %s %s]' % (imgwidth, 0, 0, -imgheight, 0, imgheight),
'{ currentfile scanline readhexstring pop } false 3',
'colorimage '
])
        # data source output -- now we just need to deliver a hex encoded
        # series of lines of the right overall size; we can follow
        # piddlePDF again
rawimage = (myimage.tobytes if hasattr(myimage,'tobytes') else myimage.tostring)()
hex_encoded = self._AsciiHexEncode(rawimage)
# write in blocks of 78 chars per line
outstream = getStringIO(hex_encoded)
dataline = outstream.read(78)
while dataline != "":
self.code_append(dataline)
dataline= outstream.read(78)
self.code_append('% end of image data') # for clarity
self.code_append('grestore') # return coordinates to normal
# end of drawImage
def _AsciiHexEncode(self, input): # also based on piddlePDF
"Helper function used by images"
output = getStringIO()
for char in asBytes(input):
output.write('%02x' % char2int(char))
return output.getvalue()
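    # Illustrative sketch, not part of the original class: every raw byte becomes two
    # hex characters, matching the /ASCIIHexDecode filter used in _drawImageLevel2.
    def _example_AsciiHexEncode(self):
        return self._AsciiHexEncode(b'\x00\xffA')   # -> '00ff41'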
def _drawImageLevel2(self, image, x1,y1, x2=None,y2=None): # Postscript Level2 version
'''At present we're handling only PIL'''
### what sort of image are we to draw
if image.mode=='L' :
imBitsPerComponent = 8
imNumComponents = 1
myimage = image
        elif image.mode == '1':
            myimage = image.convert('L')
            imNumComponents = 1
            imBitsPerComponent = 8
else :
myimage = image.convert('RGB')
imNumComponents = 3
imBitsPerComponent = 8
imwidth, imheight = myimage.size
if not x2:
x2 = imwidth + x1
if not y2:
y2 = y1 + imheight
drawwidth = x2 - x1
drawheight = y2 - y1
self.code.extend([
'gsave',
            '%s %s translate' % (x1,-y1 - drawheight), # need to start at lower left of image
'%s %s scale' % (drawwidth,drawheight)])
if imNumComponents == 3 :
self.code_append('/DeviceRGB setcolorspace')
elif imNumComponents == 1 :
self.code_append('/DeviceGray setcolorspace')
# create the image dictionary
self.code_append("""
<<
/ImageType 1
/Width %d /Height %d %% dimensions of source image
/BitsPerComponent %d""" % (imwidth, imheight, imBitsPerComponent) )
if imNumComponents == 1:
self.code_append('/Decode [0 1]')
if imNumComponents == 3:
self.code_append('/Decode [0 1 0 1 0 1] %% decode color values normally')
self.code.extend([ '/ImageMatrix [%s 0 0 %s 0 %s]' % (imwidth, -imheight, imheight),
'/DataSource currentfile /ASCIIHexDecode filter',
'>> % End image dictionary',
'image'])
# after image operator just need to dump image dat to file as hexstring
rawimage = (myimage.tobytes if hasattr(myimage,'tobytes') else myimage.tostring)()
hex_encoded = self._AsciiHexEncode(rawimage)
# write in blocks of 78 chars per line
outstream = getStringIO(hex_encoded)
dataline = outstream.read(78)
while dataline != "":
self.code_append(dataline)
dataline= outstream.read(78)
        self.code_append('> % end of image data') # > is EOD for hex encoded filter, for clarity
self.code_append('grestore') # return coordinates to normal
# renderpdf - draws them onto a canvas
"""Usage:
from reportlab.graphics import renderPS
renderPS.draw(drawing, canvas, x, y)
Execute the script to see some test drawings."""
from reportlab.graphics.shapes import *
# hack so we only get warnings once each
#warnOnce = WarnOnce()
# the main entry point for users...
def draw(drawing, canvas, x=0, y=0, showBoundary=rl_config.showBoundary):
"""As it says"""
R = _PSRenderer()
R.draw(renderScaledDrawing(drawing), canvas, x, y, showBoundary=showBoundary)
def _pointsFromList(L):
'''
given a list of coordinates [x0, y0, x1, y1....]
    produce a list of points [(x0,y0), (x1,y1), ....]
'''
P=[]
a = P.append
for i in range(0,len(L),2):
a((L[i],L[i+1]))
return P
class _PSRenderer(Renderer):
"""This draws onto a EPS document. It needs to be a class
rather than a function, as some EPS-specific state tracking is
needed outside of the state info in the SVG model."""
def __init__(self):
self._tracker = StateTracker()
def drawNode(self, node):
"""This is the recursive method called for each node
in the tree"""
self._canvas.comment('begin node %r'%node)
color = self._canvas._color
if not (isinstance(node, Path) and node.isClipPath):
self._canvas.saveState()
#apply state changes
deltas = getStateDelta(node)
self._tracker.push(deltas)
self.applyStateChanges(deltas, {})
#draw the object, or recurse
self.drawNodeDispatcher(node)
rDeltas = self._tracker.pop()
if not (isinstance(node, Path) and node.isClipPath):
self._canvas.restoreState()
self._canvas.comment('end node %r'%node)
self._canvas._color = color
#restore things we might have lost (without actually doing anything).
for k, v in rDeltas.items():
if k in self._restores:
setattr(self._canvas,self._restores[k],v)
## _restores = {'stroke':'_stroke','stroke_width': '_lineWidth','stroke_linecap':'_lineCap',
## 'stroke_linejoin':'_lineJoin','fill':'_fill','font_family':'_font',
## 'font_size':'_fontSize'}
_restores = {'strokeColor':'_strokeColor','strokeWidth': '_lineWidth','strokeLineCap':'_lineCap',
'strokeLineJoin':'_lineJoin','fillColor':'_fillColor','fontName':'_font',
'fontSize':'_fontSize'}
def drawRect(self, rect):
if rect.rx == rect.ry == 0:
#plain old rectangle
self._canvas.rect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height)
else:
#cheat and assume ry = rx; better to generalize
#pdfgen roundRect function. TODO
self._canvas.roundRect(
rect.x, rect.y,
rect.x+rect.width, rect.y+rect.height, rect.rx, rect.ry
)
def drawLine(self, line):
if self._canvas._strokeColor:
self._canvas.line(line.x1, line.y1, line.x2, line.y2)
def drawCircle(self, circle):
self._canvas.circle( circle.cx, circle.cy, circle.r)
def drawWedge(self, wedge):
yradius, radius1, yradius1 = wedge._xtraRadii()
if (radius1==0 or radius1 is None) and (yradius1==0 or yradius1 is None) and not wedge.annular:
startangledegrees = wedge.startangledegrees
endangledegrees = wedge.endangledegrees
centerx= wedge.centerx
centery = wedge.centery
radius = wedge.radius
extent = endangledegrees - startangledegrees
self._canvas.drawArc(centerx-radius, centery-yradius, centerx+radius, centery+yradius,
startangledegrees, extent, fromcenter=1)
else:
P = wedge.asPolygon()
if isinstance(P,Path):
self.drawPath(P)
else:
self.drawPolygon(P)
def drawPolyLine(self, p):
if self._canvas._strokeColor:
self._canvas.polyLine(_pointsFromList(p.points))
def drawEllipse(self, ellipse):
#need to convert to pdfgen's bounding box representation
x1 = ellipse.cx - ellipse.rx
x2 = ellipse.cx + ellipse.rx
y1 = ellipse.cy - ellipse.ry
y2 = ellipse.cy + ellipse.ry
self._canvas.ellipse(x1,y1,x2,y2)
def drawPolygon(self, p):
self._canvas.polygon(_pointsFromList(p.points), closed=1)
def drawString(self, stringObj):
if self._canvas._fillColor:
S = self._tracker.getState()
text_anchor, x, y, text = S['textAnchor'], stringObj.x,stringObj.y,stringObj.text
if not text_anchor in ['start','inherited']:
font, fontSize = S['fontName'], S['fontSize']
textLen = stringWidth(text, font,fontSize)
if text_anchor=='end':
x -= textLen
elif text_anchor=='middle':
x -= textLen/2
elif text_anchor=='numeric':
x -= numericXShift(text_anchor,text,textLen,font,fontSize,encoding='winansi')
else:
raise ValueError('bad value for text_anchor '+str(text_anchor))
self._canvas.drawString(x,y,text)
def drawPath(self, path):
from reportlab.graphics.shapes import _renderPath
c = self._canvas
drawFuncs = (c.moveTo, c.lineTo, c.curveTo, c.closePath)
isClosed = _renderPath(path, drawFuncs)
if not isClosed:
c._fillColor = None
c._fillAndStroke([], clip=path.isClipPath)
def applyStateChanges(self, delta, newState):
"""This takes a set of states, and outputs the operators
needed to set those properties"""
for key, value in delta.items():
if key == 'transform':
self._canvas.transform(value[0], value[1], value[2],
value[3], value[4], value[5])
elif key == 'strokeColor':
#this has different semantics in PDF to SVG;
#we always have a color, and either do or do
#not apply it; in SVG one can have a 'None' color
self._canvas.setStrokeColor(value)
elif key == 'strokeWidth':
self._canvas.setLineWidth(value)
elif key == 'strokeLineCap': #0,1,2
self._canvas.setLineCap(value)
elif key == 'strokeLineJoin':
self._canvas.setLineJoin(value)
elif key == 'strokeDashArray':
if value:
if isinstance(value,(list,tuple)) and len(value)==2 and isinstance(value[1],(tuple,list)):
phase = value[0]
value = value[1]
else:
phase = 0
self._canvas.setDash(value,phase)
else:
self._canvas.setDash()
## elif key == 'stroke_opacity':
## warnOnce('Stroke Opacity not supported yet')
elif key == 'fillColor':
#this has different semantics in PDF to SVG;
#we always have a color, and either do or do
#not apply it; in SVG one can have a 'None' color
self._canvas.setFillColor(value)
## elif key == 'fill_rule':
## warnOnce('Fill rules not done yet')
## elif key == 'fill_opacity':
## warnOnce('Fill opacity not done yet')
elif key in ['fontSize', 'fontName']:
# both need setting together in PDF
# one or both might be in the deltas,
# so need to get whichever is missing
fontname = delta.get('fontName', self._canvas._font)
fontsize = delta.get('fontSize', self._canvas._fontSize)
self._canvas.setFont(fontname, fontsize)
def drawImage(self, image):
from reportlab.lib.utils import ImageReader
im = ImageReader(image.path)
x0 = image.x
y0 = image.y
x1 = image.width
if x1 is not None: x1 += x0
y1 = image.height
if y1 is not None: y1 += y0
self._canvas.drawImage(im._image,x0,y0,x1,y1)
def drawToFile(d,fn, showBoundary=rl_config.showBoundary,**kwd):
d = renderScaledDrawing(d)
c = PSCanvas((d.width,d.height))
draw(d, c, 0, 0, showBoundary=showBoundary)
c.save(fn)
def drawToString(d, showBoundary=rl_config.showBoundary):
"Returns a PS as a string in memory, without touching the disk"
s = getBytesIO()
drawToFile(d, s, showBoundary=showBoundary)
return s.getvalue()
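# Illustrative sketch, not part of the original module: renders a small drawing to an
# EPS byte string entirely in memory via drawToString above.
def _example_drawToString():
    from reportlab.graphics.shapes import Drawing, Rect
    d = Drawing(50, 50)
    d.add(Rect(5, 5, 40, 40, fillColor=black))
    return drawToString(d)   # EPS source as bytes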
#########################################################
#
# test code. First, define a bunch of drawings.
# Routine to draw them comes at the end.
#
#########################################################
def test(outDir='epsout',shout=False):
from reportlab.graphics import testshapes
from reportlab.rl_config import verbose
OLDFONTS = testshapes._FONTS[:]
testshapes._FONTS[:] = ['Times-Roman','Times-Bold','Times-Italic', 'Times-BoldItalic','Courier']
try:
import os
# save all drawings and their doc strings from the test file
if not os.path.isdir(outDir):
os.mkdir(outDir)
#grab all drawings from the test module
drawings = []
for funcname in dir(testshapes):
if funcname[0:10] == 'getDrawing':
drawing = eval('testshapes.' + funcname + '()') #execute it
docstring = eval('testshapes.' + funcname + '.__doc__')
drawings.append((drawing, docstring))
i = 0
for (d, docstring) in drawings:
filename = outDir + os.sep + 'renderPS_%d.eps'%i
drawToFile(d,filename)
if shout or verbose>2: print('renderPS test saved %s' % ascii(filename))
i += 1
finally:
testshapes._FONTS[:] = OLDFONTS
if __name__=='__main__':
import sys
if len(sys.argv)>1:
outdir = sys.argv[1]
else:
outdir = 'epsout'
test(outdir,shout=True)
|
gpl-3.0
|
achang97/YouTunes
|
lib/python2.7/site-packages/markupsafe/_native.py
|
1243
|
1187
|
# -*- coding: utf-8 -*-
"""
markupsafe._native
~~~~~~~~~~~~~~~~~~
    Native Python implementation used when the C module is not compiled.
:copyright: (c) 2010 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from markupsafe import Markup
from markupsafe._compat import text_type
def escape(s):
"""Convert the characters &, <, >, ' and " in string s to HTML-safe
sequences. Use this if you need to display text that might contain
such characters in HTML. Marks return value as markup string.
"""
if hasattr(s, '__html__'):
return s.__html__()
return Markup(text_type(s)
.replace('&', '&')
.replace('>', '>')
.replace('<', '<')
.replace("'", ''')
.replace('"', '"')
)
def escape_silent(s):
"""Like :func:`escape` but converts `None` into an empty
markup string.
"""
if s is None:
return Markup()
return escape(s)
def soft_unicode(s):
"""Make a string unicode if it isn't already. That way a markup
string is not converted back to unicode.
"""
if not isinstance(s, text_type):
s = text_type(s)
return s
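# Illustrative sketch, not part of the original module: escape() returns a Markup
# string so it will not be escaped again, and escape_silent() maps None to an empty
# markup string.
def _example_escape_usage():
    safe = escape('<b>"Tom" & \'Jerry\'</b>')
    # safe == Markup(u'&lt;b&gt;&#34;Tom&#34; &amp; &#39;Jerry&#39;&lt;/b&gt;')
    return safe, escape_silent(None) == Markup()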
|
mit
|
thundernet8/WRGameVideos-Server
|
venv/lib/python2.7/site-packages/jinja2/exceptions.py
|
977
|
4428
|
# -*- coding: utf-8 -*-
"""
jinja2.exceptions
~~~~~~~~~~~~~~~~~
Jinja exceptions.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import imap, text_type, PY2, implements_to_string
class TemplateError(Exception):
"""Baseclass for all template errors."""
if PY2:
def __init__(self, message=None):
if message is not None:
message = text_type(message).encode('utf-8')
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message.decode('utf-8', 'replace')
def __unicode__(self):
return self.message or u''
else:
def __init__(self, message=None):
Exception.__init__(self, message)
@property
def message(self):
if self.args:
message = self.args[0]
if message is not None:
return message
@implements_to_string
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist."""
# looks weird, but removes the warning descriptor that just
# bogusly warns us about message being deprecated
message = None
def __init__(self, name, message=None):
IOError.__init__(self)
if message is None:
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
message = u'none of the templates given were found: ' + \
u', '.join(imap(text_type, names))
TemplateNotFound.__init__(self, names and names[-1] or None, message)
self.templates = list(names)
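# Illustrative sketch, not part of the original module: because TemplatesNotFound
# subclasses TemplateNotFound, catching the base class covers both single- and
# multi-template lookup failures.  ``environment`` is assumed to be a jinja2
# Environment instance.
def _example_catch_missing_templates(environment, names):
    try:
        return environment.select_template(names)
    except TemplateNotFound as exc:
        return exc.templates  # every template name that was tried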
@implements_to_string
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = 'line %d' % self.lineno
name = self.filename or self.name
if name:
location = 'File "%s", %s' % (name, location)
lines = [self.message, ' ' + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(' ' + line.strip())
return u'\n'.join(lines)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
arguments
"""
|
gpl-2.0
|