repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
jmerkow/VTK | Parallel/Core/Testing/Python/TestPolyDataPieces.py | 12 | 2945 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
math = vtk.vtkMath()
math.RandomSeed(22)
sphere = vtk.vtkSphereSource()
sphere.SetPhiResolution(32)
sphere.SetThetaResolution(32)
extract = vtk.vtkExtractPolyDataPiece()
extract.SetInputConnection(sphere.GetOutputPort())
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(extract.GetOutputPort())
ps = vtk.vtkPieceScalars()
ps.SetInputConnection(normals.GetOutputPort())
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(ps.GetOutputPort())
mapper.SetNumberOfPieces(2)
actor = vtk.vtkActor()
actor.SetMapper(mapper)
sphere2 = vtk.vtkSphereSource()
sphere2.SetPhiResolution(32)
sphere2.SetThetaResolution(32)
extract2 = vtk.vtkExtractPolyDataPiece()
extract2.SetInputConnection(sphere2.GetOutputPort())
mapper2 = vtk.vtkPolyDataMapper()
mapper2.SetInputConnection(extract2.GetOutputPort())
mapper2.SetNumberOfPieces(2)
mapper2.SetPiece(1)
mapper2.SetScalarRange(0, 4)
mapper2.SetScalarModeToUseCellFieldData()
mapper2.SetColorModeToMapScalars()
mapper2.ColorByArrayComponent(vtk.vtkDataSetAttributes.GhostArrayName(), 0)
mapper2.SetGhostLevel(4)
# check the pipeline size
extract2.UpdateInformation()
psize = vtk.vtkPipelineSize()
if (psize.GetEstimatedSize(extract2, 0, 0) > 100):
    print("ERROR: Pipeline Size increased")
    pass
if (psize.GetNumberOfSubPieces(10, mapper2) != 1):
    print("ERROR: Number of sub pieces changed",
          psize.GetNumberOfSubPieces(10, mapper2))
    pass
actor2 = vtk.vtkActor()
actor2.SetMapper(mapper2)
actor2.SetPosition(1.5, 0, 0)
sphere3 = vtk.vtkSphereSource()
sphere3.SetPhiResolution(32)
sphere3.SetThetaResolution(32)
extract3 = vtk.vtkExtractPolyDataPiece()
extract3.SetInputConnection(sphere3.GetOutputPort())
ps3 = vtk.vtkPieceScalars()
ps3.SetInputConnection(extract3.GetOutputPort())
mapper3 = vtk.vtkPolyDataMapper()
mapper3.SetInputConnection(ps3.GetOutputPort())
mapper3.SetNumberOfSubPieces(8)
mapper3.SetScalarRange(0, 8)
actor3 = vtk.vtkActor()
actor3.SetMapper(mapper3)
actor3.SetPosition(0, -1.5, 0)
sphere4 = vtk.vtkSphereSource()
sphere4.SetPhiResolution(32)
sphere4.SetThetaResolution(32)
extract4 = vtk.vtkExtractPolyDataPiece()
extract4.SetInputConnection(sphere4.GetOutputPort())
ps4 = vtk.vtkPieceScalars()
ps4.RandomModeOn()
ps4.SetScalarModeToCellData()
ps4.SetInputConnection(extract4.GetOutputPort())
mapper4 = vtk.vtkPolyDataMapper()
mapper4.SetInputConnection(ps4.GetOutputPort())
mapper4.SetNumberOfSubPieces(8)
mapper4.SetScalarRange(0, 8)
actor4 = vtk.vtkActor()
actor4.SetMapper(mapper4)
actor4.SetPosition(1.5, -1.5, 0)
ren = vtk.vtkRenderer()
ren.AddActor(actor)
ren.AddActor(actor2)
ren.AddActor(actor3)
ren.AddActor(actor4)
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
iren.Initialize()
#iren.Start()
| bsd-3-clause |
steinwurf/yaml-cpp | test/gmock-1.7.0/gtest/test/gtest_test_utils.py | 1100 | 10812 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test utilities for Google C++ Testing Framework."""
__author__ = '[email protected] (Zhanyong Wan)'
import atexit
import os
import shutil
import sys
import tempfile
import unittest
_test_module = unittest
# Suppresses the 'Import not at the top of the file' lint complaint.
# pylint: disable-msg=C6204
try:
import subprocess
_SUBPROCESS_MODULE_AVAILABLE = True
except:
import popen2
_SUBPROCESS_MODULE_AVAILABLE = False
# pylint: enable-msg=C6204
GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT'
IS_WINDOWS = os.name == 'nt'
IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0]
# The environment variable for specifying the path to the premature-exit file.
PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE'
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets/unsets an environment variable to a given value."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
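# Illustrative usage sketch (added for this write-up; not part of the original
# gtest utility). 'MY_FLAG' is a hypothetical variable name; note that
# SetEnvVar() only touches the module-level `environ` copy, not os.environ:
#
#   SetEnvVar('MY_FLAG', '1')    # adds/overwrites the entry in environ
#   SetEnvVar('MY_FLAG', None)   # removes the entry again if present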
# Here we expose a class from a particular module, depending on the
# environment. The comment suppresses the 'Invalid variable name' lint
# complaint.
TestCase = _test_module.TestCase # pylint: disable-msg=C6409
# Initially maps a flag to its default value. After
# _ParseAndStripGTestFlags() is called, maps a flag to its actual value.
_flag_map = {'source_dir': os.path.dirname(sys.argv[0]),
'build_dir': os.path.dirname(sys.argv[0])}
_gtest_flags_are_parsed = False
def _ParseAndStripGTestFlags(argv):
"""Parses and strips Google Test flags from argv. This is idempotent."""
# Suppresses the lint complaint about a global variable since we need it
# here to maintain module-wide state.
global _gtest_flags_are_parsed # pylint: disable-msg=W0603
if _gtest_flags_are_parsed:
return
_gtest_flags_are_parsed = True
for flag in _flag_map:
# The environment variable overrides the default value.
if flag.upper() in os.environ:
_flag_map[flag] = os.environ[flag.upper()]
# The command line flag overrides the environment variable.
i = 1 # Skips the program name.
while i < len(argv):
prefix = '--' + flag + '='
if argv[i].startswith(prefix):
_flag_map[flag] = argv[i][len(prefix):]
del argv[i]
break
else:
# We don't increment i in case we just found a --gtest_* flag
# and removed it from argv.
i += 1
def GetFlag(flag):
"""Returns the value of the given flag."""
# In case GetFlag() is called before Main(), we always call
# _ParseAndStripGTestFlags() here to make sure the --gtest_* flags
# are parsed.
_ParseAndStripGTestFlags(sys.argv)
return _flag_map[flag]
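# Illustrative sketch (added; not part of the original gtest utility). Assuming
# the script is launched as `./some_test.py --build_dir=/tmp/out`, the parsing
# above strips the flag from sys.argv, after which GetFlag('build_dir') returns
# '/tmp/out'; with no flag and no BUILD_DIR environment variable it falls back
# to the directory containing the script:
#
#   build_dir = GetFlag('build_dir')   # e.g. '/tmp/out' in the case above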
def GetSourceDir():
"""Returns the absolute path of the directory where the .py files are."""
return os.path.abspath(GetFlag('source_dir'))
def GetBuildDir():
"""Returns the absolute path of the directory where the test binaries are."""
return os.path.abspath(GetFlag('build_dir'))
_temp_dir = None
def _RemoveTempDir():
if _temp_dir:
shutil.rmtree(_temp_dir, ignore_errors=True)
atexit.register(_RemoveTempDir)
def GetTempDir():
"""Returns a directory for temporary files."""
global _temp_dir
if not _temp_dir:
_temp_dir = tempfile.mkdtemp()
return _temp_dir
def GetTestExecutablePath(executable_name, build_dir=None):
"""Returns the absolute path of the test binary given its name.
The function will print a message and abort the program if the resulting file
doesn't exist.
Args:
executable_name: name of the test binary that the test script runs.
build_dir: directory where to look for executables, by default
the result of GetBuildDir().
Returns:
The absolute path of the test binary.
"""
path = os.path.abspath(os.path.join(build_dir or GetBuildDir(),
executable_name))
if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'):
path += '.exe'
if not os.path.exists(path):
message = (
'Unable to find the test binary. Please make sure to provide path\n'
'to the binary via the --build_dir flag or the BUILD_DIR\n'
'environment variable.')
print >> sys.stderr, message
sys.exit(1)
return path
def GetExitStatus(exit_code):
"""Returns the argument to exit(), or -1 if exit() wasn't called.
Args:
exit_code: the result value of os.system(command).
"""
if os.name == 'nt':
# On Windows, os.WEXITSTATUS() doesn't work and os.system() returns
# the argument to exit() directly.
return exit_code
else:
# On Unix, os.WEXITSTATUS() must be used to extract the exit status
# from the result of os.system().
if os.WIFEXITED(exit_code):
return os.WEXITSTATUS(exit_code)
else:
return -1
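# Illustrative sketch (added; not part of the original gtest utility). The
# command below is hypothetical; on a Unix host os.system() returns a wait
# status that GetExitStatus() unpacks with os.WEXITSTATUS():
#
#   status = os.system('exit 7')        # shell exits with code 7
#   assert GetExitStatus(status) == 7   # -1 would mean death by a signal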
class Subprocess:
def __init__(self, command, working_dir=None, capture_stderr=True, env=None):
"""Changes into a specified directory, if provided, and executes a command.
Restores the old directory afterwards.
Args:
command: The command to run, in the form of sys.argv.
working_dir: The directory to change into.
capture_stderr: Determines whether to capture stderr in the output member
or to discard it.
env: Dictionary with environment to pass to the subprocess.
Returns:
An object that represents the outcome of the executed process. It has the
following attributes:
terminated_by_signal True iff the child process has been terminated
by a signal.
signal Signal that terminated the child process.
exited True iff the child process exited normally.
exit_code The code with which the child process exited.
output Child process's stdout and stderr output
combined in a string.
"""
# The subprocess module is the preferable way of running programs
# since it is available and behaves consistently on all platforms,
# including Windows. But it is only available starting in python 2.4.
# In earlier python versions, we revert to the popen2 module, which is
# available in python 2.0 and later but doesn't provide required
# functionality (Popen4) under Windows. This allows us to support Mac
# OS X 10.4 Tiger, which has python 2.3 installed.
if _SUBPROCESS_MODULE_AVAILABLE:
if capture_stderr:
stderr = subprocess.STDOUT
else:
stderr = subprocess.PIPE
p = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=stderr,
cwd=working_dir, universal_newlines=True, env=env)
# communicate returns a tuple with the file object for the child's
# output.
self.output = p.communicate()[0]
self._return_code = p.returncode
else:
old_dir = os.getcwd()
def _ReplaceEnvDict(dest, src):
# Changes made by os.environ.clear are not inheritable by child
# processes until Python 2.6. To produce inheritable changes we have
# to delete environment items with the del statement.
for key in dest.keys():
del dest[key]
dest.update(src)
# When 'env' is not None, backup the environment variables and replace
# them with the passed 'env'. When 'env' is None, we simply use the
# current 'os.environ' for compatibility with the subprocess.Popen
# semantics used above.
if env is not None:
old_environ = os.environ.copy()
_ReplaceEnvDict(os.environ, env)
try:
if working_dir is not None:
os.chdir(working_dir)
if capture_stderr:
p = popen2.Popen4(command)
else:
p = popen2.Popen3(command)
p.tochild.close()
self.output = p.fromchild.read()
ret_code = p.wait()
finally:
os.chdir(old_dir)
# Restore the old environment variables
# if they were replaced.
if env is not None:
_ReplaceEnvDict(os.environ, old_environ)
# Converts ret_code to match the semantics of
# subprocess.Popen.returncode.
if os.WIFSIGNALED(ret_code):
self._return_code = -os.WTERMSIG(ret_code)
else: # os.WIFEXITED(ret_code) should return True here.
self._return_code = os.WEXITSTATUS(ret_code)
if self._return_code < 0:
self.terminated_by_signal = True
self.exited = False
self.signal = -self._return_code
else:
self.terminated_by_signal = False
self.exited = True
self.exit_code = self._return_code
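# Illustrative usage sketch (added; not part of the original gtest utility).
# The command is hypothetical; it shows the attributes documented in the
# class docstring above:
#
#   p = Subprocess(['python', '-c', 'pass'], capture_stderr=True)
#   if p.exited and p.exit_code == 0:
#       combined_output = p.output       # child's stdout and stderr, combined
#   elif p.terminated_by_signal:
#       killing_signal = p.signal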
def Main():
"""Runs the unit test."""
# We must call _ParseAndStripGTestFlags() before calling
# unittest.main(). Otherwise the latter will be confused by the
# --gtest_* flags.
_ParseAndStripGTestFlags(sys.argv)
# The tested binaries should not be writing XML output files unless the
# script explicitly instructs them to.
# TODO([email protected]): Move this into Subprocess when we implement
# passing environment into it as a parameter.
if GTEST_OUTPUT_VAR_NAME in os.environ:
del os.environ[GTEST_OUTPUT_VAR_NAME]
_test_module.main()
| mit |
hanzorama/magenta | magenta/models/attention_rnn/attention_rnn_generate.py | 1 | 1936 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Generate melodies from a trained checkpoint of the attention RNN model.
Example usage:
$ bazel build magenta/models/attention_rnn:attention_rnn_generate
$ ./bazel-bin/magenta/models/attention_rnn/attention_rnn_generate \
--run_dir=/tmp/lookback_rnn/logdir/run1 \
--output_dir=/tmp/lookback_rnn/generated \
--num_outputs=10 \
--num_steps=128 \
--primer_melody="[60]"
See /magenta/models/shared/melody_rnn_generate.py for flag descriptions.
"""
# internal imports
import tensorflow as tf
from magenta.models.attention_rnn import attention_rnn_generator
from magenta.models.shared import melody_rnn_generate
def main(unused_argv):
  melody_rnn_generate.setup_logs()
  with attention_rnn_generator.create_generator(
      melody_rnn_generate.get_checkpoint(),
      melody_rnn_generate.get_bundle(),
      melody_rnn_generate.get_steps_per_quarter(),
      melody_rnn_generate.get_hparams()) as generator:
    if melody_rnn_generate.should_save_generator_bundle():
      tf.logging.info('Saving generator bundle to %s' % (
          melody_rnn_generate.get_bundle_file()))
      generator.create_bundle_file(melody_rnn_generate.get_bundle_file())
    else:
      melody_rnn_generate.run_with_flags(generator)
def console_entry_point():
  tf.app.run(main)
if __name__ == '__main__':
  console_entry_point()
| apache-2.0 |
skawu/RT-Thread-STM32F103ZET6 | tools/vs.py | 44 | 4662 | #
# File : vs.py
# This file is part of RT-Thread RTOS
# COPYRIGHT (C) 2006 - 2015, RT-Thread Development Team
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Change Logs:
# Date Author Notes
# 2015-01-20 Bernard Add copyright information
#
import os
import sys
import string
import building
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
fs_encoding = sys.getfilesystemencoding()
def VS_AddGroup(ProjectFiles, parent, name, files, project_path):
Filter = SubElement(parent, 'Filter')
Filter.set('Name', name)  # set the group name on the new Filter element
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
File = SubElement(Filter, 'File')
File.set('RelativePath', path.decode(fs_encoding))
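# Illustrative note (added; not part of the original script): for a group named
# "Kernel" containing src/clock.c, the loop above appends roughly this fragment
# to the template, with the path made relative to the project file (the names
# here are hypothetical):
#
#   <Filter Name="Kernel">
#       <File RelativePath="..\src\clock.c"/>
#   </Filter>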
def VS_AddHeadFilesGroup(program, elem, project_path):
building.source_ext = []
building.source_ext = ["h"]
for item in program:
building.walk_children(item)
building.source_list.sort()
# print building.source_list
for f in building.source_list:
path = _make_path_relative(project_path, f)
File = SubElement(elem, 'File')
File.set('RelativePath', path.decode(fs_encoding))
def VSProject(target, script, program):
project_path = os.path.dirname(os.path.abspath(target))
tree = etree.parse('template_vs2005.vcproj')
root = tree.getroot()
out = file(target, 'wb')
out.write('<?xml version="1.0" encoding="UTF-8"?>\r\n')
ProjectFiles = []
# add "*.c" files group
for elem in tree.iter(tag='Filter'):
if elem.attrib['Name'] == 'Source Files':
#print elem.tag, elem.attrib
break
for group in script:
group_xml = VS_AddGroup(ProjectFiles, elem, group['name'], group['src'], project_path)
# add "*.h" files group
for elem in tree.iter(tag='Filter'):
if elem.attrib['Name'] == 'Header Files':
break
VS_AddHeadFilesGroup(program, elem, project_path)
# write head include path
if building.Env.has_key('CPPPATH'):
cpp_path = building.Env['CPPPATH']
paths = set()
for path in cpp_path:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
paths.sort()
cpp_path = ';'.join(paths)
# write include path, definitions
for elem in tree.iter(tag='Tool'):
if elem.attrib['Name'] == 'VCCLCompilerTool':
#print elem.tag, elem.attrib
break
elem.set('AdditionalIncludeDirectories', cpp_path)
# write cpp definitions flags
if building.Env.has_key('CPPDEFINES'):
definitions = ';'.join(building.Env['CPPDEFINES'])
elem.set('PreprocessorDefinitions', definitions)
# write link flags
# write lib dependence
if building.Env.has_key('LIBS'):
for elem in tree.iter(tag='Tool'):
if elem.attrib['Name'] == 'VCLinkerTool':
break
libs_with_extention = [i+'.lib' for i in building.Env['LIBS']]
libs = ' '.join(libs_with_extention)
elem.set('AdditionalDependencies', libs)
# write lib include path
if building.Env.has_key('LIBPATH'):
lib_path = building.Env['LIBPATH']
paths = set()
for path in lib_path:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
paths.sort()
lib_paths = ';'.join(paths)
elem.set('AdditionalLibraryDirectories', lib_paths)
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8'))
out.close()
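# Illustrative usage sketch (added; not part of the original script). In
# RT-Thread this function is normally driven by the SCons build scripts;
# 'script' is a list of {'name': ..., 'src': ...} group dicts and 'program'
# holds the SCons program nodes, so a hypothetical call looks like:
#
#   VSProject('project.vcproj', script, program)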
| gpl-2.0 |
svdata/kubernetes | cluster/juju/charms/trusty/kubernetes-master/unit_tests/kubernetes_installer_test.py | 213 | 4910 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mock import patch
from path import path
from path import Path
import pytest
import subprocess
import sys
# Add the hooks directory to the python path.
hooks_dir = Path('__file__').parent.abspath() / 'hooks'
sys.path.insert(0, hooks_dir.abspath())
# Import the module to be tested.
import kubernetes_installer
def test_run():
""" Test the run method both with valid commands and invalid commands. """
ls = 'ls -l {0}/kubernetes_installer.py'.format(hooks_dir)
output = kubernetes_installer.run(ls, False)
assert output
assert 'kubernetes_installer.py' in output
output = kubernetes_installer.run(ls, True)
assert output
assert 'kubernetes_installer.py' in output
invalid_directory = path('/not/a/real/directory')
assert not invalid_directory.exists()
invalid_command = 'ls {0}'.format(invalid_directory)
with pytest.raises(subprocess.CalledProcessError) as error:
kubernetes_installer.run(invalid_command)
print(error)
with pytest.raises(subprocess.CalledProcessError) as error:
kubernetes_installer.run(invalid_command, shell=True)
print(error)
class TestKubernetesInstaller():
def makeone(self, *args, **kw):
""" Create the KubernetesInstaller object and return it. """
from kubernetes_installer import KubernetesInstaller
return KubernetesInstaller(*args, **kw)
def test_init(self):
""" Test that the init method correctly assigns the variables. """
ki = self.makeone('i386', '3.0.1', '/tmp/does_not_exist')
assert ki.aliases
assert 'kube-apiserver' in ki.aliases
assert 'kube-controller-manager' in ki.aliases
assert 'kube-scheduler' in ki.aliases
assert 'kubectl' in ki.aliases
assert 'kubelet' in ki.aliases
assert ki.arch == 'i386'
assert ki.version == '3.0.1'
assert ki.output_dir == path('/tmp/does_not_exist')
@patch('kubernetes_installer.run')
@patch('kubernetes_installer.subprocess.call')
def test_build(self, cmock, rmock):
""" Test the build method with master and non-master branches. """
directory = path('/tmp/kubernetes_installer_test/build')
ki = self.makeone('amd64', 'v99.00.11', directory)
assert not directory.exists(), 'The %s directory exists!' % directory
# Call the build method with "master" branch.
ki.build("master")
# TODO: run is called many times but mock only remembers last one.
rmock.assert_called_with('git reset --hard origin/master')
# TODO: call is complex and hard to verify with mock, fix that.
cmock.assert_called_once()
# Call the build method with something other than "master" branch.
ki.build("branch")
# TODO: run is called many times, but mock only remembers last one.
rmock.assert_called_with('git checkout -b v99.00.11 branch')
# TODO: call is complex and hard to verify with mock, fix that.
cmock.assert_called_once()
directory.rmtree_p()
def test_install(self):
""" Test the install method that it creates the correct links. """
directory = path('/tmp/kubernetes_installer_test/install')
ki = self.makeone('ppc64le', '1.2.3', directory)
assert not directory.exists(), 'The %s directory exists!' % directory
directory.makedirs_p()
# Create the files for the install method to link to.
(directory / 'kube-apiserver').touch()
(directory / 'kube-controller-manager').touch()
(directory / 'kube-proxy').touch()
(directory / 'kube-scheduler').touch()
(directory / 'kubectl').touch()
(directory / 'kubelet').touch()
results = directory / 'install/results/go/here'
assert not results.exists()
ki.install(results)
assert results.isdir()
# Check that all the files were correctly aliased and are links.
assert (results / 'apiserver').islink()
assert (results / 'controller-manager').islink()
assert (results / 'kube-proxy').islink()
assert (results / 'scheduler').islink()
assert (results / 'kubectl').islink()
assert (results / 'kubelet').islink()
directory.rmtree_p()
| apache-2.0 |
BorgERP/borg-erp-6of3 | verticals/hotel61/hotel/__init__.py | 4 | 1097 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hotel
import wizard
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
mschex1/pokedex | pokedex/formulae.py | 4 | 4538 | # encoding: utf8
"""Faithful translations of calculations the games make."""
from __future__ import division
from itertools import izip
def nCr(n, r):
    """n-choose-r.
    Credit for the "compact" solution goes to:
    http://stackoverflow.com/questions/2096573/counting-combinations-and-permutations-efficiently
    """
    return reduce(
        lambda x, y: x * y[0] / y[1],
        izip(xrange(n - r + 1, n + 1),
             xrange(1, r + 1)),
        1)
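# Worked example (added for illustration; not in the original source):
# choosing 2 items out of 5 gives (4 * 5) / (1 * 2) = 10 combinations.
assert nCr(5, 2) == 10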
def calculated_stat(base_stat, level, iv, effort, nature=None):
    """Returns the calculated stat -- i.e. the value actually shown in the game
    on a Pokémon's status tab.
    """
    # Remember: this is from C; use floor division!
    stat = (base_stat * 2 + iv + effort // 4) * level // 100 + 5
    if nature:
        stat = int(stat * nature)
    return stat
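# Worked example (added for illustration; not in the original source): at
# level 100 with base stat 100, a perfect IV of 31, 252 effort points and a
# neutral nature, (100 * 2 + 31 + 252 // 4) * 100 // 100 + 5 = 294 + 5 = 299.
assert calculated_stat(100, 100, 31, 252) == 299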
def calculated_hp(base_stat, level, iv, effort, nature=None):
    """Similar to `calculated_stat`, except with a slightly different formula
    used specifically for HP.
    """
    # Shedinja's base stat of 1 is special; its HP is always 1
    if base_stat == 1:
        return 1
    return (base_stat * 2 + iv + effort // 4) * level // 100 + 10 + level
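# Worked example (added for illustration; not in the original source): the same
# level-100 Pokémon with base HP 100, IV 31 and 252 effort points gets
# (100 * 2 + 31 + 252 // 4) * 100 // 100 + 10 + 100 = 404 HP, while Shedinja's
# base HP of 1 always yields 1.
assert calculated_hp(100, 100, 31, 252) == 404
assert calculated_hp(1, 50, 31, 0) == 1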
def earned_exp(base_exp, level):
    """Returns the amount of EXP earned when defeating a Pokémon at the given
    level.
    """
    return base_exp * level // 7
def capture_chance(percent_hp, capture_rate,
                   ball_bonus=10, status_bonus=1,
                   capture_bonus=10, capture_modifier=0):
    """Calculates the chance that a Pokémon will be caught, given its capture
    rate and the percentage of HP it has remaining.
    Bonuses are such that 10 means "unchanged".
    Returns five values: the chance of a capture, then the chance of the ball
    shaking three, two, one, or zero times. Each of these is a float such that
    0.0 <= n <= 1.0. Feel free to ignore all but the first.
    """
    # HG/SS Pokéballs modify capture rate rather than the ball bonus
    capture_rate = capture_rate * capture_bonus // 10 + capture_modifier
    if capture_rate < 1:
        capture_rate = 1
    elif capture_rate > 255:
        capture_rate = 255
    # A slight math note:
    # The actual formula uses (3 * max_hp - 2 * curr_hp) / (3 * max_hp)
    # This uses (1 - 2/3 * curr_hp/max_hp)
    # Integer division is taken into account by flooring immediately
    # afterwards, so there should be no appreciable rounding error.
    base_chance = int(
        capture_rate * ball_bonus // 10 * (1 - 2/3 * percent_hp)
    )
    base_chance = base_chance * status_bonus // 10
    # Shake index involves integer sqrt. Lovely.
    isqrt = lambda x: int(x ** 0.5)
    if not base_chance:
        # This is very silly. Due to what must be an oversight, it's possible
        # for the above formula to end with a zero chance to catch, which is
        # then thrown blindly into the below denominator. Luckily, the games'
        # division function is a no-op with a denominator of zero.. which
        # means a base_chance of 0 is effectively a base chance of 1.
        base_chance = 1
    shake_index = 1048560 // isqrt(isqrt(16711680 // base_chance))
    # Iff base_chance < 255, then shake_index < 65535.
    # The Pokémon now has four chances to escape. The game starts picking
    # random uint16s. If such a random number is < shake_index, the Pokémon
    # stays in the ball, and it wobbles. If the number is >= shake_index, the
    # ball breaks open then and there, and the capture fails.
    # If all four are < shake_index, the Pokémon is caught.
    # If shake_index >= 65535, all four randoms must be < it, and the Pokémon
    # will be caught. Skip hard math
    if shake_index >= 65535:
        return (1.0, 0.0, 0.0, 0.0, 0.0)
    # This brings up an interesting invariant: sum(return_value) == 1.0.
    # Something is guaranteed to happen.
    # Alrighty. Here's some probability.
    # The chance that a single random uint16 will be < shake_index, thus
    # keeping the Pokémon in the ball, is:
    p = shake_index / 65536
    # Now, the chance for n wobbles is the chance that the Pokémon will stay in
    # the ball for (n-1) attempts, then break out on the nth.
    # The chance of capture is just the chance that the Pokémon stays in the
    # ball for all four tries.
    # There are five cases: captured, wobbled three times, etc.
    return [
        p**4, # capture
        p**3 * (1 - p),
        p**2 * (1 - p),
        p**1 * (1 - p),
        (1 - p),
    ]
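# Illustrative sanity check (added; not in the original source): as the comment
# above notes, something is guaranteed to happen, so the five probabilities
# always sum to 1.0 -- here for a full-HP Pokémon with capture rate 45 and an
# unmodified Poké Ball.
assert abs(sum(capture_chance(1.0, 45)) - 1.0) < 1e-9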
| mit |
topnotchgeek/capnlog | apps/www/views.py | 1 | 21358 |
import logging
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.views import login, logout
from django.core.urlresolvers import reverse
from django.http import JsonResponse, HttpResponse, HttpResponseBadRequest
from django.db.models import Avg, Min, Max
# Create your views here.
from vanilla import ListView, DetailView, UpdateView, CreateView
from vanilla.views import TemplateView
from .models import BlogEntry, TempHumidity, Station
from .forms import BlogEntryForm
from .util import *
logger = logging.getLogger(__name__)
def wrapped_login(request):
# ctx = _build_context(request, 'Login')
ctx = {}
ctx['page_title'] = 'Login'
# cart = get_cart(request, False)
referer = request.META.get('HTTP_REFERER', None)
if referer:
if "/login" in referer or '/logout' in referer:
pass
else:
ctx['next'] = referer
new_cust=request.GET.get('new_cust', None)
if new_cust:
messages.info(request, 'Welcome %s, thanks for registering!' % (new_cust))
resp = login(request, extra_context=ctx)
# if request.method == 'POST' and hasattr(request, 'user') and request.user.is_authenticated():
# u = request.user
# if cart:
# cart.user = u
# cart.save()
# request.session['cart_id'] = cart.id
# cust = get_customer_from_user(u)
# if cust:
# request.session['cust_id'] = cust.id
# site = get_site(request)
# if site:
# LoggedInUser.objects.filter(username=u.username, site=site).delete()
# else:
# LoggedInUser.objects.filter(username=u.username).delete()
# lu = LoggedInUser()
# lu.username = u.username
## lu.login_time = datetime.datetime.now()
# if site:
# lu.site = site
# lu.save()
return resp
def wrapped_logout(request):
ctx = {}
ctx['page_title'] = 'Logged Out'
resp = logout(request, extra_context=ctx)
return resp
# return logout(request, next_page=reverse('home'))
# site = get_site(request)
# if site:
# LoggedInUser.objects.filter(username=request.user.username, site=site).delete()
# else:
# LoggedInUser.objects.filter(username=request.user.username).delete()
class LoginRequiredMixin(object):
@classmethod
def as_view(cls):
return login_required(super(LoginRequiredMixin, cls).as_view())
class WebsiteView(TemplateView):
def get_page_title(self):
return 'untitled'
def get_context_data(self, **kwargs):
rv = super(WebsiteView, self).get_context_data(**kwargs)
rv['page_title'] = self.get_page_title()
return rv
class HomeView(WebsiteView):
template_name = 'www/home.html'
start_date = datetime.strptime('2016-01-15', '%Y-%m-%d')
# on_date = datetime.strptime('2016-01-15', '%Y-%m-%d')
def get_context_data(self, **kwargs):
rv = super(HomeView, self).get_context_data(**kwargs)
dlt = datetime.now() - self.start_date
rv['start_date'] = self.start_date
rv['onboard'] = dlt.days > 0
rv['days_aboard'] = abs(dlt.days)
return rv
def get_page_title(self):
return 'Captains Blog'
class BlogView(ListView):
template_name = 'www/blog_list.html'
context_object_name = 'blog_entries'
start_date = datetime.strptime('2016-01-15', '%Y-%m-%d')
def get_context_data(self, **kwargs):
rv = super(BlogView, self).get_context_data(**kwargs)
dlt = datetime.now() - self.start_date
rv['start_date'] = self.start_date
rv['onboard'] = dlt.days > 0
rv['days_aboard'] = abs(dlt.days)
rv['page_title'] = 'Captains Log'
# rv['STATIC_URL'] = settings.STATIC_URL
try:
r = BlogEntry.objects.order_by('-create_time')[0]
except IndexError:
r = None
rv['recent_entry'] = r
return rv
def get_queryset(self):
return BlogEntry.objects.order_by('-create_time')
class BlogDetailView(DetailView):
template_name = 'www/blog_detail.html'
context_object_name = 'entry'
model = BlogEntry
lookup_field = 'slug'
def get_context_data(self, **kwargs):
rv = super(BlogDetailView, self).get_context_data(**kwargs)
fbim = FileBasedImageManager(settings.IMAGE_DIR, '*.jpg')
# now = datetime.now()
rv['random_img'] = fbim.get_random_image()
# rv['STATIC_URL'] = settings.STATIC_URL
nxt = None
prv = None
if self.object:
rv['page_title'] = self.object.title
dte = self.object.create_time
try:
prv = BlogEntry.objects.filter(create_time__gt=dte).order_by('create_time')[0]
except IndexError:
prv = None
try:
nxt = BlogEntry.objects.filter(create_time__lt=dte).order_by('-create_time')[0]
except IndexError:
nxt = None
rv['prev_entry'] = prv
rv['next_entry'] = nxt
return rv
class BlogEditView(LoginRequiredMixin, UpdateView):
template_name = 'www/blog_edit.html'
context_object_name = 'entry'
model = BlogEntry
form_class = BlogEntryForm
lookup_field = 'slug'
def get_context_data(self, **kwargs):
rv = super(BlogEditView, self).get_context_data(**kwargs)
if rv is None:
rv = {}
# imgs = []
# for i in range(1, 11):
# imgs.append('new_boat_%s.jpg' % i)
# now = datetime.now()
# rv['random_img'] = imgs[now.second % 10]
# nxt = None
# prv = None
# if self.object:
# rv['page_title'] = 'Edit: %s' % self.object.title
# dte = self.object.modify_time
# try:
# prv = BlogEntry.objects.filter(modify_time__gt=dte).order_by('modify_time')[0]
# except IndexError:
# prv = None
# try:
# nxt = BlogEntry.objects.filter(modify_time__lt=dte).order_by('-modify_time')[0]
# except IndexError:
# nxt = None
# rv['prev_entry'] = prv
# rv['next_entry'] = nxt
return rv
def get_success_url(self):
return reverse('home')
class BlogCreateView(LoginRequiredMixin, CreateView):
template_name = 'www/blog_edit.html'
context_object_name = 'entry'
model = BlogEntry
form_class = BlogEntryForm
lookup_field = 'slug'
def get_context_data(self, **kwargs):
rv = super(BlogCreateView, self).get_context_data(**kwargs)
if rv is None:
rv = {}
rv['page_title'] = 'New Entry'
return rv
def get_success_url(self):
return reverse('home')
class VisitorsView(WebsiteView):
template_name = 'www/visitors.html'
def get_page_title(self):
return 'Visitors'
class PhotosView(WebsiteView):
template_name = 'www/photos.html'
def get_page_title(self):
return 'Photos'
class WeatherView(WebsiteView):
template_name = 'www/weather.html'
stations = None
def get_page_title(self):
return 'San Diego Weather'
def get_context_data(self, **kwargs):
rv = super(WeatherView, self).get_context_data(**kwargs)
if rv is None:
rv = {}
rv['stations'] = ['station-01']
rv['auto_refresh'] = True
rv['auto_refresh_secs'] = 300
return rv
class BoatCamView(WebsiteView):
template_name = 'www/boat_cam.html'
def get_page_title(self):
return 'Boat Cam!'
def get_context_data(self, **kwargs):
rv = super(BoatCamView, self).get_context_data(**kwargs)
if rv is None:
rv = {}
try:
wc = Webcam.objects.get(pk=1)
rv['last_image'] = wc.snapshot_set.latest('ts_create')
except Webcam.DoesNotExist:
pass
rv['auto_refresh'] = True
rv['auto_refresh_secs'] = 60
return rv
class WebcamView(DetailView):
template_name = 'www/wc.html'
context_object_name = 'webcam'
lookup_field = 'slug'
lookup_url_kwarg = 'slug'
model = Webcam
def get_context_data(self, **kwargs):
rv = super(WebcamView, self).get_context_data(**kwargs)
schOn = []
schOff = []
mths = None
cnt = 0
if self.object:
mths = self.object.snapshot_set.datetimes('ts_create', 'month', order='DESC')
cnt = self.object.snapshot_set.count()
rv['page_title'] = self.object.name
if self.object.schedule and len(self.object.schedule) > 0:
try:
sch = json.loads(self.object.schedule)
if sch:
all = sch.get('all', None)
if all:
schOn = all.get('on', None)
schOff = all.get('off', None)
except ValueError:
pass
tz = timezone.get_current_timezone()
ct = timezone.make_aware(datetime.now(), tz)
rv['now'] = ct
# rv['all_days'] = allD
rv['scheduled_on'] = schOn
rv['scheduled_off'] = schOff
rv['months'] = mths
rv['total_snaps'] = cnt
rv['latest_snap'] = self.object.snapshot_set.latest('ts_create')
if self.object.is_scheduled():
rv['auto_refresh'] = True
rv['auto_refresh_secs'] = 60
return rv
class WcMonthView(DetailView):
template_name = 'www/wc.html'
context_object_name = 'webcam'
lookup_field = 'slug'
lookup_url_kwarg = 'slug'
model = Webcam
def get_context_data(self, **kwargs):
rv = super(WcMonthView, self).get_context_data(**kwargs)
y = int(self.kwargs['year'])
m = int(self.kwargs['month'])
tz = timezone.get_current_timezone()
dom1 = timezone.make_aware(datetime(y, m, 1), tz)
tday = timezone.make_aware(datetime.now(), tz)
dlt = timedelta(days=1)
firstD = first_day_before(dom1, 6)
lastD = last_day_after(last_day_of_month(dom1), 5)
curD = firstD
allD = []
schOn = []
schOff = []
fst = None
lst = None
ar_secs = 0
if self.object:
if y == tday.year and m == tday.month and self.object.is_scheduled():
# update_daily_stats(self.object.id, tday)
ar_secs = 60
rv['page_title'] = '%s - %s' % (self.object.name, dom1.strftime('%b %Y'))
ndx = 0
while curD <= lastD:
stats = SnapshotDailyStat.lookup(webcam=self.object, for_date=curD)
allD.append({'index': ndx, 'day': curD, 'count': 0 if stats is None else stats.total_count, 'stats': stats})
curD += dlt
ndx += 1
sched = self.object.get_schedule()
if sched:
all = sched.get('all', None)
if all:
schOn = all.get('on', None)
schOff = all.get('off', None)
fst = self.object.snapshot_set.earliest('ts_create')
lst = self.object.snapshot_set.latest('ts_create')
pm = firstD - dlt
if fst and pm >= fst.ts_create:
rv['prev_month'] = pm
nm = last_day_of_month(dom1) + dlt
if nm <= tday:
rv['next_month'] = nm
# rv['first_day'] = firstD
# rv['last_day'] = lastD
rv['cur_month'] = dom1
rv['now'] = tday
rv['all_days'] = allD
rv['first_day'] = None if fst is None else fst.ts_create
rv['last_day'] = None if lst is None else lst.ts_create
rv['scheduled_on'] = schOn
rv['scheduled_off'] = schOff
if ar_secs > 0:
rv['auto_refresh'] = True
rv['auto_refresh_secs'] = ar_secs
return rv
class WcDayView(TemplateView):
template_name = 'www/wc_day.html'
def get_context_data(self, **kwargs):
rv = super(WcDayView, self).get_context_data(**kwargs)
try:
wc = Webcam.objects.get(slug=self.kwargs['slug'])
except Webcam.DoesNotExist:
wc = None
y = int(self.kwargs['year'])
m = int(self.kwargs['month'])
d = int(self.kwargs['day'])
tz = timezone.get_current_timezone()
curD = datetime(y, m, d)
sfd = None
if wc:
sfd = wc.snaps_for_day(curD)
cnt = sfd.count() if sfd else 0
fst = None
lst = None
am = None
pm = None
amf = None
aml = None
pmf = None
pml = None
noon = datetime(curD.year, curD.month, curD.day, 12, 0, 0)
if cnt > 0:
fst = sfd.earliest('ts_create')
lst = sfd.latest('ts_create')
am = sfd.filter(ts_create__lt=noon)
pm = sfd.filter(ts_create__gt=noon)
if am and am.count() > 0:
amf = am.earliest('ts_create')
aml = am.latest('ts_create')
if pm and pm.count() > 0:
pmf = pm.earliest('ts_create')
pml = pm.latest('ts_create')
rv.update({'webcam': wc,
'day': curD,
'count': cnt,
'earliest': fst,
'latest': lst,
'am': am,
'pm': pm,
'aml': aml,
'amf': amf,
'pmf': pmf,
'pml': pml})
return rv
class AdilHomeView(WebsiteView):
template_name = 'www/adil_home.html'
webcam = None
def get_page_title(self):
return 'A Day in Pictures'
def get_context_data(self, **kwargs):
rv = super(AdilHomeView, self).get_context_data(**kwargs)
tz = timezone.get_current_timezone()
ct = datetime.now(tz)
dlt = timedelta(days=1)
firstD = first_day_before(datetime(ct.year, ct.month, 1, tzinfo=tz), 6)
lastD = last_day_after(last_day_of_month(ct), 5)
curD = firstD
allD = []
self.webcam = None
try:
self.webcam = Webcam.objects.get(pk=1)
while curD <= lastD:
sfd = self.webcam.snaps_for_day(curD)
cnt = sfd.count()
fst = None
lst = None
if cnt > 0:
fst = sfd.earliest('ts_create')
lst = sfd.latest('ts_create')
allD.append({'day': curD, 'count': cnt, 'earliest': fst, 'latest': lst })
curD = curD + dlt
except Webcam.DoesNotExist:
pass
rv['webcam'] = self.webcam
rv['first_day'] = firstD
rv['last_day'] = lastD
rv['now'] = ct
rv['all_days'] = allD
return rv
class DayInTheLifeView(WebsiteView):
template_name = 'www/adil.html'
webcam = None
def get_context_data(self, **kwargs):
rv = super(DayInTheLifeView, self).get_context_data(**kwargs)
tz = timezone.get_current_timezone()
cslug = self.kwargs['slug']
y = int(self.kwargs['year'])
m = int(self.kwargs['month'])
d = int(self.kwargs['day'])
td = timezone.make_aware(datetime.now(), tz)
wdate = timezone.make_aware(datetime(y, m, d),tz)
dlt = timedelta(days=1)
pdate = wdate - dlt
ndate = wdate + dlt
self.webcam = None
try:
self.webcam = Webcam.objects.get(slug=cslug)
rv['webcam'] = self.webcam
rv['all_dates'] = None if self.webcam is None else self.webcam.snapshot_set.all().datetimes('ts_create', 'day')
rv['which_date'] = wdate
rv['prev_date'] = pdate
if ndate < td:
rv['next_date'] = ndate
rv['snaps_by_hour'] = self._build_sbh(y, m, d)
rv['page_title'] = '%s %s' % (self.webcam.name, wdate.strftime("%b %d, %Y"))
except Webcam.DoesNotExist:
pass
return rv
def _build_sbh(self, y, m, d):
sbh = []
for i in range(0,24):
hr = datetime(y, m, d, i, 0)
snaps = self.webcam.snaps_for_hour(y, m, d, i)
sbh.append({"hour": hr, "snaps": snaps})
return sbh
class AdilHourView(TemplateView):
template_name = 'www/adil_hour.html'
def get_context_data(self, **kwargs):
rv = super(AdilHourView, self).get_context_data(**kwargs)
cslug = self.kwargs['slug']
y = int(self.kwargs['year'])
m = int(self.kwargs['month'])
d = int(self.kwargs['day'])
h = int(self.kwargs['hour'])
snaps = None
try:
wc = Webcam.objects.get(slug=cslug)
snaps = wc.snaps_for_hour(y, m, d, h)
except Webcam.DoesNotExist:
pass
# rv['STATIC_URL'] = settings.STATIC_URL
rv['snaps'] = snaps
return rv
DEF_HOURS = 24
class AjaxChartView(TemplateView):
template_name = 'n/a'
def __init__(self):
super(AjaxChartView, self).__init__()
self.station = 'station-01'
# self.kind = None
# self.names = {"KSAN": "San Diego", "KPHX": "Phoenix", "KOKC": "OKC", "KTEB": "Hackensack"}
def get_context_data(self, **kwargs):
rv = super(AjaxChartView, self).get_context_data(**kwargs)
tz = timezone.get_current_timezone()
cur_tm = datetime.now(tz)
dlt = timedelta(hours=DEF_HOURS)
st_tm = cur_tm - dlt
try:
stn = Station.objects.get(name=self.station)
except Station.DoesNotExist:
return {}
rdngs = []
list = TempHumidity.objects.filter(station=stn).filter(reading_time__gte=st_tm).order_by('reading_time')
for c in list:
rt = timezone.localtime(c.reading_time, tz)
t = c.temperature
if t:
if t < -1000:
t = 0
t = float('%.2f' % t)
h = c.humidity
if h:
if h < -1000:
h = 0
h = float('%.2f' % h)
rdngs.append({'yy': rt.year, 'mm': rt.month-1, 'dd': rt.day, 'hh': rt.hour, 'mi': rt.minute, 'temp': t, 'hum': h})
return {'name': stn.name, 'readings': rdngs}
def get(self, request, *args, **kwargs):
# s = request.GET.get('stations', 'KSAN,KPHX')
# self.stations = s.split(',')
self.kind = request.GET.get('kind', 't')
return super(AjaxChartView, self).get(request, *args, **kwargs)
def render_to_response(self, context):
s = json.dumps(context)
return HttpResponse(s, content_type='application/json')
class HiLoView(TemplateView):
template_name = 'www/hilo.html'
def __init__(self):
super(HiLoView, self).__init__()
# self.station = 'station-01'
# self.kind = None
# self.names = {"KSAN": "San Diego", "KPHX": "Phoenix", "KOKC": "OKC", "KTEB": "Hackensack"}
def get_context_data(self, **kwargs):
rv = super(HiLoView, self).get_context_data(**kwargs)
tz = timezone.get_current_timezone()
cur_tm = timezone.make_aware(datetime.now())
sta_nm = self.kwargs['station']
y = int(self.kwargs['year'])
m = int(self.kwargs['month'])
rv['page_title'] = 'Highs and Lows %02d/%d' % (m, y)
d1 = timezone.make_aware(datetime(y, m, 1), tz)
firstD = d1
if d1.weekday() != 6:
firstD = first_day_before(d1, 6)
lastD = last_day_after(last_day_of_month(d1), 5)
days = []
dlt = lastD - firstD
for i in range(dlt.days+1):
dx = firstD + timedelta(days=i)
days.append(dx)
ld = last_day_of_month(d1)
oneDay = timedelta(days=1)
rv['prev_month'] = d1 - oneDay
nm = ld + oneDay
if nm < cur_tm:
rv['next_month'] = nm
hilo = []
try:
sta = Station.objects.get(name=sta_nm)
except Station.DoesNotExist:
sta = None
if sta:
rv['station'] = sta
for d in days:
# k = '%04d-%02d-%02d' % (y, m, d)
# dte = datetime.strptime(k, '%Y-%m-%d')
st = timezone.make_aware(datetime(d.year, d.month, d.day, 00, 00, 00), tz)
et = timezone.make_aware(datetime(d.year, d.month, d.day, 23, 59, 59), tz)
d = {'date': st}
th = sta.temphumidity_set.filter(reading_time__range=(st,et))
if th.count() > 0:
d.update(th.aggregate(Min('temperature'), Max('temperature'), Avg('temperature')))
hilo.append(d)
rv['hilo'] = hilo
return rv
class CrlsView(TemplateView):
template_name = 'n/a'
def get_context_data(self, **kwargs):
rv = super(CrlsView, self).get_context_data(**kwargs)
nm = None
if self.kwargs:
nm = self.kwargs.get('crlname')
rv['crlname'] = nm
return rv
def render_to_response(self, context):
context.pop('view')
return JsonResponse(context)
| apache-2.0 |
andyfaff/scipy | scipy/optimize/_dual_annealing.py | 7 | 29860 | # Dual Annealing implementation.
# Copyright (c) 2018 Sylvain Gubian <[email protected]>,
# Yang Xiang <[email protected]>
# Author: Sylvain Gubian, Yang Xiang, PMP S.A.
"""
A Dual Annealing global optimization algorithm
"""
import numpy as np
from scipy.optimize import OptimizeResult
from scipy.optimize import minimize
from scipy.special import gammaln
from scipy._lib._util import check_random_state
__all__ = ['dual_annealing']
class VisitingDistribution:
"""
Class used to generate new coordinates based on the distorted
Cauchy-Lorentz distribution. Depending on the steps within the strategy
chain, the class implements the strategy for generating new location
changes.
Parameters
----------
lb : array_like
A 1-D NumPy ndarray containing lower bounds of the generated
components. Neither NaN nor inf is allowed.
ub : array_like
A 1-D NumPy ndarray containing upper bounds for the generated
components. Neither NaN nor inf is allowed.
visiting_param : float
Parameter for visiting distribution. Default value is 2.62.
Higher values give the visiting distribution a heavier tail; this
makes the algorithm jump to a more distant region.
The value range is (1, 3]. Its value is fixed for the life of the
object.
rand_gen : {`~numpy.random.RandomState`, `~numpy.random.Generator`}
A `~numpy.random.RandomState`, `~numpy.random.Generator` object
for using the current state of the created random generator container.
"""
TAIL_LIMIT = 1.e8
MIN_VISIT_BOUND = 1.e-10
def __init__(self, lb, ub, visiting_param, rand_gen):
# if you wish to make _visiting_param adjustable during the life of
# the object then _factor2, _factor3, _factor5, _d1, _factor6 will
# have to be dynamically calculated in `visit_fn`. They're factored
# out here so they don't need to be recalculated all the time.
self._visiting_param = visiting_param
self.rand_gen = rand_gen
self.lower = lb
self.upper = ub
self.bound_range = ub - lb
# these are invariant numbers unless visiting_param changes
self._factor2 = np.exp((4.0 - self._visiting_param) * np.log(
self._visiting_param - 1.0))
self._factor3 = np.exp((2.0 - self._visiting_param) * np.log(2.0)
/ (self._visiting_param - 1.0))
self._factor4_p = np.sqrt(np.pi) * self._factor2 / (self._factor3 * (
3.0 - self._visiting_param))
self._factor5 = 1.0 / (self._visiting_param - 1.0) - 0.5
self._d1 = 2.0 - self._factor5
self._factor6 = np.pi * (1.0 - self._factor5) / np.sin(
np.pi * (1.0 - self._factor5)) / np.exp(gammaln(self._d1))
def visiting(self, x, step, temperature):
""" Based on the step in the strategy chain, new coordinated are
generated by changing all components is the same time or only
one of them, the new values are computed with visit_fn method
"""
dim = x.size
if step < dim:
# Changing all coordinates with a new visiting value
visits = self.visit_fn(temperature, dim)
upper_sample, lower_sample = self.rand_gen.uniform(size=2)
visits[visits > self.TAIL_LIMIT] = self.TAIL_LIMIT * upper_sample
visits[visits < -self.TAIL_LIMIT] = -self.TAIL_LIMIT * lower_sample
x_visit = visits + x
a = x_visit - self.lower
b = np.fmod(a, self.bound_range) + self.bound_range
x_visit = np.fmod(b, self.bound_range) + self.lower
x_visit[np.fabs(
x_visit - self.lower) < self.MIN_VISIT_BOUND] += 1.e-10
else:
# Changing only one coordinate at a time based on strategy
# chain step
x_visit = np.copy(x)
visit = self.visit_fn(temperature, 1)
if visit > self.TAIL_LIMIT:
visit = self.TAIL_LIMIT * self.rand_gen.uniform()
elif visit < -self.TAIL_LIMIT:
visit = -self.TAIL_LIMIT * self.rand_gen.uniform()
index = step - dim
x_visit[index] = visit + x[index]
a = x_visit[index] - self.lower[index]
b = np.fmod(a, self.bound_range[index]) + self.bound_range[index]
x_visit[index] = np.fmod(b, self.bound_range[
index]) + self.lower[index]
if np.fabs(x_visit[index] - self.lower[
index]) < self.MIN_VISIT_BOUND:
x_visit[index] += self.MIN_VISIT_BOUND
return x_visit
def visit_fn(self, temperature, dim):
""" Formula Visita from p. 405 of reference [2] """
x, y = self.rand_gen.normal(size=(dim, 2)).T
factor1 = np.exp(np.log(temperature) / (self._visiting_param - 1.0))
factor4 = self._factor4_p * factor1
# sigmax
x *= np.exp(-(self._visiting_param - 1.0) * np.log(
self._factor6 / factor4) / (3.0 - self._visiting_param))
den = np.exp((self._visiting_param - 1.0) * np.log(np.fabs(y)) /
(3.0 - self._visiting_param))
return x / den
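# Illustrative sketch (added for this write-up; not part of the SciPy source):
# how the visiting distribution above could be exercised on its own. The
# bounds, seed and temperature are arbitrary example values.
#
#   rng = np.random.RandomState(42)
#   vd = VisitingDistribution(np.array([-5.0, -5.0]), np.array([5.0, 5.0]),
#                             2.62, rng)
#   x_new = vd.visiting(np.zeros(2), step=0, temperature=5230.0)  # trial point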
class EnergyState:
"""
Class used to record the energy state. At any time, it knows the currently
used coordinates and the most recent best location.
Parameters
----------
lower : array_like
A 1-D NumPy ndarray containing lower bounds for generating an initial
random location in the `reset` method. Neither NaN nor inf is allowed.
upper : array_like
A 1-D NumPy ndarray containing upper bounds for generating an initial
random location in the `reset` method. Neither NaN nor inf is allowed.
callback : callable, ``callback(x, f, context)``, optional
A callback function which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and `context` has value in [0, 1, 2]
"""
# Maximum number of trials for generating a valid starting point
MAX_REINIT_COUNT = 1000
def __init__(self, lower, upper, callback=None):
self.ebest = None
self.current_energy = None
self.current_location = None
self.xbest = None
self.lower = lower
self.upper = upper
self.callback = callback
def reset(self, func_wrapper, rand_gen, x0=None):
"""
Initialize the current location in the search domain. If `x0` is not
provided, a random location within the bounds is generated.
"""
if x0 is None:
self.current_location = rand_gen.uniform(self.lower, self.upper,
size=len(self.lower))
else:
self.current_location = np.copy(x0)
init_error = True
reinit_counter = 0
while init_error:
self.current_energy = func_wrapper.fun(self.current_location)
if self.current_energy is None:
raise ValueError('Objective function is returning None')
if (not np.isfinite(self.current_energy) or np.isnan(
self.current_energy)):
if reinit_counter >= EnergyState.MAX_REINIT_COUNT:
init_error = False
message = (
'Stopping algorithm because function '
'create NaN or (+/-) infinity values even with '
'trying new random parameters'
)
raise ValueError(message)
self.current_location = rand_gen.uniform(self.lower,
self.upper,
size=self.lower.size)
reinit_counter += 1
else:
init_error = False
# If first time reset, initialize ebest and xbest
if self.ebest is None and self.xbest is None:
self.ebest = self.current_energy
self.xbest = np.copy(self.current_location)
# Otherwise, we keep them in case of reannealing reset
def update_best(self, e, x, context):
self.ebest = e
self.xbest = np.copy(x)
if self.callback is not None:
val = self.callback(x, e, context)
if val is not None:
if val:
return('Callback function requested to stop early by '
'returning True')
def update_current(self, e, x):
self.current_energy = e
self.current_location = np.copy(x)
class StrategyChain:
"""
Class that implements within a Markov chain the strategy for location
acceptance and local search decision making.
Parameters
----------
acceptance_param : float
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
visit_dist : VisitingDistribution
Instance of `VisitingDistribution` class.
func_wrapper : ObjectiveFunWrapper
Instance of `ObjectiveFunWrapper` class.
minimizer_wrapper: LocalSearchWrapper
Instance of `LocalSearchWrapper` class.
rand_gen : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
energy_state: EnergyState
Instance of `EnergyState` class.
"""
def __init__(self, acceptance_param, visit_dist, func_wrapper,
minimizer_wrapper, rand_gen, energy_state):
# Local strategy chain minimum energy and location
self.emin = energy_state.current_energy
self.xmin = np.array(energy_state.current_location)
# Global optimizer state
self.energy_state = energy_state
# Acceptance parameter
self.acceptance_param = acceptance_param
# Visiting distribution instance
self.visit_dist = visit_dist
# Wrapper to objective function
self.func_wrapper = func_wrapper
# Wrapper to the local minimizer
self.minimizer_wrapper = minimizer_wrapper
self.not_improved_idx = 0
self.not_improved_max_idx = 1000
self._rand_gen = rand_gen
self.temperature_step = 0
self.K = 100 * len(energy_state.current_location)
def accept_reject(self, j, e, x_visit):
r = self._rand_gen.uniform()
pqv_temp = 1.0 - ((1.0 - self.acceptance_param) *
(e - self.energy_state.current_energy) / self.temperature_step)
if pqv_temp <= 0.:
pqv = 0.
else:
pqv = np.exp(np.log(pqv_temp) / (
1. - self.acceptance_param))
if r <= pqv:
# We accept the new location and update state
self.energy_state.update_current(e, x_visit)
self.xmin = np.copy(self.energy_state.current_location)
# No improvement for a long time
if self.not_improved_idx >= self.not_improved_max_idx:
if j == 0 or self.energy_state.current_energy < self.emin:
self.emin = self.energy_state.current_energy
self.xmin = np.copy(self.energy_state.current_location)
def run(self, step, temperature):
self.temperature_step = temperature / float(step + 1)
self.not_improved_idx += 1
for j in range(self.energy_state.current_location.size * 2):
if j == 0:
if step == 0:
self.energy_state_improved = True
else:
self.energy_state_improved = False
x_visit = self.visit_dist.visiting(
self.energy_state.current_location, j, temperature)
# Calling the objective function
e = self.func_wrapper.fun(x_visit)
if e < self.energy_state.current_energy:
# We have got a better energy value
self.energy_state.update_current(e, x_visit)
if e < self.energy_state.ebest:
val = self.energy_state.update_best(e, x_visit, 0)
if val is not None:
if val:
return val
self.energy_state_improved = True
self.not_improved_idx = 0
else:
# We have not improved but do we accept the new location?
self.accept_reject(j, e, x_visit)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during annealing')
# End of StrategyChain loop
def local_search(self):
# Decision making for performing a local search
# based on strategy chain results
# If energy has been improved or no improvement since too long,
# performing a local search with the best strategy chain location
if self.energy_state_improved:
# Global energy has improved, let's see if LS improves further
e, x = self.minimizer_wrapper.local_search(self.energy_state.xbest,
self.energy_state.ebest)
if e < self.energy_state.ebest:
self.not_improved_idx = 0
val = self.energy_state.update_best(e, x, 1)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
return ('Maximum number of function call reached '
'during local search')
# Check probability of a need to perform a LS even if no improvement
do_ls = False
if self.K < 90 * len(self.energy_state.current_location):
pls = np.exp(self.K * (
self.energy_state.ebest - self.energy_state.current_energy) /
self.temperature_step)
if pls >= self._rand_gen.uniform():
do_ls = True
# Global energy not improved, let's see what LS gives
# on the best strategy chain location
if self.not_improved_idx >= self.not_improved_max_idx:
do_ls = True
if do_ls:
e, x = self.minimizer_wrapper.local_search(self.xmin, self.emin)
self.xmin = np.copy(x)
self.emin = e
self.not_improved_idx = 0
self.not_improved_max_idx = self.energy_state.current_location.size
if e < self.energy_state.ebest:
val = self.energy_state.update_best(
self.emin, self.xmin, 2)
if val is not None:
if val:
return val
self.energy_state.update_current(e, x)
if self.func_wrapper.nfev >= self.func_wrapper.maxfun:
            return ('Maximum number of function calls reached '
                    'during dual annealing')
class ObjectiveFunWrapper:
def __init__(self, func, maxfun=1e7, *args):
self.func = func
self.args = args
# Number of objective function evaluations
self.nfev = 0
# Number of gradient function evaluation if used
self.ngev = 0
# Number of hessian of the objective function if used
self.nhev = 0
self.maxfun = maxfun
def fun(self, x):
self.nfev += 1
return self.func(x, *self.args)
class LocalSearchWrapper:
"""
Class used to wrap around the minimizer used for local search
Default local minimizer is SciPy minimizer L-BFGS-B
"""
LS_MAXITER_RATIO = 6
LS_MAXITER_MIN = 100
LS_MAXITER_MAX = 1000
def __init__(self, search_bounds, func_wrapper, **kwargs):
self.func_wrapper = func_wrapper
self.kwargs = kwargs
self.minimizer = minimize
bounds_list = list(zip(*search_bounds))
self.lower = np.array(bounds_list[0])
self.upper = np.array(bounds_list[1])
# If no minimizer specified, use SciPy minimize with 'L-BFGS-B' method
if not self.kwargs:
n = len(self.lower)
ls_max_iter = min(max(n * self.LS_MAXITER_RATIO,
self.LS_MAXITER_MIN),
self.LS_MAXITER_MAX)
self.kwargs['method'] = 'L-BFGS-B'
self.kwargs['options'] = {
'maxiter': ls_max_iter,
}
self.kwargs['bounds'] = list(zip(self.lower, self.upper))
def local_search(self, x, e):
# Run local search from the given x location where energy value is e
x_tmp = np.copy(x)
mres = self.minimizer(self.func_wrapper.fun, x, **self.kwargs)
if 'njev' in mres:
self.func_wrapper.ngev += mres.njev
if 'nhev' in mres:
self.func_wrapper.nhev += mres.nhev
        # Check if the result is a valid value
is_finite = np.all(np.isfinite(mres.x)) and np.isfinite(mres.fun)
in_bounds = np.all(mres.x >= self.lower) and np.all(
mres.x <= self.upper)
is_valid = is_finite and in_bounds
        # Use the new point only if it is valid and returns a better result
if is_valid and mres.fun < e:
return mres.fun, mres.x
else:
return e, x_tmp
def dual_annealing(func, bounds, args=(), maxiter=1000,
local_search_options={}, initial_temp=5230.,
restart_temp_ratio=2.e-5, visit=2.62, accept=-5.0,
maxfun=1e7, seed=None, no_local_search=False,
callback=None, x0=None):
"""
Find the global minimum of a function using Dual Annealing.
Parameters
----------
func : callable
The objective function to be minimized. Must be in the form
``f(x, *args)``, where ``x`` is the argument in the form of a 1-D array
and ``args`` is a tuple of any additional fixed parameters needed to
completely specify the function.
bounds : sequence, shape (n, 2)
Bounds for variables. ``(min, max)`` pairs for each element in ``x``,
defining bounds for the objective function parameter.
args : tuple, optional
Any additional fixed parameters needed to completely specify the
objective function.
maxiter : int, optional
The maximum number of global search iterations. Default value is 1000.
local_search_options : dict, optional
Extra keyword arguments to be passed to the local minimizer
(`minimize`). Some important options could be:
``method`` for the minimizer method to use and ``args`` for
objective function additional arguments.
initial_temp : float, optional
        The initial temperature; use higher values to facilitate a wider
        search of the energy landscape, allowing dual_annealing to escape
        local minima that it is trapped in. Default value is 5230. Range is
        (0.01, 5.e4].
restart_temp_ratio : float, optional
        During the annealing process, the temperature is decreasing; when it
        reaches ``initial_temp * restart_temp_ratio``, the reannealing process
        is triggered. Default value of the ratio is 2e-5. Range is (0, 1).
visit : float, optional
        Parameter for visiting distribution. Default value is 2.62. Higher
        values give the visiting distribution a heavier tail, which makes
        the algorithm jump to more distant regions. The value range is (1, 3].
accept : float, optional
Parameter for acceptance distribution. It is used to control the
probability of acceptance. The lower the acceptance parameter, the
smaller the probability of acceptance. Default value is -5.0 with
a range (-1e4, -5].
maxfun : int, optional
        Soft limit for the number of objective function calls. If the
        algorithm is in the middle of a local search, this number may be
        exceeded; the algorithm will stop just after the local search is
        done. Default value is 1e7.
seed : {None, int, `numpy.random.Generator`,
`numpy.random.RandomState`}, optional
If `seed` is None (or `np.random`), the `numpy.random.RandomState`
singleton is used.
If `seed` is an int, a new ``RandomState`` instance is used,
seeded with `seed`.
If `seed` is already a ``Generator`` or ``RandomState`` instance then
that instance is used.
Specify `seed` for repeatable minimizations. The random numbers
generated with this seed only affect the visiting distribution function
and new coordinates generation.
no_local_search : bool, optional
If `no_local_search` is set to True, a traditional Generalized
Simulated Annealing will be performed with no local search
strategy applied.
callback : callable, optional
A callback function with signature ``callback(x, f, context)``,
which will be called for all minima found.
``x`` and ``f`` are the coordinates and function value of the
latest minimum found, and ``context`` has value in [0, 1, 2], with the
following meaning:
- 0: minimum detected in the annealing process.
- 1: detection occurred in the local search process.
- 2: detection done in the dual annealing process.
If the callback implementation returns True, the algorithm will stop.
x0 : ndarray, shape(n,), optional
Coordinates of a single N-D starting point.
Returns
-------
res : OptimizeResult
        The optimization result represented as an `OptimizeResult` object.
Important attributes are: ``x`` the solution array, ``fun`` the value
of the function at the solution, and ``message`` which describes the
cause of the termination.
See `OptimizeResult` for a description of other attributes.
Notes
-----
    This function implements the Dual Annealing optimization. This stochastic
    approach, derived from [3]_, combines the generalization of CSA (Classical
    Simulated Annealing) and FSA (Fast Simulated Annealing) [1]_ [2]_, coupled
    to a strategy for applying a local search on accepted locations [4]_.
An alternative implementation of this same algorithm is described in [5]_
and benchmarks are presented in [6]_. This approach introduces an advanced
method to refine the solution found by the generalized annealing
process. This algorithm uses a distorted Cauchy-Lorentz visiting
distribution, with its shape controlled by the parameter :math:`q_{v}`
.. math::
g_{q_{v}}(\\Delta x(t)) \\propto \\frac{ \\
\\left[T_{q_{v}}(t) \\right]^{-\\frac{D}{3-q_{v}}}}{ \\
\\left[{1+(q_{v}-1)\\frac{(\\Delta x(t))^{2}} { \\
\\left[T_{q_{v}}(t)\\right]^{\\frac{2}{3-q_{v}}}}}\\right]^{ \\
\\frac{1}{q_{v}-1}+\\frac{D-1}{2}}}
Where :math:`t` is the artificial time. This visiting distribution is used
to generate a trial jump distance :math:`\\Delta x(t)` of variable
:math:`x(t)` under artificial temperature :math:`T_{q_{v}}(t)`.
From the starting point, after calling the visiting distribution
function, the acceptance probability is computed as follows:
.. math::
p_{q_{a}} = \\min{\\{1,\\left[1-(1-q_{a}) \\beta \\Delta E \\right]^{ \\
\\frac{1}{1-q_{a}}}\\}}
    Where :math:`q_{a}` is an acceptance parameter. For :math:`q_{a}<1`, zero
acceptance probability is assigned to the cases where
.. math::
[1-(1-q_{a}) \\beta \\Delta E] < 0
The artificial temperature :math:`T_{q_{v}}(t)` is decreased according to
.. math::
T_{q_{v}}(t) = T_{q_{v}}(1) \\frac{2^{q_{v}-1}-1}{\\left( \\
1 + t\\right)^{q_{v}-1}-1}
Where :math:`q_{v}` is the visiting parameter.
.. versionadded:: 1.2.0
References
----------
    .. [1] Tsallis C. Possible generalization of Boltzmann-Gibbs
        statistics. Journal of Statistical Physics, 52, 479-487 (1988).
.. [2] Tsallis C, Stariolo DA. Generalized Simulated Annealing.
Physica A, 233, 395-406 (1996).
.. [3] Xiang Y, Sun DY, Fan W, Gong XG. Generalized Simulated
Annealing Algorithm and Its Application to the Thomson Model.
Physics Letters A, 233, 216-220 (1997).
.. [4] Xiang Y, Gong XG. Efficiency of Generalized Simulated
Annealing. Physical Review E, 62, 4473 (2000).
.. [5] Xiang Y, Gubian S, Suomela B, Hoeng J. Generalized
Simulated Annealing for Efficient Global Optimization: the GenSA
Package for R. The R Journal, Volume 5/1 (2013).
.. [6] Mullen, K. Continuous Global Optimization in R. Journal of
Statistical Software, 60(6), 1 - 45, (2014).
:doi:`10.18637/jss.v060.i06`
Examples
--------
The following example is a 10-D problem, with many local minima.
The function involved is called Rastrigin
(https://en.wikipedia.org/wiki/Rastrigin_function)
    >>> import numpy as np
    >>> from scipy.optimize import dual_annealing
>>> func = lambda x: np.sum(x*x - 10*np.cos(2*np.pi*x)) + 10*np.size(x)
>>> lw = [-5.12] * 10
>>> up = [5.12] * 10
>>> ret = dual_annealing(func, bounds=list(zip(lw, up)))
>>> ret.x
array([-4.26437714e-09, -3.91699361e-09, -1.86149218e-09, -3.97165720e-09,
-6.29151648e-09, -6.53145322e-09, -3.93616815e-09, -6.55623025e-09,
-6.05775280e-09, -5.00668935e-09]) # random
>>> ret.fun
0.000000
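    For reproducible runs a ``seed`` can be supplied, and the local search
    stage can be switched off to obtain a classical generalized simulated
    annealing run. This is an illustrative sketch reusing ``func``, ``lw``
    and ``up`` from above; exact iterates depend on the SciPy version:

    >>> ret = dual_annealing(func, bounds=list(zip(lw, up)),
    ...                      seed=1234, no_local_search=True)
    >>> ret.x.shape
    (10,)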
""" # noqa: E501
if x0 is not None and not len(x0) == len(bounds):
raise ValueError('Bounds size does not match x0')
lu = list(zip(*bounds))
lower = np.array(lu[0])
upper = np.array(lu[1])
# Check that restart temperature ratio is correct
if restart_temp_ratio <= 0. or restart_temp_ratio >= 1.:
raise ValueError('Restart temperature ratio has to be in range (0, 1)')
# Checking bounds are valid
if (np.any(np.isinf(lower)) or np.any(np.isinf(upper)) or np.any(
np.isnan(lower)) or np.any(np.isnan(upper))):
raise ValueError('Some bounds values are inf values or nan values')
# Checking that bounds are consistent
if not np.all(lower < upper):
raise ValueError('Bounds are not consistent min < max')
# Checking that bounds are the same length
if not len(lower) == len(upper):
raise ValueError('Bounds do not have the same dimensions')
# Wrapper for the objective function
func_wrapper = ObjectiveFunWrapper(func, maxfun, *args)
    # Wrapper for the minimizer
minimizer_wrapper = LocalSearchWrapper(
bounds, func_wrapper, **local_search_options)
# Initialization of random Generator for reproducible runs if seed provided
rand_state = check_random_state(seed)
# Initialization of the energy state
energy_state = EnergyState(lower, upper, callback)
energy_state.reset(func_wrapper, rand_state, x0)
# Minimum value of annealing temperature reached to perform
# re-annealing
temperature_restart = initial_temp * restart_temp_ratio
# VisitingDistribution instance
visit_dist = VisitingDistribution(lower, upper, visit, rand_state)
# Strategy chain instance
strategy_chain = StrategyChain(accept, visit_dist, func_wrapper,
minimizer_wrapper, rand_state, energy_state)
need_to_stop = False
iteration = 0
message = []
# OptimizeResult object to be returned
optimize_res = OptimizeResult()
optimize_res.success = True
optimize_res.status = 0
t1 = np.exp((visit - 1) * np.log(2.0)) - 1.0
# Run the search loop
while(not need_to_stop):
for i in range(maxiter):
# Compute temperature for this step
s = float(i) + 2.0
t2 = np.exp((visit - 1) * np.log(s)) - 1.0
temperature = initial_temp * t1 / t2
if iteration >= maxiter:
message.append("Maximum number of iteration reached")
need_to_stop = True
break
# Need a re-annealing process?
if temperature < temperature_restart:
energy_state.reset(func_wrapper, rand_state)
break
# starting strategy chain
val = strategy_chain.run(i, temperature)
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
# Possible local search at the end of the strategy chain
if not no_local_search:
val = strategy_chain.local_search()
if val is not None:
message.append(val)
need_to_stop = True
optimize_res.success = False
break
iteration += 1
# Setting the OptimizeResult values
optimize_res.x = energy_state.xbest
optimize_res.fun = energy_state.ebest
optimize_res.nit = iteration
optimize_res.nfev = func_wrapper.nfev
optimize_res.njev = func_wrapper.ngev
optimize_res.nhev = func_wrapper.nhev
optimize_res.message = message
return optimize_res
| bsd-3-clause |
Aiacos/DevPyLib | mayaLib/rigLib/utils/clothMuscleSetup.py | 1 | 7679 | import pymel.core as pm
import maya.mel as mel
def getAllObjectUnderGroup(group, type='mesh'):
"""
    Return all objects of the given type under the given group
:param group: str, group name
:param type: str, object type
:return: object list
"""
objList = None
if type == 'mesh':
objList = [pm.listRelatives(o, p=1)[0] for o in pm.listRelatives(group, ad=1, type=type)]
if type == 'transform':
geoList = [pm.listRelatives(o, p=1)[0] for o in pm.listRelatives(group, ad=1, type='mesh')]
objList = [o for o in pm.listRelatives(group, ad=1, type=type) if o not in geoList]
objList = list(set(objList))
objList.sort()
return objList
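# Illustrative usage sketch (the group name 'muscle_GRP' below is a
# placeholder and assumes a Maya scene that actually contains such a group):
# muscleMeshes = getAllObjectUnderGroup('muscle_GRP', type='mesh')
# muscleGroups = getAllObjectUnderGroup('muscle_GRP', type='transform')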
def clothPaintInputAttract(clothNode, vtxList, value, smoothIteration=1):
channel = 'inputAttract'
clothOutput = pm.listConnections(clothNode.outputMesh, sh=True)[0]
mel.eval('setNClothMapType("' + channel + '","' + clothOutput + '",1); artAttrNClothToolScript 4 ' + channel + ';')
pm.select(vtxList)
# set value
mel.eval('artAttrCtx -e -value ' + str(value) + ' `currentCtx`;')
# replace
mel.eval('artAttrPaintOperation artAttrCtx Replace;')
mel.eval('artAttrCtx -e -clear `currentCtx`;')
# smooth
for i in range(0, smoothIteration):
mel.eval('artAttrPaintOperation artAttrCtx Smooth;')
mel.eval('artAttrCtx -e -clear `currentCtx`;')
pm.select(cl=True)
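# Illustrative sketch of painting input attract on an nCloth node (the node
# and vertex names are placeholders, not objects created by this module):
# clothShape = pm.ls('bicep_SIM_nClothShape')[0]
# vtxList = pm.ls('bicep_SIM.vtx[0:20]', flatten=True)
# clothPaintInputAttract(clothShape, vtxList, 1.0, smoothIteration=2)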
class ClothMuscle:
def __init__(self):
try:
self.muscleGrp = pm.ls('muscle_GRP')[0]
self.skeletonGrp = pm.ls('skeleton_GRP')[0]
#self.skinGrp = pm.ls('skin_GRP')[0]
except:
pass
self.muscleSystemGrp = pm.group(n='muscleSystem_GRP', em=True)
pm.parent(self.muscleGrp, self.muscleSystemGrp)
pm.parent(self.skeletonGrp, self.muscleSystemGrp)
muscleList = getAllObjectUnderGroup(self.muscleGrp)
self.clothShapeList, self.nucleus = self.createNCloth(muscleList)
# setup Nucleus
pm.rename(self.nucleus, 'muscleSystem_nucleus')
pm.parent(self.nucleus, self.muscleSystemGrp)
self.nucleus.enable.set(1)
self.nucleus.spaceScale.set(0.01)
self.nucleus.subSteps.set(12)
# setup Colliders
self.collisionSetup(self.skeletonGrp)
for clothShape in self.clothShapeList:
self.paintInputAttract(clothShape)
def createNCloth(self, muscleList):
# duplicate muscle (musSim)
muscleSim = pm.duplicate(muscleList)
muscleSimGrp = pm.group(muscleSim, n='muscleSim_GRP', p=self.muscleSystemGrp)
for mus in muscleSim:
pm.rename(mus, str(mus.name()).replace('_GEO1', '_SIM'))
pm.select(muscleSim)
clothShapeList = pm.ls(mel.eval('createNCloth 0;'))
nucleus = pm.listConnections(clothShapeList[0], type='nucleus')[0]
for cloth in clothShapeList:
muscleSimGeo = pm.listConnections(cloth.outputMesh)[0]
pm.rename(cloth.getParent(), str(muscleSimGeo.name()) + '_nCloth')
pm.parent(cloth.getParent(), muscleSimGeo)
# connect inputmeshShape and restShape
print('###############################################')
print(str(muscleSimGeo.name()).replace('_SIM', '_GEO'))
muscleGeo = pm.ls(str(muscleSimGeo.name()).replace('_SIM', '_GEO'))[0]
pm.connectAttr(muscleGeo.getShape().worldMesh[0], cloth.inputMesh, f=True)
pm.connectAttr(muscleGeo.getShape().worldMesh[0], cloth.restShapeMesh, f=True)
# Set Default Value
# Collision
cloth.thickness.set(0.01)
cloth.selfCollideWidthScale.set(1)
# Dynamic Properties
cloth.stretchResistance.set(10)
cloth.bendResistance.set(5)
cloth.inputMeshAttract.set(1)
cloth.inputAttractMethod.set(1)
# Pressure
cloth.pressureMethod.set(1)
# trap checked
return clothShapeList, nucleus
def createCollision(self, geo):
#pm.select(geo)
#collisionShapeList = pm.ls(mel.eval('makeCollideNCloth;'))
timerNode = pm.ls('time1')[0]
colliderNode = pm.createNode('nRigid', n=geo.name() + '_collider' + '_Shape')
pm.rename(colliderNode.getParent(), geo.name() + '_collider')
pm.connectAttr(timerNode.outTime, colliderNode.currentTime, f=True)
pm.connectAttr(geo.getShape().worldMesh[0], colliderNode.inputMesh, f=True)
pm.connectAttr(colliderNode.currentState, self.nucleus.inputPassive[0], f=True)
pm.connectAttr(colliderNode.startState, self.nucleus.inputPassiveStart[0], f=True)
pm.parent(colliderNode, geo)
colliderNode.thickness.set(0.005)
#colliderNode.trappedCheck.set(1)
#colliderNode.pushOut.set(0)
#colliderNode.pushOutRadius.set(0.5)
return colliderNode
def collisionSetup(self, collisionGrp):
collisionGrp = pm.ls(collisionGrp)[0]
collisionGeoList = getAllObjectUnderGroup(collisionGrp)
for collisionGeo in collisionGeoList:
self.createCollision(collisionGeo)
def paintInputAttract(self, clothNode, growSelection=5):
geo = pm.listConnections(clothNode.inputMesh, s=True)[0]
print('PAINT: ', geo)
# paint middle
pm.select(geo)
mel.eval('changeSelectMode -component;')
mel.eval('SelectAll;')
mel.eval('polySelectConstraint -pp 3;')
edges = pm.ls(sl=True)
#mel.eval('polySelectContraint -dis;')
for i in range(growSelection):
mel.eval('select `ls -sl`;PolySelectTraverse 1;select `ls -sl`;')
mel.eval('invertSelection;')
vtxList = pm.ls(sl=True)
clothPaintInputAttract(clothNode, vtxList, 0.4, smoothIteration=3)
def runSolve(self):
# setup nCloth
for clothShape in self.clothShapeList:
# Collisions
clothShape.collisionFlag.set(3)
clothShape.selfCollisionFlag.set(4)
clothShape.thickness.set(0.001)
# Dynamic Properties
clothShape.inputMeshAttract.set(1)
            # Paint Dynamic
self.paintInputAttract(clothShape)
# Pressure
#setPressure(clothShape)
# Quality Settings
clothShape.collideLastThreshold.set(1)
clothShape.evaluationOrder.set(1)
clothShape.bendSolver.set(2)
clothShape.trappedCheck.set(1)
clothShape.selfTrappedCheck.set(1)
clothShape.pushOut.set(0.05)
clothShape.pushOutRadius.set(1)
#for muscleGeo in muscleList:
# deltaMushSetup(muscleGeo)
# setupCollider
#collisionSetup('skeleton_grp')
self.nucleus.enable.set(1)
if __name__ == "__main__":
#mel.eval('file -f -options "v=0;" -ignoreVersion -typ "mayaAscii" -o "/Users/lorenzoargentieri/Qsync/Project/Warewolf/scenes/00_model/anatomy_reference.ma";addRecentFile("/Users/lorenzoargentieri/Qsync/Project/Warewolf/scenes/00_model/anatomy_reference.ma", "mayaAscii");')
cSolver = ClothMuscle()
    #cSolver.runSolve()
 | agpl-3.0 |
VenusGrape/MITx-6.00.1x- | guess _my_number.py | 1 | 3740 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 25 13:53:05 2017
@author: venusgrape
In this problem, you'll create a program that guesses a secret number!
The program works as follows: you (the user) thinks of an integer between 0 (inclusive) and 100 (not inclusive).
The computer makes guesses, and you give it input - is its guess too high or too low? Using bisection search,
the computer will guess the user's secret number!
Here is a transcript of an example session:
Please think of a number between 0 and 100!
Is your secret number 50?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. l
Is your secret number 75?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. l
Is your secret number 87?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. h
Is your secret number 81?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. l
Is your secret number 84?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. h
Is your secret number 82?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. l
Is your secret number 83?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. c
Game over. Your secret number was: 83
Important
Hint: Endpoints
** Your program should use bisection search. So think carefully what that means. What will the first guess always be?
How should you calculate subsequent guesses?
** Your initial endpoints should be 0 and 100. Do not optimize your subsequent endpoints by making them be the halfway point plus or minus 1. Rather, just make them be the halfway point.
Python Trick: Printing on the same line
Note: your program should use input to obtain the user's input! Be sure to handle the case when the user's input is not one of h, l, or c.
When the user enters something invalid, you should print out a message to the user explaining you did not understand their input. Then,
you should re-ask the question, and prompt again for input. For example:
Is your secret number 91?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. y
Sorry, I did not understand your input.
Is your secret number 91?
Enter 'h' to indicate the guess is too high. Enter 'l' to indicate the guess is too low. Enter 'c' to indicate I guessed correctly. c
"""
low = 0
high = 100
ans = (low + high)//2

while True:
    print('Is your secret number ' + str(ans) + '?')
    letter = input("Enter 'h' to indicate the guess is too high. \
Enter 'l' to indicate the guess is too low. \
Enter 'c' to indicate I guessed correctly. ")
    if letter == 'h':
        high = ans
        ans = (high + low)//2
    elif letter == 'l':
        low = ans
        ans = (high + low)//2
    elif letter == 'c':
        print('Game over. Your secret number was:', ans)
        break
    else:
        print("Sorry, I did not understand your input.")
| mit |
ukanga/SickRage | lib/hachoir_parser/file_system/fat.py | 95 | 16153 | from hachoir_core.compatibility import sorted
from hachoir_parser import Parser
from hachoir_core.field import (FieldSet, StaticFieldSet,
RawBytes, PaddingBytes, createPaddingField, Link, Fragment,
Bit, Bits, UInt8, UInt16, UInt32,
String, Bytes, NullBytes)
from hachoir_core.field.integer import GenericInteger
from hachoir_core.endian import LITTLE_ENDIAN
from hachoir_core.text_handler import textHandler, hexadecimal
from hachoir_core.error import error
from hachoir_core.tools import humanFilesize, makePrintable
import datetime
import re
strip_index = re.compile(r'\[[^]]+]$')
class Boot(FieldSet):
static_size = 512*8
def createFields(self):
yield Bytes(self, "jmp", 3, "Jump instruction (to skip over header on boot)")
yield Bytes(self, "oem_name", 8, "OEM Name (padded with spaces)")
yield UInt16(self, "sector_size", "Bytes per sector")
yield UInt8 (self, "cluster_size", "Sectors per cluster")
yield UInt16(self, "reserved_sectors", "Reserved sector count (including boot sector)")
yield UInt8 (self, "fat_nb", "Number of file allocation tables")
yield UInt16(self, "max_root", "Maximum number of root directory entries")
yield UInt16(self, "sectors1", "Total sectors (if zero, use 'sectors2')")
yield UInt8 (self, "media_desc", "Media descriptor")
yield UInt16(self, "fat_size", "Sectors per FAT")
yield UInt16(self, "track_size", "Sectors per track")
yield UInt16(self, "head_nb", "Number of heads")
yield UInt32(self, "hidden", "Hidden sectors")
yield UInt32(self, "sectors2", "Total sectors (if greater than 65535)")
if self.parent.version == 32:
yield UInt32(self, "fat32_size", "Sectors per FAT")
yield UInt16(self, "fat_flags", "FAT Flags")
yield UInt16(self, "version", "Version")
yield UInt32(self, "root_start", "Cluster number of root directory start")
yield UInt16(self, "inf_sector", "Sector number of FS Information Sector")
yield UInt16(self, "boot_copy", "Sector number of a copy of this boot sector")
yield NullBytes(self, "reserved[]", 12, "Reserved")
yield UInt8(self, "phys_drv", "Physical drive number")
yield NullBytes(self, "reserved[]", 1, 'Reserved ("current head")')
yield UInt8(self, "sign", "Signature")
yield textHandler(UInt32(self, "serial", "ID (serial number)"), hexadecimal)
yield String(self, "label", 11, "Volume Label", strip=' ', charset="ASCII")
yield String(self, "fs_type", 8, "FAT file system type", strip=' ', charset="ASCII")
yield Bytes(self, "code", 510-self.current_size/8, "Operating system boot code")
yield Bytes(self, "trail_sig", 2, "Signature (0x55 0xAA)")
class FSInfo(StaticFieldSet):
format = (
(String, "lead_sig", 4, 'Signature ("RRaA")'),
(NullBytes, "reserved[]", 480),
(String, "struct_sig", 4, 'Signature ("rrAa")'),
(UInt32, "free_count", "Last known free cluster count on the volume"),
(UInt32, "nxt_free",),
(NullBytes, "reserved[]", 12),
(Bytes, "trail_sig", 4, "Signature (0x00 0x00 0x55 0xAA)")
)
class FAT(FieldSet):
class FAT(FieldSet):
def createFields(self):
parent = self.parent
version = parent.parent.version
text_handler = parent.text_handler
while self.current_size < self._size:
yield textHandler(GenericInteger(self, 'entry[]', False, version), text_handler)
def createFields(self):
version = self.parent.version
max_entry = 1 << min(28, version)
def FatEntry(chunk):
i = chunk.value
j = (1 - i) % max_entry
if j == 0:
return "reserved cluster"
elif j == 1:
return "free cluster"
elif j < 10:
return "end of a chain"
elif j == 10:
return "bad cluster"
elif j < 18:
return "reserved value"
else:
return str(i)
self.text_handler = FatEntry
while self.current_size < self._size:
yield FAT.FAT(self, 'group[]', size=min(1000*version,self._size-self.current_size))
class Date(FieldSet):
def __init__(self, parent, name):
FieldSet.__init__(self, parent, name, size={
"create": 5,
"access": 2,
"modify": 4,
}[name] * 8)
def createFields(self):
size = self.size / 8
if size > 2:
if size > 4:
yield UInt8(self, "cs", "10ms units, values from 0 to 199")
yield Bits(self, "2sec", 5, "seconds/2")
yield Bits(self, "min", 6, "minutes")
yield Bits(self, "hour", 5, "hours")
yield Bits(self, "day", 5, "(1-31)")
yield Bits(self, "month", 4, "(1-12)")
yield Bits(self, "year", 7, "(0 = 1980, 127 = 2107)")
def createDescription(self):
date = [ self["year"].value, self["month"].value, self["day"].value ]
size = self.size / 8
if size > 2:
mkdate = datetime.datetime
cs = 200 * self["2sec"].value
if size > 4:
cs += self["cs"].value
date += [ self["hour"].value, self["min"].value, cs / 100, cs % 100 * 10000 ]
else:
mkdate = datetime.date
if date == [ 0 for i in date ]:
date = None
else:
date[0] += 1980
try:
date = mkdate(*tuple(date))
except ValueError:
return "invalid"
return str(date)
class InodeLink(Link):
def __init__(self, parent, name, target=None):
Link.__init__(self, parent, name)
self.target = target
self.first = None
def _getTargetPath(self):
if not self.target:
parent = self.parent
self.target = strip_index.sub(r"\\", parent.parent._name) + parent.getFilename().rstrip("/")
return self.target
def createValue(self):
field = InodeGen(self["/"], self.parent, self._getTargetPath())(self)
if field:
self._display = field.path
return Link.createValue(self)
def createDisplay(self):
return "/%s[0]" % self._getTargetPath()
class FileEntry(FieldSet):
static_size = 32*8
process = False
LFN = False
def __init__(self, *args):
FieldSet.__init__(self, *args)
self.status = self.stream.readBits(self.absolute_address, 8, LITTLE_ENDIAN)
if self.status in (0, 0xE5):
return
magic = self.stream.readBits(self.absolute_address+11*8, 8, LITTLE_ENDIAN)
if magic & 0x3F == 0x0F:
self.LFN = True
elif self.getFilename() not in (".", ".."):
self.process = True
def getFilename(self):
name = self["name"].value
if isinstance(name, str):
name = makePrintable(name, "ASCII", to_unicode=True)
ext = self["ext"].value
if ext:
name += "." + ext
if name[0] == 5:
name = "\xE5" + name[1:]
if not self.LFN and self["directory"].value:
name += "/"
return name
def createDescription(self):
if self.status == 0:
return "Free entry"
elif self.status == 0xE5:
return "Deleted file"
elif self.LFN:
name = "".join( field.value for field in self.array("name") )
try:
name = name[:name.index('\0')]
except ValueError:
pass
seq_no = self["seq_no"].value
return "Long filename part: '%s' [%u]" % (name, seq_no)
else:
return "File: '%s'" % self.getFilename()
def getCluster(self):
cluster = self["cluster_lo"].value
if self.parent.parent.version > 16:
cluster += self["cluster_hi"].value << 16
return cluster
def createFields(self):
if not self.LFN:
yield String(self, "name", 8, "DOS file name (padded with spaces)",
strip=' ', charset="ASCII")
yield String(self, "ext", 3, "DOS file extension (padded with spaces)",
strip=' ', charset="ASCII")
yield Bit(self, "read_only")
yield Bit(self, "hidden")
yield Bit(self, "system")
yield Bit(self, "volume_label")
yield Bit(self, "directory")
yield Bit(self, "archive")
yield Bit(self, "device")
yield Bit(self, "unused")
yield RawBytes(self, "reserved", 1, "Something about the case")
yield Date(self, "create")
yield Date(self, "access")
if self.parent.parent.version > 16:
yield UInt16(self, "cluster_hi")
else:
yield UInt16(self, "ea_index")
yield Date(self, "modify")
yield UInt16(self, "cluster_lo")
size = UInt32(self, "size")
yield size
if self.process:
del self.process
target_size = size.value
if self["directory"].value:
if target_size:
size.error("(FAT) value must be zero")
target_size = 0
elif not target_size:
return
self.target_size = 8 * target_size
yield InodeLink(self, "data")
else:
yield UInt8(self, "seq_no", "Sequence Number")
yield String(self, "name[]", 10, "(5 UTF-16 characters)",
charset="UTF-16-LE")
yield UInt8(self, "magic", "Magic number (15)")
yield NullBytes(self, "reserved", 1, "(always 0)")
yield UInt8(self, "checksum", "Checksum of DOS file name")
yield String(self, "name[]", 12, "(6 UTF-16 characters)",
charset="UTF-16-LE")
yield UInt16(self, "first_cluster", "(always 0)")
yield String(self, "name[]", 4, "(2 UTF-16 characters)",
charset="UTF-16-LE")
class Directory(Fragment):
def createFields(self):
while self.current_size < self._size:
yield FileEntry(self, "entry[]")
class File(Fragment):
def _getData(self):
return self["data"]
def createFields(self):
yield Bytes(self, "data", self.datasize/8)
padding = self._size - self.current_size
if padding:
yield createPaddingField(self, padding)
class InodeGen:
def __init__(self, root, entry, path):
self.root = root
self.cluster = root.clusters(entry.getCluster)
self.path = path
self.filesize = entry.target_size
self.done = 0
def createInputStream(cis, **args):
args["size"] = self.filesize
args.setdefault("tags",[]).append(("filename", entry.getFilename()))
return cis(**args)
self.createInputStream = createInputStream
def __call__(self, prev):
name = self.path + "[]"
address, size, last = self.cluster.next()
if self.filesize:
if self.done >= self.filesize:
error("(FAT) bad metadata for " + self.path)
return
field = File(self.root, name, size=size)
if prev.first is None:
field._description = 'File size: %s' % humanFilesize(self.filesize//8)
field.setSubIStream(self.createInputStream)
field.datasize = min(self.filesize - self.done, size)
self.done += field.datasize
else:
field = Directory(self.root, name, size=size)
padding = self.root.getFieldByAddress(address, feed=False)
if not isinstance(padding, (PaddingBytes, RawBytes)):
error("(FAT) address %u doesn't point to a padding field" % address)
return
if last:
next = None
else:
next = lambda: self(field)
field.setLinks(prev.first, next)
self.root.writeFieldsIn(padding, address, (field,))
return field
class FAT_FS(Parser):
endian = LITTLE_ENDIAN
PARSER_TAGS = {
"category": "file_system",
"min_size": 512*8,
"file_ext": ("",),
}
def _validate(self, type_offset):
if self.stream.readBytes(type_offset*8, 8) != ("FAT%-5u" % self.version):
return "Invalid FAT%u signature" % self.version
if self.stream.readBytes(510*8, 2) != "\x55\xAA":
return "Invalid BIOS signature"
return True
def clusters(self, cluster_func):
max_entry = (1 << min(28, self.version)) - 16
cluster = cluster_func()
if 1 < cluster < max_entry:
clus_nb = 1
next = cluster
while True:
next = self.fat[next/1000][next%1000].value
if not 1 < next < max_entry:
break
if cluster + clus_nb == next:
clus_nb += 1
else:
yield self.data_start + cluster * self.cluster_size, clus_nb * self.cluster_size, False
cluster = next
clus_nb = 1
yield self.data_start + cluster * self.cluster_size, clus_nb * self.cluster_size, True
def createFields(self):
        # Read boot sector
boot = Boot(self, "boot", "Boot sector")
yield boot
self.sector_size = boot["sector_size"].value
if self.version == 32:
for field in sorted((
(boot["inf_sector"].value, lambda: FSInfo(self, "fsinfo")),
(boot["boot_copy"].value, lambda: Boot(self, "bkboot", "Copy of the boot sector")),
)):
if field[0]:
padding = self.seekByte(field[0] * self.sector_size)
if padding:
yield padding
yield field[1]()
padding = self.seekByte(boot["reserved_sectors"].value * self.sector_size)
if padding:
yield padding
# Read the two FAT
fat_size = boot["fat_size"].value
if fat_size == 0:
fat_size = boot["fat32_size"].value
fat_size *= self.sector_size * 8
for i in xrange(boot["fat_nb"].value):
yield FAT(self, "fat[]", "File Allocation Table", size=fat_size)
# Read inode table (Directory)
self.cluster_size = boot["cluster_size"].value * self.sector_size * 8
self.fat = self["fat[0]"]
if "root_start" in boot:
self.target_size = 0
self.getCluster = lambda: boot["root_start"].value
yield InodeLink(self, "root", "root")
else:
yield Directory(self, "root[]", size=boot["max_root"].value * 32 * 8)
self.data_start = self.current_size - 2 * self.cluster_size
sectors = boot["sectors1"].value
if not sectors:
sectors = boot["sectors2"].value
# Create one big padding field for the end
size = sectors * self.sector_size
if self._size:
size = min(size, self.size//8)
padding = self.seekByte(size)
if padding:
yield padding
class FAT12(FAT_FS):
PARSER_TAGS = {
"id": "fat12",
"description": "FAT12 filesystem",
"magic": (("FAT12 ", 54*8),),
}
version = 12
def validate(self):
return FAT_FS._validate(self, 54)
class FAT16(FAT_FS):
PARSER_TAGS = {
"id": "fat16",
"description": "FAT16 filesystem",
"magic": (("FAT16 ", 54*8),),
}
version = 16
def validate(self):
return FAT_FS._validate(self, 54)
class FAT32(FAT_FS):
PARSER_TAGS = {
"id": "fat32",
"description": "FAT32 filesystem",
"magic": (("FAT32 ", 82*8),),
}
version = 32
def validate(self):
return FAT_FS._validate(self, 82)
| gpl-3.0 |
valuehack/scholarium.at | Bibliothek/views.py | 1 | 1672 | from django.http import HttpResponseRedirect
import re
import os
from . import models
from django.db import transaction
from django.conf import settings
from Grundgeruest.views import ListeMitMenue
def liste_buecher(request):
return ListeMitMenue.as_view(
template_name='Bibliothek/buecher_alt.html',
model=models.Buch,
context_object_name='buecher',
paginate_by=80)(request, page=request.GET.get('seite'))
attributnamen = {
'author': 'autor',
'isbn': 'isbn',
'title': 'titel',
'address': 'adresse',
'edition': 'ausgabe',
'publisher': 'herausgeber',
'keywords': 'stichworte',
'language': 'sprache',
'note': 'notiz',
'abstract': 'zusammenfassung',
'series': 'serie',
'year': 'jahr'}
@transaction.atomic
def aus_datei_einlesen(request, exlibris=''):
f = open(os.path.join(settings.MEDIA_ROOT, 'buchliste'), 'r')
    text = f.read()[7:-2]  # adapted to the bibtex output produced by zotero
f.close()
trennung = re.compile('\}\n\n(?P<name>[@, \w]*)\{')
liste = trennung.sub('XXX', text).split('XXX')
for buch in liste:
zeilen = buch.split(',\n\t')
teilsplit = re.compile(r'(\w+) = \{(.*)\}')
bezeichnung = zeilen[0]
matches = [teilsplit.match(zeile) for zeile in zeilen[1:]]
daten = dict([match.groups() for match in matches if match])
buch = models.Buch.objects.create(bezeichnung=bezeichnung)
buch.exlibris = exlibris
for key in daten:
if key in attributnamen:
setattr(buch, attributnamen[key], daten[key])
buch.save()
return HttpResponseRedirect('/warenkorb/')
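# Illustrative sketch of the zotero bibtex output the parser above expects
# (hypothetical entry; fields are separated by ",\n\t" as emitted by zotero,
# and only keys listed in `attributnamen` are copied onto the Buch object):
#
# @book{kant_1781,
#     title = {Kritik der reinen Vernunft},
#     author = {Kant, Immanuel},
#     year = {1781}}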
| mit |
takluyver/pyzmq | examples/mongodb/client.py | 1 | 1831 | #!/usr/bin/env python
#
# Copyright (c) 2010 Justin Riley
#
# This file is part of pyzmq.
#
# pyzmq is free software; you can redistribute it and/or modify it under
# the terms of the Lesser GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# pyzmq is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Lesser GNU General Public License for more details.
#
# You should have received a copy of the Lesser GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import json
import zmq
class MongoZMQClient(object):
"""
Client that connects with MongoZMQ server to add/fetch docs
"""
def __init__(self, connect_addr='tcp://127.0.0.1:5000'):
self._context = zmq.Context()
self._socket = self._context.socket(zmq.XREQ)
self._socket.connect(connect_addr)
def _send_recv_msg(self, msg):
self._socket.send_multipart(msg)
return self._socket.recv_multipart()[0]
def get_doc(self, keys):
msg = ['get', json.dumps(keys)]
json_str = self._send_recv_msg(msg)
return json.loads(json_str)
def add_doc(self, doc):
msg = ['add', json.dumps(doc)]
return self._send_recv_msg(msg)
def main():
client = MongoZMQClient()
for i in range(10):
doc = {'job': str(i)}
print "Adding doc", doc
print client.add_doc(doc)
for i in range(10):
query = {'job': str(i)}
print "Getting doc matching query:", query
print client.get_doc(query)
if __name__ == "__main__":
main()
| lgpl-3.0 |
kprestel/PyInvestment | pytech/data/reader.py | 2 | 10810 | """
Act as a wrapper around pandas_datareader and write the responses to the
database to be accessed later.
"""
import datetime as dt
import logging
from typing import Dict, Iterable, Union, Tuple
import numpy as np
import pandas as pd
import pandas_datareader as pdr
from arctic.date import DateRange
from arctic.exceptions import NoDataFoundException
from pandas.tseries.offsets import BDay
from pandas_datareader._utils import RemoteDataError
import pytech.utils.dt_utils as dt_utils
import pytech.utils.pandas_utils as pd_utils
from pytech.decorators.decorators import write_chunks
from pytech.mongo import ARCTIC_STORE
from pytech.mongo.barstore import BarStore
from pytech.utils.exceptions import DataAccessError
from pytech.data._holders import DfLibName
logger = logging.getLogger(__name__)
ticker_input = Union[Iterable, str, pd.DataFrame]
range_type = Union[pd.DatetimeIndex, DateRange]
YAHOO = 'yahoo'
GOOGLE = 'google'
FRED = 'fred'
FAMA_FRENCH = 'famafrench'
class BarReader(object):
"""Read and write data from the DB and the web."""
def __init__(self, lib_name: str):
self.lib_name = lib_name
if lib_name not in ARCTIC_STORE.list_libraries():
# create the lib if it does not already exist
ARCTIC_STORE.initialize_library(lib_name,
BarStore.LIBRARY_TYPE)
self.lib = ARCTIC_STORE[self.lib_name]
def get_data(self,
tickers: ticker_input,
source: str = GOOGLE,
start: dt.datetime = None,
end: dt.datetime = None,
check_db: bool = True,
filter_data: bool = True,
**kwargs) -> Union[pd.DataFrame, Dict[str, pd.DataFrame]]:
"""
Get data and create a :class:`pd.DataFrame` from it.
:param tickers: The ticker(s) that data will be retrieved for.
:param source: The data source. Options:
* yahoo
* google
* fred
* famafrench
* db
* anything else pandas_datareader supports
        :param start: Left boundary for range.
            Defaults to 1/1/2010.
        :param end: Right boundary for range.
            Defaults to today.
:param check_db: Check the database first before making network call.
:param filter_data: Filter data from the DB. Only used if `check_db` is
`True`.
:param kwargs: kwargs are passed blindly to `pandas_datareader`
:return: A `dict[ticker, DataFrame]`.
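        Example (illustrative sketch; the library name, tickers and the
        availability of a populated DB or network access are assumptions)::

            reader = BarReader('pytech.bars')
            prices = reader.get_data(['AAPL', 'MSFT'], source='google',
                                     start=dt.datetime(2016, 1, 3),
                                     end=dt.datetime(2016, 12, 30))
            aapl_df = prices['AAPL']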
"""
start, end = dt_utils.sanitize_dates(start, end)
if isinstance(tickers, str):
try:
df_lib_name = self._single_get_data(tickers, source, start,
end, check_db, filter_data,
**kwargs)
return df_lib_name.df
except DataAccessError as e:
raise DataAccessError(
f'Could not get data for ticker: {tickers}') from e
else:
if isinstance(tickers, pd.DataFrame):
tickers = tickers.index
try:
return self._mult_tickers_get_data(tickers, source, start, end,
check_db, filter_data,
**kwargs)
except DataAccessError as e:
raise e
def _mult_tickers_get_data(self,
tickers: Iterable,
source: str,
start: dt.datetime,
end: dt.datetime,
check_db: bool,
filter_data: bool,
**kwargs) -> Dict[str, pd.DataFrame]:
"""Download data for multiple tickers."""
stocks = {}
failed = []
passed = []
for t in tickers:
try:
df_lib_name = self._single_get_data(t, source, start, end,
check_db, filter_data,
**kwargs)
stocks[t] = df_lib_name.df
passed.append(t)
except DataAccessError:
failed.append(t)
if len(passed) == 0:
raise DataAccessError('No data could be retrieved.')
if len(stocks) > 0 and len(failed) > 0 and len(passed) > 0:
df_na = stocks[passed[0]].copy()
df_na[:] = np.nan
for t in failed:
logger.warning(f'No data could be retrieved for ticker: {t}, '
f'replacing with NaN.')
stocks[t] = df_na
return stocks
def _single_get_data(self,
ticker: str,
source: str,
start: dt.datetime,
end: dt.datetime,
check_db: bool,
filter_data: bool,
**kwargs):
"""Do the get data method for a single ticker."""
if check_db:
try:
return self._from_db(ticker, source, start, end,
filter_data, **kwargs)
except DataAccessError:
# don't raise, try to make the network call
logger.info(f'Ticker: {ticker} not found in DB.')
try:
return self._from_web(ticker, source, start, end, **kwargs)
except DataAccessError:
logger.warning(f'Error getting data from {source} '
f'for ticker: {ticker}')
raise
@write_chunks()
def _from_web(self,
ticker: str,
source: str,
start: dt.datetime,
end: dt.datetime,
**kwargs) -> DfLibName:
"""Retrieve data from a web source"""
_ = kwargs.pop('columns', None)
try:
logger.info(f'Making call to {source}. Start date: {start},'
f'End date: {end}')
df = pdr.DataReader(ticker, data_source=source, start=start,
end=end, **kwargs)
if df.empty:
logger.warning('df retrieved was empty.')
# the string should be ignored anyway
return DfLibName(df, lib_name=self.lib_name)
except RemoteDataError as e:
logger.warning(f'Error occurred getting data from {source}')
raise DataAccessError from e
else:
df = pd_utils.rename_bar_cols(df)
df[pd_utils.TICKER_COL] = ticker
if source == YAHOO:
# yahoo doesn't set the index :(
df = df.set_index([pd_utils.DATE_COL])
else:
df.index.name = pd_utils.DATE_COL
return DfLibName(df, lib_name=self.lib_name)
def _from_db(self,
ticker: str,
source: str,
start: dt.datetime,
end: dt.datetime,
filter_data: bool = True,
**kwargs) -> DfLibName:
"""
Try to read data from the DB.
:param ticker: The ticker to retrieve from the DB.
        :param source: Only used if there is not enough data in the DB.
:param start: The start of the range.
:param end: The end of the range.
:param filter_data: Passed to the read method.
:param kwargs: Passed to the read method.
:return: The data frame.
:raises: NoDataFoundException if no data is found for the given ticker.
"""
chunk_range = DateRange(start=start, end=end)
try:
logger.info(f'Checking DB for ticker: {ticker}')
df = self.lib.read(ticker, chunk_range=chunk_range,
filter_data=filter_data, **kwargs)
except NoDataFoundException as e:
raise DataAccessError(f'No data in DB for ticker: {ticker}') from e
except KeyError as e:
# TODO: open a bug report against arctic...
logger.warning('KeyError thrown by Arctic...', e)
raise DataAccessError(
f'Error reading DB for ticker: {ticker}') from e
logger.debug(f'Found ticker: {ticker} in DB.')
db_start = dt_utils.parse_date(df.index.min(axis=1))
db_end = dt_utils.parse_date(df.index.max(axis=1))
# check that all the requested data is present
# TODO: deal with days that it is expected that data shouldn't exist.
if db_start > start and dt_utils.is_trade_day(start):
# db has less data than requested
lower_df_lib_name = self._from_web(ticker, source, start,
db_start - BDay())
lower_df = lower_df_lib_name.df
else:
lower_df = None
if db_end.date() < end.date() and dt_utils.is_trade_day(end):
            # db doesn't have as much data as requested
upper_df_lib_name = self._from_web(ticker, source, db_end, end)
upper_df = upper_df_lib_name.df
else:
upper_df = None
new_df = _concat_dfs(lower_df, upper_df, df)
return DfLibName(new_df, self.lib_name)
def get_symbols(self):
for s in self.lib.list_symbols():
yield s
def _concat_dfs(lower_df: pd.DataFrame,
upper_df: pd.DataFrame,
df: pd.DataFrame) -> pd.DataFrame:
"""
Helper method to concat the missing data frames, where `df` is the original
df.
"""
if lower_df is None and upper_df is None:
# everything is already in the df
return df
elif lower_df is not None and upper_df is None:
# missing only lower data
return pd.DataFrame(pd.concat([df, lower_df]))
elif lower_df is None and upper_df is not None:
# missing only upper data
return pd.DataFrame(pd.concat([df, upper_df]))
elif lower_df is not None and upper_df is not None:
# both missing
return pd.DataFrame(pd.concat([df, upper_df, lower_df]))
else:
return df
def load_from_csv(path: str,
start: dt.datetime = None,
end: dt.datetime = None) -> None:
"""
Load a list of tickers from a CSV, and download the data for the
requested period.
:param path: The path to the CSV file.
:param start: The start date to use for the data download.
:param end: The end date to use for the data download.
"""
| mit |
mmnelemane/neutron | neutron/db/migration/alembic_migrations/cisco_init_ops.py | 13 | 11857 | # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Initial schema operations for cisco plugin
from alembic import op
import sqlalchemy as sa
from neutron.plugins.cisco.common import cisco_constants
segment_type = sa.Enum('vlan', 'overlay', 'trunk', 'multi-segment',
name='segment_type')
profile_type = sa.Enum('network', 'policy', name='profile_type')
def upgrade():
op.create_table(
'cisco_policy_profiles',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_network_profiles',
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('name', sa.String(length=255), nullable=True),
sa.Column('segment_type', segment_type, nullable=False),
sa.Column('sub_type', sa.String(length=255), nullable=True),
sa.Column('segment_range', sa.String(length=255), nullable=True),
sa.Column('multicast_ip_index', sa.Integer(), nullable=True,
server_default='0'),
sa.Column('multicast_ip_range', sa.String(length=255), nullable=True),
sa.Column('physical_network', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('id'))
op.create_table(
'cisco_n1kv_vxlan_allocations',
sa.Column('vxlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), nullable=False,
server_default=sa.sql.false()),
sa.Column('network_profile_id', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['network_profile_id'],
['cisco_network_profiles.id'],
ondelete='CASCADE',
name='cisco_n1kv_vxlan_allocations_ibfk_1'),
sa.PrimaryKeyConstraint('vxlan_id'))
op.create_table(
'cisco_n1kv_vlan_allocations',
sa.Column('physical_network', sa.String(length=64), nullable=False),
sa.Column('vlan_id', sa.Integer(), autoincrement=False,
nullable=False),
sa.Column('allocated', sa.Boolean(), autoincrement=False,
nullable=False, server_default=sa.sql.false()),
sa.Column('network_profile_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('physical_network', 'vlan_id'),
sa.ForeignKeyConstraint(['network_profile_id'],
['cisco_network_profiles.id'],
ondelete='CASCADE',
name='cisco_n1kv_vlan_allocations_ibfk_1'))
op.create_table(
'cisco_credentials',
sa.Column('credential_id', sa.String(length=255), nullable=True),
sa.Column('credential_name', sa.String(length=255), nullable=False),
sa.Column('user_name', sa.String(length=255), nullable=True),
sa.Column('password', sa.String(length=255), nullable=True),
sa.Column('type', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('credential_name'))
op.create_table(
'cisco_qos_policies',
sa.Column('qos_id', sa.String(length=255), nullable=True),
sa.Column('tenant_id', sa.String(length=255), nullable=False),
sa.Column('qos_name', sa.String(length=255), nullable=False),
sa.Column('qos_desc', sa.String(length=255), nullable=True),
sa.PrimaryKeyConstraint('tenant_id', 'qos_name'))
op.create_table(
'cisco_n1kv_profile_bindings',
sa.Column('profile_type', profile_type, nullable=True),
sa.Column('tenant_id', sa.String(length=36), nullable=False,
server_default=cisco_constants.TENANT_ID_NOT_SET),
sa.Column('profile_id', sa.String(length=36), nullable=False),
sa.PrimaryKeyConstraint('tenant_id', 'profile_id'))
op.create_table(
'cisco_n1kv_vmnetworks',
sa.Column('name', sa.String(length=80), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.Column('port_count', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['profile_id'],
['cisco_policy_profiles.id'], ),
sa.PrimaryKeyConstraint('name'))
op.create_table(
'cisco_n1kv_trunk_segments',
sa.Column('trunk_segment_id', sa.String(length=36), nullable=False),
sa.Column('segment_id', sa.String(length=36), nullable=False),
sa.Column('dot1qtag', sa.String(length=36), nullable=False),
sa.ForeignKeyConstraint(['trunk_segment_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('trunk_segment_id', 'segment_id', 'dot1qtag'))
op.create_table(
'cisco_provider_networks',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=255), nullable=False),
sa.Column('segmentation_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'cisco_n1kv_multi_segments',
sa.Column('multi_segment_id', sa.String(length=36), nullable=False),
sa.Column('segment1_id', sa.String(length=36), nullable=False),
sa.Column('segment2_id', sa.String(length=36), nullable=False),
sa.Column('encap_profile_name', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['multi_segment_id'], ['networks.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('multi_segment_id', 'segment1_id',
'segment2_id'))
op.create_table(
'cisco_n1kv_network_bindings',
sa.Column('network_id', sa.String(length=36), nullable=False),
sa.Column('network_type', sa.String(length=32), nullable=False),
sa.Column('physical_network', sa.String(length=64), nullable=True),
sa.Column('segmentation_id', sa.Integer(), nullable=True),
sa.Column('multicast_ip', sa.String(length=32), nullable=True),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['profile_id'],
['cisco_network_profiles.id']),
sa.PrimaryKeyConstraint('network_id'))
op.create_table(
'cisco_n1kv_port_bindings',
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('profile_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['profile_id'], ['cisco_policy_profiles.id']),
sa.PrimaryKeyConstraint('port_id'))
op.create_table(
'cisco_csr_identifier_map',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('ipsec_site_conn_id', sa.String(length=36),
primary_key=True),
sa.Column('csr_tunnel_id', sa.Integer(), nullable=False),
sa.Column('csr_ike_policy_id', sa.Integer(), nullable=False),
sa.Column('csr_ipsec_policy_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['ipsec_site_conn_id'],
['ipsec_site_connections.id'],
ondelete='CASCADE')
)
op.create_table(
'cisco_ml2_apic_host_links',
sa.Column('host', sa.String(length=255), nullable=False),
sa.Column('ifname', sa.String(length=64), nullable=False),
sa.Column('ifmac', sa.String(length=32), nullable=True),
sa.Column('swid', sa.String(length=32), nullable=False),
sa.Column('module', sa.String(length=32), nullable=False),
sa.Column('port', sa.String(length=32), nullable=False),
sa.PrimaryKeyConstraint('host', 'ifname'))
op.create_table(
'cisco_ml2_apic_names',
sa.Column('neutron_id', sa.String(length=36), nullable=False),
sa.Column('neutron_type', sa.String(length=32), nullable=False),
sa.Column('apic_name', sa.String(length=255), nullable=False),
sa.PrimaryKeyConstraint('neutron_id', 'neutron_type'))
op.create_table(
'cisco_ml2_apic_contracts',
sa.Column('tenant_id', sa.String(length=255)),
sa.Column('router_id', sa.String(length=64), nullable=False),
sa.ForeignKeyConstraint(['router_id'], ['routers.id']),
sa.PrimaryKeyConstraint('router_id'))
op.create_table('cisco_hosting_devices',
sa.Column('tenant_id', sa.String(length=255), nullable=True),
sa.Column('id', sa.String(length=36), nullable=False),
sa.Column('complementary_id', sa.String(length=36), nullable=True),
sa.Column('device_id', sa.String(length=255), nullable=True),
sa.Column('admin_state_up', sa.Boolean(), nullable=False),
sa.Column('management_port_id', sa.String(length=36), nullable=True),
sa.Column('protocol_port', sa.Integer(), nullable=True),
sa.Column('cfg_agent_id', sa.String(length=36), nullable=True),
sa.Column('created_at', sa.DateTime(), nullable=False),
sa.Column('status', sa.String(length=16), nullable=True),
sa.ForeignKeyConstraint(['cfg_agent_id'], ['agents.id'], ),
sa.ForeignKeyConstraint(['management_port_id'], ['ports.id'],
ondelete='SET NULL'),
sa.PrimaryKeyConstraint('id')
)
op.create_table('cisco_port_mappings',
sa.Column('logical_resource_id', sa.String(length=36), nullable=False),
sa.Column('logical_port_id', sa.String(length=36), nullable=False),
sa.Column('port_type', sa.String(length=32), nullable=True),
sa.Column('network_type', sa.String(length=32), nullable=True),
sa.Column('hosting_port_id', sa.String(length=36), nullable=True),
sa.Column('segmentation_id', sa.Integer(), autoincrement=False,
nullable=True),
sa.ForeignKeyConstraint(['hosting_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.ForeignKeyConstraint(['logical_port_id'], ['ports.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('logical_resource_id', 'logical_port_id')
)
op.create_table('cisco_router_mappings',
sa.Column('router_id', sa.String(length=36), nullable=False),
sa.Column('auto_schedule', sa.Boolean(), nullable=False),
sa.Column('hosting_device_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['hosting_device_id'],
['cisco_hosting_devices.id'],
ondelete='SET NULL'),
sa.ForeignKeyConstraint(['router_id'], ['routers.id'],
ondelete='CASCADE'),
sa.PrimaryKeyConstraint('router_id')
)
| apache-2.0 |
Lekensteyn/buildbot | master/buildbot/scripts/restart.py | 11 | 1223 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from buildbot.scripts import base
from buildbot.scripts import start
from buildbot.scripts import stop
def restart(config):
basedir = config['basedir']
quiet = config['quiet']
if not base.isBuildmasterDir(basedir):
return 1
if stop.stop(config, wait=True) != 0:
return 1
if not quiet:
print("now restarting buildbot process..")
return start.start(config)
| gpl-2.0 |
kantai/passe-framework-prototype | django/views/generic/detail.py | 154 | 5498 | import re
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from django.http import Http404
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from django.views.generic.base import TemplateResponseMixin, View
class SingleObjectMixin(object):
"""
Provides the ability to retrieve a single object for further manipulation.
"""
model = None
queryset = None
slug_field = 'slug'
context_object_name = None
def get_object(self, queryset=None):
"""
Returns the object the view is displaying.
By default this requires `self.queryset` and a `pk` or `slug` argument
in the URLconf, but subclasses can override this to return any object.
"""
# Use a custom queryset if provided; this is required for subclasses
# like DateDetailView
if queryset is None:
queryset = self.get_queryset()
# Next, try looking up by primary key.
pk = self.kwargs.get('pk', None)
slug = self.kwargs.get('slug', None)
if pk is not None:
queryset = queryset.filter(pk=pk)
# Next, try looking up by slug.
elif slug is not None:
slug_field = self.get_slug_field()
queryset = queryset.filter(**{slug_field: slug})
# If none of those are defined, it's an error.
else:
raise AttributeError(u"Generic detail view %s must be called with "
u"either an object pk or a slug."
% self.__class__.__name__)
try:
obj = queryset.get()
except ObjectDoesNotExist:
raise Http404(_(u"No %(verbose_name)s found matching the query") %
{'verbose_name': queryset.model._meta.verbose_name})
return obj
def get_queryset(self):
"""
Get the queryset to look an object up against. May not be called if
`get_object` is overridden.
"""
if self.queryset is None:
if self.model:
return self.model._default_manager.all()
else:
raise ImproperlyConfigured(u"%(cls)s is missing a queryset. Define "
u"%(cls)s.model, %(cls)s.queryset, or override "
u"%(cls)s.get_object()." % {
'cls': self.__class__.__name__
})
return self.queryset._clone()
def get_slug_field(self):
"""
Get the name of a slug field to be used to look up by slug.
"""
return self.slug_field
def get_context_object_name(self, obj):
"""
Get the name to use for the object.
"""
if self.context_object_name:
return self.context_object_name
elif hasattr(obj, '_meta'):
return smart_str(obj._meta.object_name.lower())
else:
return None
def get_context_data(self, **kwargs):
context = kwargs
context_object_name = self.get_context_object_name(self.object)
if context_object_name:
context[context_object_name] = self.object
return context
class BaseDetailView(SingleObjectMixin, View):
def get(self, request, **kwargs):
self.object = self.get_object()
context = self.get_context_data(object=self.object)
return self.render_to_response(context)
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
template_name_field = None
template_name_suffix = '_detail'
def get_template_names(self):
"""
Return a list of template names to be used for the request. Must return
a list. May not be called if get_template is overridden.
"""
try:
names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
except ImproperlyConfigured:
# If template_name isn't specified, it's not a problem --
# we just start with an empty list.
names = []
# If self.template_name_field is set, grab the value of the field
# of that name from the object; this is the most specific template
# name, if given.
if self.object and self.template_name_field:
name = getattr(self.object, self.template_name_field, None)
if name:
names.insert(0, name)
# The least-specific option is the default <app>/<model>_detail.html;
# only use this if the object in question is a model.
if hasattr(self.object, '_meta'):
names.append("%s/%s%s.html" % (
self.object._meta.app_label,
self.object._meta.object_name.lower(),
self.template_name_suffix
))
elif hasattr(self, 'model') and hasattr(self.model, '_meta'):
names.append("%s/%s%s.html" % (
self.model._meta.app_label,
self.model._meta.object_name.lower(),
self.template_name_suffix
))
return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
"""
Render a "detail" view of an object.
By default this is a model instance looked up from `self.queryset`, but the
view will support display of *any* object by overriding `self.get_object()`.
"""
| bsd-3-clause |
IronLanguages/ironpython3 | Tests/test_compiler.py | 1 | 9033 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
import os
import unittest
from iptest import IronPythonTestCase, is_cli, is_netcoreapp, run_test, skipUnlessIronPython
@unittest.skipIf(is_netcoreapp, 'no clr.CompileModules')
@skipUnlessIronPython()
class CompilerTest(IronPythonTestCase):
def compileCode(self, name, *codeArr):
import clr
inputFiles = []
counter = 0
for code in codeArr:
inputFile = os.path.join(self.temporary_dir, name + ("" if counter == 0 else str(counter)) + ".py")
self.write_to_file(inputFile, code)
inputFiles.append(inputFile)
counter+=1
dllFile = os.path.join(self.temporary_dir, name + ".dll")
clr.CompileModules(dllFile, mainModule=inputFiles[0], *inputFiles)
self.delete_files(*inputFiles)
clr.AddReferenceToFileAndPath(dllFile)
def compilePackage(self, packageName, codeDict):
import clr
packagePath = os.path.join(self.temporary_dir, packageName)
self.ensure_directory_present(packagePath)
fileList = []
for fileName, code in codeDict.items():
filePath = os.path.join(packagePath, fileName)
self.ensure_directory_present(os.path.dirname(filePath))
self.write_to_file(filePath, code)
fileList.append(filePath)
dllFile = os.path.join(self.temporary_dir, packageName + ".dll")
clr.CompileModules(dllFile, mainModule=fileList[0], *fileList)
self.delete_files(*fileList)
clr.AddReferenceToFileAndPath(dllFile)
############################ Tests ###################################################
def test_simple(self):
self.compileCode("simpleTest", "def f(): return 42")
import simpleTest
self.assertEqual(simpleTest.f(), 42)
def test_simple_dynsite(self):
#containing a dynamic site.
self.compileCode("simpleDynSiteTest", "def f(a , b): return a + b")
import simpleDynSiteTest
self.assertEqual(simpleDynSiteTest.f(2,3), 5)
def test_syntax_error(self):
self.assertRaises(SyntaxError, self.compileCode, "syntaxerrTest", "def f() pass")
def test_runtime_error(self):
self.compileCode("runtimeError", "def f(): print(a)")
from runtimeError import f
self.assertRaises(NameError, f)
def test_multiple_files(self):
self.compileCode("multiFiles", "def f(): return 42", "def g(): return 33")
import multiFiles, multiFiles1
self.assertEqual(multiFiles.f(), 42)
self.assertEqual(multiFiles1.g(), 33)
def test_multifile_import(self):
self.compileCode("multiFileImport", "import multiFileImport1\ndef f(): return multiFileImport1.f()", "def f(): return 42")
import multiFileImport
self.assertEqual(multiFileImport.f(), 42)
def test_multifile_import_external(self):
self.compileCode("multiFileImportExternal", "import external\ndef f(): return external.f()")
self.write_to_file(os.path.join(self.temporary_dir, "external.py"), "def f(): return 'hello'")
import multiFileImportExternal
self.assertEqual(multiFileImportExternal.f(), 'hello')
def test_load_order_builtins(self):
self.compileCode("sys", "def f(): return 'hello'")
import sys
self.assertRaises(AttributeError, lambda: sys.f)
def test_load_order_modfile(self):
import clr
fileName = os.path.join(self.temporary_dir,"loadOrderMod.py")
dllName = os.path.join(self.temporary_dir,"loadOrderMod.dll")
self.write_to_file(fileName, "def f(): return 'hello'")
clr.CompileModules(dllName, fileName)
self.write_to_file(fileName, "def f(): return 'bonjour'")
clr.AddReferenceToFileAndPath(dllName)
import loadOrderMod
self.assertEqual(loadOrderMod.f(), 'hello')
def test_exceptions(self):
self.compileCode("exceptionsTest", "def f(): raise SystemError")
import exceptionsTest
self.assertRaises(SystemError, exceptionsTest.f)
def test_package_init(self):
self.compilePackage("initPackage", { "__init__.py" : "def f(): return 42" });
import initPackage
self.assertEqual(initPackage.f(), 42)
def test_package_simple(self):
self.compilePackage("simplePackage", { "__init__.py" : "from . import a\nfrom . import b\ndef f(): return a.f() + b.f()",
"a.py" : "def f() : return 10",
"b.py" : "def f() : return 20"})
import simplePackage
self.assertEqual(simplePackage.f(), 30)
self.assertEqual(simplePackage.a.f(), 10)
self.assertEqual(simplePackage.b.f(), 20)
def test_package_subpackage(self):
self.compilePackage("subPackage", { "__init__.py" : "from . import a\nfrom .b import c\ndef f(): return a.f() + c.f()",
"a.py" : "def f(): return 10",
"b/__init__.py" : "def f(): return 'kthxbye'",
"b/c.py" : "def f(): return 20"})
import subPackage
self.assertEqual(subPackage.f(), 30)
self.assertEqual(subPackage.b.f(), 'kthxbye')
self.assertEqual(subPackage.b.c.f(), 20)
def test_package_subpackage_relative_imports(self):
self.compilePackage("subPackage_relative", { "__init__.py" : "from .foo import bar",
"foo/__init__.py" : "from .foo import bar",
"foo/foo.py" : "bar = 'BAR'"})
import subPackage_relative
self.assertEqual(subPackage_relative.bar, 'BAR')
#TODO add some more tests for main after this bug is fixed.
def test_main(self):
self.compileCode("mainTest", "def f(): return __name__")
#this probably won't work. Need to verify once bug is fixed.
import mainTest
self.assertEqual(mainTest.f(), "mainTest")
def test_empty_file(self):
self.compileCode("emptyFile", "")
import emptyFile
def test_negative(self):
import clr
self.assertRaises(TypeError, clr.CompileModules, None, None)
self.assertRaises(IOError, clr.CompileModules, "foo.dll", "ffoo.py")
def test_overwrite(self):
import clr
self.write_to_file(os.path.join(self.temporary_dir, "overwrite.py"), "def foo(): return 'bar'")
dllFile = os.path.join(self.temporary_dir, "overwrite.dll")
clr.CompileModules(dllFile, os.path.join(self.temporary_dir, "overwrite.py"))
self.write_to_file(os.path.join(self.temporary_dir, "overwrite1.py"), "def foo(): return 'boo'")
clr.CompileModules(dllFile, os.path.join(self.temporary_dir, "overwrite1.py"))
clr.AddReferenceToFileAndPath(dllFile)
import overwrite1
self.assertEqual(overwrite1.foo(), 'boo')
def test_cyclic_modules(self):
self.compileCode("cyclic_modules", "import cyclic_modules1\nA = 0", "import cyclic_modules\nA=1")
import cyclic_modules
self.assertEqual(cyclic_modules.A, 0)
self.assertEqual(cyclic_modules.cyclic_modules1.A, 1)
import cyclic_modules1
self.assertEqual(cyclic_modules1.A, 1)
self.assertEqual(cyclic_modules1.cyclic_modules.A, 0)
def test_cyclic_pkg(self):
self.compilePackage("cyclic_package", { "__init__.py" : "from . import cyclic_submodules0\nfrom . import cyclic_submodules1",
"cyclic_submodules0.py" : "import cyclic_package.cyclic_submodules1\nA = 2",
"cyclic_submodules1.py" : "import cyclic_package.cyclic_submodules0\nA = 3"})
import cyclic_package
self.assertEqual(cyclic_package.cyclic_submodules0.A, 2)
self.assertEqual(cyclic_package.cyclic_submodules0.cyclic_package.cyclic_submodules1.A, 3)
self.assertEqual(cyclic_package.cyclic_submodules1.A, 3)
self.assertEqual(cyclic_package.cyclic_submodules1.cyclic_package.cyclic_submodules0.A, 2)
def test_system_core_cp20623(self):
self.compileCode("cp20623", "import System\nA=System.DateTime(350000000).Second\nprint(A)")
import cp20623
self.assertEqual(cp20623.A, 35)
#TODO: need to also generate a standalone exe from cp20623 and try running it
def test_cp30178(self):
self.compileCode("cp30178", 'mydict = { "a": ("Fail", "tuple") }')
import cp30178
self.assertEqual(cp30178.mydict, {'a' : ('Fail', 'tuple')})
run_test(__name__)
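# Rough illustration (assumption, outside the test harness): the clr APIs
# exercised above can be used directly to pre-compile and load modules:
#
#     import clr
#     clr.CompileModules("mylib.dll", "a.py", "b.py")   # file names are hypothetical
#     clr.AddReferenceToFileAndPath("mylib.dll")
#     import a                                          # now served from mylib.dll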
| apache-2.0 |
Pymatteo/QtNMR | build/exe.win32-3.4/scipy/integrate/tests/test_integrate.py | 7 | 19403 | # Authors: Nils Wagner, Ed Schofield, Pauli Virtanen, John Travers
"""
Tests for numerical integration.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy import (arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp,
allclose)
from scipy.lib.six import xrange
from numpy.testing import (
assert_, TestCase, run_module_suite, assert_array_almost_equal,
assert_raises, assert_allclose, assert_array_equal, assert_equal)
from scipy.integrate import odeint, ode, complex_ode
#------------------------------------------------------------------------------
# Test ODE integrators
#------------------------------------------------------------------------------
class TestOdeint(TestCase):
# Check integrate.odeint
def _do_problem(self, problem):
t = arange(0.0, problem.stop_t, 0.05)
z, infodict = odeint(problem.f, problem.z0, t, full_output=True)
assert_(problem.verify(z, t))
def test_odeint(self):
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
self._do_problem(problem)
class TestODEClass(TestCase):
ode_class = None # Set in subclass.
def _do_problem(self, problem, integrator, method='adams'):
# ode has callback arguments in different order than odeint
f = lambda t, z: problem.f(z, t)
jac = None
if hasattr(problem, 'jac'):
jac = lambda t, z: problem.jac(z, t)
integrator_params = {}
if problem.lband is not None or problem.uband is not None:
integrator_params['uband'] = problem.uband
integrator_params['lband'] = problem.lband
ig = self.ode_class(f, jac)
ig.set_integrator(integrator,
atol=problem.atol/10,
rtol=problem.rtol/10,
method=method,
**integrator_params)
ig.set_initial_value(problem.z0, t=0.0)
z = ig.integrate(problem.stop_t)
assert_array_equal(z, ig.y)
assert_(ig.successful(), (problem, method))
assert_(problem.verify(array([z]), problem.stop_t), (problem, method))
class TestOde(TestODEClass):
ode_class = ode
def test_vode(self):
# Check the vode solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
if not problem.stiff:
self._do_problem(problem, 'vode', 'adams')
self._do_problem(problem, 'vode', 'bdf')
def test_zvode(self):
# Check the zvode solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if not problem.stiff:
self._do_problem(problem, 'zvode', 'adams')
self._do_problem(problem, 'zvode', 'bdf')
def test_lsoda(self):
# Check the lsoda solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
self._do_problem(problem, 'lsoda')
def test_dopri5(self):
# Check the dopri5 solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
if problem.stiff:
continue
if hasattr(problem, 'jac'):
continue
self._do_problem(problem, 'dopri5')
def test_dop853(self):
# Check the dop853 solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.cmplx:
continue
if problem.stiff:
continue
if hasattr(problem, 'jac'):
continue
self._do_problem(problem, 'dop853')
def test_concurrent_fail(self):
for sol in ('vode', 'zvode', 'lsoda'):
f = lambda t, y: 1.0
r = ode(f).set_integrator(sol)
r.set_initial_value(0, 0)
r2 = ode(f).set_integrator(sol)
r2.set_initial_value(0, 0)
r.integrate(r.t + 0.1)
r2.integrate(r2.t + 0.1)
assert_raises(RuntimeError, r.integrate, r.t + 0.1)
def test_concurrent_ok(self):
f = lambda t, y: 1.0
for k in xrange(3):
for sol in ('vode', 'zvode', 'lsoda', 'dopri5', 'dop853'):
r = ode(f).set_integrator(sol)
r.set_initial_value(0, 0)
r2 = ode(f).set_integrator(sol)
r2.set_initial_value(0, 0)
r.integrate(r.t + 0.1)
r2.integrate(r2.t + 0.1)
r2.integrate(r2.t + 0.1)
assert_allclose(r.y, 0.1)
assert_allclose(r2.y, 0.2)
for sol in ('dopri5', 'dop853'):
r = ode(f).set_integrator(sol)
r.set_initial_value(0, 0)
r2 = ode(f).set_integrator(sol)
r2.set_initial_value(0, 0)
r.integrate(r.t + 0.1)
r.integrate(r.t + 0.1)
r2.integrate(r2.t + 0.1)
r.integrate(r.t + 0.1)
r2.integrate(r2.t + 0.1)
assert_allclose(r.y, 0.3)
assert_allclose(r2.y, 0.2)
class TestComplexOde(TestODEClass):
ode_class = complex_ode
def test_vode(self):
# Check the vode solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if not problem.stiff:
self._do_problem(problem, 'vode', 'adams')
else:
self._do_problem(problem, 'vode', 'bdf')
def test_lsoda(self):
# Check the lsoda solver
for problem_cls in PROBLEMS:
problem = problem_cls()
self._do_problem(problem, 'lsoda')
def test_dopri5(self):
# Check the dopri5 solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.stiff:
continue
if hasattr(problem, 'jac'):
continue
self._do_problem(problem, 'dopri5')
def test_dop853(self):
# Check the dop853 solver
for problem_cls in PROBLEMS:
problem = problem_cls()
if problem.stiff:
continue
if hasattr(problem, 'jac'):
continue
self._do_problem(problem, 'dop853')
class TestSolout(TestCase):
# Check integrate.ode correctly handles solout for dopri5 and dop853
def _run_solout_test(self, integrator):
# Check correct usage of solout
ts = []
ys = []
t0 = 0.0
tend = 10.0
y0 = [1.0, 2.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
def rhs(t, y):
return [y[0] + y[1], -y[1]**2]
ig = ode(rhs).set_integrator(integrator)
ig.set_solout(solout)
ig.set_initial_value(y0, t0)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_equal(ts[-1], tend)
def test_solout(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_test(integrator)
def _run_solout_break_test(self, integrator):
# Check correct usage of stopping via solout
ts = []
ys = []
t0 = 0.0
tend = 10.0
y0 = [1.0, 2.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
if t > tend/2.0:
return -1
def rhs(t, y):
return [y[0] + y[1], -y[1]**2]
ig = ode(rhs).set_integrator(integrator)
ig.set_solout(solout)
ig.set_initial_value(y0, t0)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_(ts[-1] > tend/2.0)
assert_(ts[-1] < tend)
def test_solout_break(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_break_test(integrator)
class TestComplexSolout(TestCase):
# Check integrate.ode correctly handles solout for dopri5 and dop853
def _run_solout_test(self, integrator):
# Check correct usage of solout
ts = []
ys = []
t0 = 0.0
tend = 20.0
y0 = [0.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
def rhs(t, y):
return [1.0/(t - 10.0 - 1j)]
ig = complex_ode(rhs).set_integrator(integrator)
ig.set_solout(solout)
ig.set_initial_value(y0, t0)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_equal(ts[-1], tend)
def test_solout(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_test(integrator)
def _run_solout_break_test(self, integrator):
# Check correct usage of stopping via solout
ts = []
ys = []
t0 = 0.0
tend = 20.0
y0 = [0.0]
def solout(t, y):
ts.append(t)
ys.append(y.copy())
if t > tend/2.0:
return -1
def rhs(t, y):
return [1.0/(t - 10.0 - 1j)]
ig = complex_ode(rhs).set_integrator(integrator)
ig.set_solout(solout)
ig.set_initial_value(y0, t0)
ret = ig.integrate(tend)
assert_array_equal(ys[0], y0)
assert_array_equal(ys[-1], ret)
assert_equal(ts[0], t0)
assert_(ts[-1] > tend/2.0)
assert_(ts[-1] < tend)
def test_solout_break(self):
for integrator in ('dopri5', 'dop853'):
self._run_solout_break_test(integrator)
#------------------------------------------------------------------------------
# Test problems
#------------------------------------------------------------------------------
class ODE:
"""
ODE problem
"""
stiff = False
cmplx = False
stop_t = 1
z0 = []
lband = None
uband = None
atol = 1e-6
rtol = 1e-5
class SimpleOscillator(ODE):
r"""
Free vibration of a simple oscillator::
m \ddot{u} + k u = 0, u(0) = u_0, \dot{u}(0) = \dot{u}_0
Solution::
u(t) = u_0*cos(sqrt(k/m)*t)+\dot{u}_0*sin(sqrt(k/m)*t)/sqrt(k/m)
"""
stop_t = 1 + 0.09
z0 = array([1.0, 0.1], float)
k = 4.0
m = 1.0
def f(self, z, t):
tmp = zeros((2, 2), float)
tmp[0, 1] = 1.0
tmp[1, 0] = -self.k / self.m
return dot(tmp, z)
def verify(self, zs, t):
omega = sqrt(self.k / self.m)
u = self.z0[0]*cos(omega*t) + self.z0[1]*sin(omega*t)/omega
return allclose(u, zs[:, 0], atol=self.atol, rtol=self.rtol)
class ComplexExp(ODE):
r"""The equation :lm:`\dot u = i u`"""
stop_t = 1.23*pi
z0 = exp([1j, 2j, 3j, 4j, 5j])
cmplx = True
def f(self, z, t):
return 1j*z
def jac(self, z, t):
return 1j*eye(5)
def verify(self, zs, t):
u = self.z0 * exp(1j*t)
return allclose(u, zs, atol=self.atol, rtol=self.rtol)
class Pi(ODE):
r"""Integrate 1/(t + 1j) from t=-10 to t=10"""
stop_t = 20
z0 = [0]
cmplx = True
def f(self, z, t):
return array([1./(t - 10 + 1j)])
def verify(self, zs, t):
u = -2j * np.arctan(10)
return allclose(u, zs[-1, :], atol=self.atol, rtol=self.rtol)
class CoupledDecay(ODE):
r"""
3 coupled decays suited for banded treatment
(banded mode becomes necessary when N >> 3)
"""
stiff = True
stop_t = 0.5
z0 = [5.0, 7.0, 13.0]
lband = 1
uband = 0
lmbd = [0.17, 0.23, 0.29] # fictitious decay constants
def f(self, z, t):
lmbd = self.lmbd
return np.array([-lmbd[0]*z[0],
-lmbd[1]*z[1] + lmbd[0]*z[0],
-lmbd[2]*z[2] + lmbd[1]*z[1]])
def jac(self, z, t):
# The full Jacobian is
#
# [-lmbd[0] 0 0 ]
# [ lmbd[0] -lmbd[1] 0 ]
# [ 0 lmbd[1] -lmbd[2]]
#
# The lower and upper bandwidths are lband=1 and uband=0, resp.
# The representation of this array in packed format is
#
# [-lmbd[0] -lmbd[1] -lmbd[2]]
# [ lmbd[0] lmbd[1] 0 ]
lmbd = self.lmbd
j = np.zeros((self.lband + self.uband + 1, 3), order='F')
def set_j(ri, ci, val):
j[self.uband + ri - ci, ci] = val
set_j(0, 0, -lmbd[0])
set_j(1, 0, lmbd[0])
set_j(1, 1, -lmbd[1])
set_j(2, 1, lmbd[1])
set_j(2, 2, -lmbd[2])
return j
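# Worked illustration (not used by the tests): with lmbd = [0.17, 0.23, 0.29]
# the packed (lband=1, uband=0) Jacobian built above evaluates to
#
#     [[-0.17, -0.23, -0.29],
#      [ 0.17,  0.23,  0.00]]
#
# i.e. row 0 holds the diagonal and row 1 the sub-diagonal, matching the
# packed-format comment in jac().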
def verify(self, zs, t):
# Formulae derived by hand
lmbd = np.array(self.lmbd)
d10 = lmbd[1] - lmbd[0]
d21 = lmbd[2] - lmbd[1]
d20 = lmbd[2] - lmbd[0]
e0 = np.exp(-lmbd[0] * t)
e1 = np.exp(-lmbd[1] * t)
e2 = np.exp(-lmbd[2] * t)
u = np.vstack((
self.z0[0] * e0,
self.z0[1] * e1 + self.z0[0] * lmbd[0] / d10 * (e0 - e1),
self.z0[2] * e2 + self.z0[1] * lmbd[1] / d21 * (e1 - e2) +
lmbd[1] * lmbd[0] * self.z0[0] / d10 *
(1 / d20 * (e0 - e2) - 1 / d21 * (e1 - e2)))).transpose()
return allclose(u, zs, atol=self.atol, rtol=self.rtol)
PROBLEMS = [SimpleOscillator, ComplexExp, Pi, CoupledDecay]
#------------------------------------------------------------------------------
def f(t, x):
dxdt = [x[1], -x[0]]
return dxdt
def jac(t, x):
j = array([[0.0, 1.0],
[-1.0, 0.0]])
return j
def f1(t, x, omega):
dxdt = [omega*x[1], -omega*x[0]]
return dxdt
def jac1(t, x, omega):
j = array([[0.0, omega],
[-omega, 0.0]])
return j
def f2(t, x, omega1, omega2):
dxdt = [omega1*x[1], -omega2*x[0]]
return dxdt
def jac2(t, x, omega1, omega2):
j = array([[0.0, omega1],
[-omega2, 0.0]])
return j
def fv(t, x, omega):
dxdt = [omega[0]*x[1], -omega[1]*x[0]]
return dxdt
def jacv(t, x, omega):
j = array([[0.0, omega[0]],
[-omega[1], 0.0]])
return j
class ODECheckParameterUse(object):
"""Call an ode-class solver with several cases of parameter use."""
# This class is intentionally not a TestCase subclass.
# solver_name must be set before tests can be run with this class.
# Set these in subclasses.
solver_name = ''
solver_uses_jac = False
def _get_solver(self, f, jac):
solver = ode(f, jac)
if self.solver_uses_jac:
solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7,
with_jacobian=self.solver_uses_jac)
else:
# XXX Shouldn't set_integrator *always* accept the keyword arg
# 'with_jacobian', and perhaps raise an exception if it is set
# to True when the solver can't actually use it?
solver.set_integrator(self.solver_name, atol=1e-9, rtol=1e-7)
return solver
def _check_solver(self, solver):
ic = [1.0, 0.0]
solver.set_initial_value(ic, 0.0)
solver.integrate(pi)
assert_array_almost_equal(solver.y, [-1.0, 0.0])
def test_no_params(self):
solver = self._get_solver(f, jac)
self._check_solver(solver)
def test_one_scalar_param(self):
solver = self._get_solver(f1, jac1)
omega = 1.0
solver.set_f_params(omega)
if self.solver_uses_jac:
solver.set_jac_params(omega)
self._check_solver(solver)
def test_two_scalar_params(self):
solver = self._get_solver(f2, jac2)
omega1 = 1.0
omega2 = 1.0
solver.set_f_params(omega1, omega2)
if self.solver_uses_jac:
solver.set_jac_params(omega1, omega2)
self._check_solver(solver)
def test_vector_param(self):
solver = self._get_solver(fv, jacv)
omega = [1.0, 1.0]
solver.set_f_params(omega)
if self.solver_uses_jac:
solver.set_jac_params(omega)
self._check_solver(solver)
class DOPRI5CheckParameterUse(ODECheckParameterUse, TestCase):
solver_name = 'dopri5'
solver_uses_jac = False
class DOP853CheckParameterUse(ODECheckParameterUse, TestCase):
solver_name = 'dop853'
solver_uses_jac = False
class VODECheckParameterUse(ODECheckParameterUse, TestCase):
solver_name = 'vode'
solver_uses_jac = True
class ZVODECheckParameterUse(ODECheckParameterUse, TestCase):
solver_name = 'zvode'
solver_uses_jac = True
class LSODACheckParameterUse(ODECheckParameterUse, TestCase):
solver_name = 'lsoda'
solver_uses_jac = True
def test_odeint_banded_jacobian():
# Test the use of the `Dfun`, `ml` and `mu` options of odeint.
def func(y, t, c):
return c.dot(y)
def jac(y, t, c):
return c
def bjac_cols(y, t, c):
return np.column_stack((np.r_[0, np.diag(c, 1)], np.diag(c)))
def bjac_rows(y, t, c):
return np.row_stack((np.r_[0, np.diag(c, 1)], np.diag(c)))
c = array([[-50, 75, 0],
[0, -0.1, 1],
[0, 0, -1e-4]])
y0 = arange(3)
t = np.linspace(0, 50, 6)
# The results of the following three calls should be the same.
sol0, info0 = odeint(func, y0, t, args=(c,), full_output=True,
Dfun=jac)
sol1, info1 = odeint(func, y0, t, args=(c,), full_output=True,
Dfun=bjac_cols, ml=0, mu=1, col_deriv=True)
sol2, info2 = odeint(func, y0, t, args=(c,), full_output=True,
Dfun=bjac_rows, ml=0, mu=1)
# These could probably be compared using `assert_array_equal`.
# The code paths might not be *exactly* the same, so `allclose` is used
# to compare the solutions.
assert_allclose(sol0, sol1)
assert_allclose(sol0, sol2)
# Verify that the number of jacobian evaluations was the same
# for the calls of odeint with banded jacobian. This is a regression
# test--there was a bug in the handling of banded jacobians that resulted
# in an incorrect jacobian matrix being passed to the LSODA code.
# That would cause errors or excessive jacobian evaluations.
assert_array_equal(info1['nje'], info2['nje'])
def test_odeint_errors():
def sys1d(x, t):
return -100*x
def bad1(x, t):
return 1.0/0
def bad2(x, t):
return "foo"
def bad_jac1(x, t):
return 1.0/0
def bad_jac2(x, t):
return [["foo"]]
def sys2d(x, t):
return [-100*x[0], -0.1*x[1]]
def sys2d_bad_jac(x, t):
return [[1.0/0, 0], [0, -0.1]]
assert_raises(ZeroDivisionError, odeint, bad1, 1.0, [0, 1])
assert_raises(ValueError, odeint, bad2, 1.0, [0, 1])
assert_raises(ZeroDivisionError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac1)
assert_raises(ValueError, odeint, sys1d, 1.0, [0, 1], Dfun=bad_jac2)
assert_raises(ZeroDivisionError, odeint, sys2d, [1.0, 1.0], [0, 1],
Dfun=sys2d_bad_jac)
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 |
ak2703/edx-platform | openedx/core/lib/logsettings.py | 127 | 5765 | """Get log settings."""
import os
import platform
import sys
from logging.handlers import SysLogHandler
LOG_LEVELS = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
def get_logger_config(log_dir,
logging_env="no_env",
tracking_filename="tracking.log",
edx_filename="edx.log",
dev_env=False,
syslog_addr=None,
debug=False,
local_loglevel='INFO',
console_loglevel=None,
service_variant=None):
"""
Return the appropriate logging config dictionary. You should assign the
result of this to the LOGGING var in your settings. The reason it's done
this way instead of registering directly is that I didn't want to worry
about resetting the logging state if this is called multiple times when
settings are extended.
If dev_env is set to true, logging will not be done via local rsyslogd;
instead, tracking and application logs will be dropped in log_dir.
"tracking_filename" and "edx_filename" are ignored unless dev_env
is set to true since otherwise logging is handled by rsyslogd.
"""
# Revert to INFO if an invalid string is passed in
if local_loglevel not in LOG_LEVELS:
local_loglevel = 'INFO'
if console_loglevel is None or console_loglevel not in LOG_LEVELS:
console_loglevel = 'DEBUG' if debug else 'INFO'
if service_variant is None:
# default to a blank string so that if SERVICE_VARIANT is not
# set we will not log to a sub directory
service_variant = ''
hostname = platform.node().split(".")[0]
syslog_format = ("[service_variant={service_variant}]"
"[%(name)s][env:{logging_env}] %(levelname)s "
"[{hostname} %(process)d] [%(filename)s:%(lineno)d] "
"- %(message)s").format(service_variant=service_variant,
logging_env=logging_env,
hostname=hostname)
handlers = ['console', 'local']
if syslog_addr:
handlers.append('syslogger-remote')
logger_config = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '%(asctime)s %(levelname)s %(process)d '
'[%(name)s] %(filename)s:%(lineno)d - %(message)s',
},
'syslog_format': {'format': syslog_format},
'raw': {'format': '%(message)s'},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
}
},
'handlers': {
'console': {
'level': console_loglevel,
'class': 'logging.StreamHandler',
'formatter': 'standard',
'stream': sys.stderr,
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'newrelic': {
'level': 'ERROR',
'class': 'lms.lib.newrelic_logging.NewRelicHandler',
'formatter': 'raw',
}
},
'loggers': {
'tracking': {
'handlers': ['tracking'],
'level': 'DEBUG',
'propagate': False,
},
'': {
'handlers': handlers,
'level': 'DEBUG',
'propagate': False
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
if syslog_addr:
logger_config['handlers'].update({
'syslogger-remote': {
'level': 'INFO',
'class': 'logging.handlers.SysLogHandler',
'address': syslog_addr,
'formatter': 'syslog_format',
},
})
if dev_env:
tracking_file_loc = os.path.join(log_dir, tracking_filename)
edx_file_loc = os.path.join(log_dir, edx_filename)
logger_config['handlers'].update({
'local': {
'class': 'logging.handlers.RotatingFileHandler',
'level': local_loglevel,
'formatter': 'standard',
'filename': edx_file_loc,
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
'tracking': {
'level': 'DEBUG',
'class': 'logging.handlers.RotatingFileHandler',
'filename': tracking_file_loc,
'formatter': 'raw',
'maxBytes': 1024 * 1024 * 2,
'backupCount': 5,
},
})
else:
# for production environments we will only
# log INFO and up
logger_config['loggers']['']['level'] = 'INFO'
logger_config['handlers'].update({
'local': {
'level': local_loglevel,
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'formatter': 'syslog_format',
'facility': SysLogHandler.LOG_LOCAL0,
},
'tracking': {
'level': 'DEBUG',
'class': 'logging.handlers.SysLogHandler',
'address': '/dev/log',
'facility': SysLogHandler.LOG_LOCAL1,
'formatter': 'raw',
},
})
return logger_config
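# Minimal usage sketch (assumption; the directory and variant names are
# illustrative, not edx-platform defaults):
#
#     LOGGING = get_logger_config('/var/log/edx',
#                                 logging_env='sandbox',
#                                 debug=False,
#                                 dev_env=True,
#                                 service_variant='lms')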
| agpl-3.0 |
ghedsouza/django | tests/indexes/tests.py | 35 | 5422 | from unittest import skipUnless
from django.db import connection
from django.db.models.deletion import CASCADE
from django.db.models.fields.related import ForeignKey
from django.test import TestCase, TransactionTestCase
from .models import Article, ArticleTranslation, IndexTogetherSingleList
class SchemaIndexesTests(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
with connection.schema_editor() as editor:
index_name = editor._create_index_name(
model=Article,
column_names=("c1",),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_a52bd80b123")
def test_index_name(self):
"""
Index names on the built-in database backends::
* Are truncated as needed.
* Include all the column names.
* Include a deterministic hash.
"""
long_name = 'l%sng' % ('o' * 100)
with connection.schema_editor() as editor:
index_name = editor._create_index_name(
model=Article,
column_names=('c1', 'c2', long_name),
suffix='ix',
)
expected = {
'mysql': 'indexes_article_c1_c2_looooooooooooooooooo_255179b2ix',
'oracle': 'indexes_a_c1_c2_loo_255179b2ix',
'postgresql': 'indexes_article_c1_c2_loooooooooooooooooo_255179b2ix',
'sqlite': 'indexes_article_c1_c2_l%sng_255179b2ix' % ('o' * 100),
}
if connection.vendor not in expected:
self.skipTest('This test is only supported on the built-in database backends.')
self.assertEqual(index_name, expected[connection.vendor])
def test_index_together(self):
editor = connection.schema_editor()
index_sql = editor._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted
self.assertIn(
connection.ops.quote_name(
editor._create_index_name(Article, ['headline', 'pub_date'], suffix='_idx')
),
index_sql[0]
)
def test_index_together_single_list(self):
# Test for using index_together with a single list (#22172)
index_sql = connection.schema_editor()._model_indexes_sql(IndexTogetherSingleList)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'postgresql', "This is a postgresql-specific issue")
def test_postgresql_text_indexes(self):
"""Test creation of PostgreSQL-specific text indexes (#12234)"""
from .models import IndexedArticle
index_sql = connection.schema_editor()._model_indexes_sql(IndexedArticle)
self.assertEqual(len(index_sql), 5)
self.assertIn('("headline" varchar_pattern_ops)', index_sql[1])
self.assertIn('("body" text_pattern_ops)', index_sql[3])
# unique=True and db_index=True should only create the varchar-specific
# index (#19441).
self.assertIn('("slug" varchar_pattern_ops)', index_sql[4])
@skipUnless(connection.vendor == 'postgresql', "This is a postgresql-specific issue")
def test_postgresql_virtual_relation_indexes(self):
"""Test indexes are not created for related objects"""
index_sql = connection.schema_editor()._model_indexes_sql(Article)
self.assertEqual(len(index_sql), 1)
@skipUnless(connection.vendor == 'mysql', 'MySQL tests')
class SchemaIndexesMySQLTests(TransactionTestCase):
available_apps = ['indexes']
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys
(#14180). An index should be created if db_constraint=False (#26171).
"""
storage = connection.introspection.get_storage_engine(
connection.cursor(), ArticleTranslation._meta.db_table
)
if storage != "InnoDB":
self.skipTest("This test only applies to the InnoDB storage engine")
index_sql = connection.schema_editor()._model_indexes_sql(ArticleTranslation)
self.assertEqual(index_sql, [
'CREATE INDEX `indexes_articletranslation_article_no_constraint_id_d6c0806b` '
'ON `indexes_articletranslation` (`article_no_constraint_id`)'
])
# The index also shouldn't be created if the ForeignKey is added after
# the model was created.
field_created = False
try:
with connection.schema_editor() as editor:
new_field = ForeignKey(Article, CASCADE)
new_field.set_attributes_from_name('new_foreign_key')
editor.add_field(ArticleTranslation, new_field)
field_created = True
self.assertEqual(editor.deferred_sql, [
'ALTER TABLE `indexes_articletranslation` '
'ADD CONSTRAINT `indexes_articletrans_new_foreign_key_id_d27a9146_fk_indexes_a` '
'FOREIGN KEY (`new_foreign_key_id`) REFERENCES `indexes_article` (`id`)'
])
finally:
if field_created:
with connection.schema_editor() as editor:
editor.remove_field(ArticleTranslation, new_field)
| bsd-3-clause |
donutmonger/youtube-dl | youtube_dl/utils.py | 7 | 71290 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import calendar
import codecs
import contextlib
import ctypes
import datetime
import email.utils
import errno
import functools
import gzip
import itertools
import io
import json
import locale
import math
import operator
import os
import pipes
import platform
import re
import ssl
import socket
import struct
import subprocess
import sys
import tempfile
import traceback
import xml.etree.ElementTree
import zlib
from .compat import (
compat_basestring,
compat_chr,
compat_html_entities,
compat_http_client,
compat_kwargs,
compat_parse_qs,
compat_socket_create_connection,
compat_str,
compat_urllib_error,
compat_urllib_parse,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
shlex_quote,
)
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))
std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20150101 Firefox/20.0 (Chrome)',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5',
}
NO_DEFAULT = object()
ENGLISH_MONTH_NAMES = [
'January', 'February', 'March', 'April', 'May', 'June',
'July', 'August', 'September', 'October', 'November', 'December']
def preferredencoding():
"""Get preferred encoding.
Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks.
"""
try:
pref = locale.getpreferredencoding()
'TEST'.encode(pref)
except Exception:
pref = 'UTF-8'
return pref
def write_json_file(obj, fn):
""" Encode obj as JSON and write it to fn, atomically if possible """
fn = encodeFilename(fn)
if sys.version_info < (3, 0) and sys.platform != 'win32':
encoding = get_filesystem_encoding()
# os.path.basename returns a bytes object, but NamedTemporaryFile
# will fail if the filename contains non ascii characters unless we
# use a unicode object
path_basename = lambda f: os.path.basename(fn).decode(encoding)
# the same for os.path.dirname
path_dirname = lambda f: os.path.dirname(fn).decode(encoding)
else:
path_basename = os.path.basename
path_dirname = os.path.dirname
args = {
'suffix': '.tmp',
'prefix': path_basename(fn) + '.',
'dir': path_dirname(fn),
'delete': False,
}
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3, 0):
args['mode'] = 'wb'
else:
args.update({
'mode': 'w',
'encoding': 'utf-8',
})
tf = tempfile.NamedTemporaryFile(**compat_kwargs(args))
try:
with tf:
json.dump(obj, tf)
if sys.platform == 'win32':
# Need to remove existing file on Windows, else os.rename raises
# WindowsError or FileExistsError.
try:
os.unlink(fn)
except OSError:
pass
os.rename(tf.name, fn)
except Exception:
try:
os.remove(tf.name)
except OSError:
pass
raise
if sys.version_info >= (2, 7):
def find_xpath_attr(node, xpath, key, val=None):
""" Find the xpath xpath[@key=val] """
assert re.match(r'^[a-zA-Z-]+$', key)
if val:
assert re.match(r'^[a-zA-Z0-9@\s:._-]*$', val)
expr = xpath + ('[@%s]' % key if val is None else "[@%s='%s']" % (key, val))
return node.find(expr)
else:
def find_xpath_attr(node, xpath, key, val=None):
# Here comes the crazy part: In 2.6, if the xpath is a unicode,
# .//node does not match if a node is a direct child of . !
if isinstance(xpath, compat_str):
xpath = xpath.encode('ascii')
for f in node.findall(xpath):
if key not in f.attrib:
continue
if val is None or f.attrib.get(key) == val:
return f
return None
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
components = [c.split(':') for c in path.split('/')]
replaced = []
for c in components:
if len(c) == 1:
replaced.append(c[0])
else:
ns, tag = c
replaced.append('{%s}%s' % (ns_map[ns], tag))
return '/'.join(replaced)
def xpath_text(node, xpath, name=None, fatal=False, default=NO_DEFAULT):
if sys.version_info < (2, 7): # Crazy 2.6
xpath = xpath.encode('ascii')
n = node.find(xpath)
if n is None or n.text is None:
if default is not NO_DEFAULT:
return default
elif fatal:
name = xpath if name is None else name
raise ExtractorError('Could not find XML element %s' % name)
else:
return None
return n.text
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
return get_element_by_attribute("id", id, html)
def get_element_by_attribute(attribute, value, html):
"""Return the content of the tag with the specified attribute in the passed HTML document"""
m = re.search(r'''(?xs)
<([a-zA-Z0-9:._-]+)
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
\s+%s=['"]?%s['"]?
(?:\s+[a-zA-Z0-9:._-]+(?:=[a-zA-Z0-9:._-]+|="[^"]+"|='[^']+'))*?
\s*>
(?P<content>.*?)
</\1>
''' % (re.escape(attribute), re.escape(value)), html)
if not m:
return None
res = m.group('content')
if res.startswith('"') or res.startswith("'"):
res = res[1:-1]
return unescapeHTML(res)
def clean_html(html):
"""Clean an HTML snippet into a readable string"""
if html is None: # Convenience for sanitizing descriptions etc.
return html
# Newline vs <br />
html = html.replace('\n', ' ')
html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
# Strip html tags
html = re.sub('<.*?>', '', html)
# Replace html entities
html = unescapeHTML(html)
return html.strip()
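# Behaviour sketch (hand-checked against the code above, not an official test):
#
#     clean_html('Hello <b>World</b>')     -> 'Hello World'
#     clean_html('line one<br />line two') -> 'line one\nline two'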
def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open()
function.
It returns the tuple (stream, definitive_file_name).
"""
try:
if filename == '-':
if sys.platform == 'win32':
import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode)
return (stream, filename)
except (IOError, OSError) as err:
if err.errno in (errno.EACCES,):
raise
# In case of error, try to remove win32 forbidden chars
alt_filename = sanitize_path(filename)
if alt_filename == filename:
raise
else:
# An exception here should be caught in the caller
stream = open(encodeFilename(alt_filename), open_mode)
return (stream, alt_filename)
def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp"""
timestamp = None
timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple)
return timestamp
def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.
If restricted is set, use a stricter subset of allowed characters.
Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
"""
def replace_insane(char):
if char == '?' or ord(char) < 32 or ord(char) == 127:
return ''
elif char == '"':
return '' if restricted else '\''
elif char == ':':
return '_-' if restricted else ' -'
elif char in '\\/|*<>':
return '_'
if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
return '_'
if restricted and ord(char) > 127:
return '_'
return char
# Handle timestamps
s = re.sub(r'[0-9]+(?::[0-9]+)+', lambda m: m.group(0).replace(':', '_'), s)
result = ''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if result.startswith('-'):
result = '_' + result[len('-'):]
result = result.lstrip('.')
if not result:
result = '_'
return result
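# Illustrative behaviour (hand-checked against the code above):
#
#     sanitize_filename('New World record at 0:12:34')
#         -> 'New World record at 0_12_34'   # timestamps keep their digits
#     sanitize_filename('AT&T', restricted=True)
#         -> 'AT_T'                          # restricted mode replaces '&'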
def sanitize_path(s):
"""Sanitizes and normalizes path on Windows"""
if sys.platform != 'win32':
return s
drive_or_unc, _ = os.path.splitdrive(s)
if sys.version_info < (2, 7) and not drive_or_unc:
drive_or_unc, _ = os.path.splitunc(s)
norm_path = os.path.normpath(remove_start(s, drive_or_unc)).split(os.path.sep)
if drive_or_unc:
norm_path.pop(0)
sanitized_path = [
path_part if path_part in ['.', '..'] else re.sub('(?:[/<>:"\\|\\\\?\\*]|\.$)', '#', path_part)
for path_part in norm_path]
if drive_or_unc:
sanitized_path.insert(0, drive_or_unc + os.path.sep)
return os.path.join(*sanitized_path)
def orderedSet(iterable):
""" Remove all duplicates from the input iterable """
res = []
for el in iterable:
if el not in res:
res.append(el)
return res
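# For example, orderedSet([1, 1, 2, 3, 1]) -> [1, 2, 3]; the order of first
# occurrence is preserved.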
def _htmlentity_transform(entity):
"""Transforms an HTML entity to a character."""
# Known non-numeric HTML entity
if entity in compat_html_entities.name2codepoint:
return compat_chr(compat_html_entities.name2codepoint[entity])
mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
if mobj is not None:
numstr = mobj.group(1)
if numstr.startswith('x'):
base = 16
numstr = '0%s' % numstr
else:
base = 10
return compat_chr(int(numstr, base))
# Unknown entity in name, return its literal representation
return ('&%s;' % entity)
def unescapeHTML(s):
if s is None:
return None
assert type(s) == compat_str
return re.sub(
r'&([^;]+);', lambda m: _htmlentity_transform(m.group(1)), s)
def get_subprocess_encoding():
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# For subprocess calls, encode with locale encoding
# Refer to http://stackoverflow.com/a/9951851/35070
encoding = preferredencoding()
else:
encoding = sys.getfilesystemencoding()
if encoding is None:
encoding = 'utf-8'
return encoding
def encodeFilename(s, for_subprocess=False):
"""
@param s The name of the file
"""
assert type(s) == compat_str
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
# Pass '' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
if not for_subprocess and sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
return s
return s.encode(get_subprocess_encoding(), 'ignore')
def decodeFilename(b, for_subprocess=False):
if sys.version_info >= (3, 0):
return b
if not isinstance(b, bytes):
return b
return b.decode(get_subprocess_encoding(), 'ignore')
def encodeArgument(s):
if not isinstance(s, compat_str):
# Legacy code that uses byte strings
# Uncomment the following line after fixing all post processors
# assert False, 'Internal error: %r should be of type %r, is %r' % (s, compat_str, type(s))
s = s.decode('ascii')
return encodeFilename(s, True)
def decodeArgument(b):
return decodeFilename(b, True)
def decodeOption(optval):
if optval is None:
return optval
if isinstance(optval, bytes):
optval = optval.decode(preferredencoding())
assert isinstance(optval, compat_str)
return optval
def formatSeconds(secs):
if secs > 3600:
return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
elif secs > 60:
return '%d:%02d' % (secs // 60, secs % 60)
else:
return '%d' % secs
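# Sample outputs (hand-checked against the branches above):
#
#     formatSeconds(3661) -> '1:01:01'
#     formatSeconds(90)   -> '1:30'
#     formatSeconds(7)    -> '7'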
def make_HTTPS_handler(params, **kwargs):
opts_no_check_certificate = params.get('nocheckcertificate', False)
if hasattr(ssl, 'create_default_context'): # Python >= 3.4 or 2.7.9
context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
if opts_no_check_certificate:
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
try:
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
except TypeError:
# Python 2.7.8
# (create_default_context present but HTTPSHandler has no context=)
pass
if sys.version_info < (3, 2):
return YoutubeDLHTTPSHandler(params, **kwargs)
else: # Python < 3.4
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = (ssl.CERT_NONE
if opts_no_check_certificate
else ssl.CERT_REQUIRED)
context.set_default_verify_paths()
return YoutubeDLHTTPSHandler(params, context=context, **kwargs)
def bug_reports_message():
if ytdl_is_updateable():
update_cmd = 'type youtube-dl -U to update'
else:
update_cmd = 'see https://yt-dl.org/update on how to update'
msg = '; please report this issue on https://yt-dl.org/bug .'
msg += ' Make sure you are using the latest version; %s.' % update_cmd
msg += ' Be sure to call youtube-dl with the --verbose flag and include its complete output.'
return msg
class ExtractorError(Exception):
"""Error during info extraction."""
def __init__(self, msg, tb=None, expected=False, cause=None, video_id=None):
""" tb, if given, is the original traceback (so that it can be printed out).
If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
"""
if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
expected = True
if video_id is not None:
msg = video_id + ': ' + msg
if cause:
msg += ' (caused by %r)' % cause
if not expected:
msg += bug_reports_message()
super(ExtractorError, self).__init__(msg)
self.traceback = tb
self.exc_info = sys.exc_info() # preserve original exception
self.cause = cause
self.video_id = video_id
def format_traceback(self):
if self.traceback is None:
return None
return ''.join(traceback.format_tb(self.traceback))
class UnsupportedError(ExtractorError):
def __init__(self, url):
super(UnsupportedError, self).__init__(
'Unsupported URL: %s' % url, expected=True)
self.url = url
class RegexNotFoundError(ExtractorError):
"""Error when a regex didn't match"""
pass
class DownloadError(Exception):
"""Download Error exception.
This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate
error message.
"""
def __init__(self, msg, exc_info=None):
""" exc_info, if given, is the original exception that caused the trouble (as returned by sys.exc_info()). """
super(DownloadError, self).__init__(msg)
self.exc_info = exc_info
class SameFileError(Exception):
"""Same File exception.
This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk.
"""
pass
class PostProcessingError(Exception):
"""Post Processing exception.
This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task.
"""
def __init__(self, msg):
self.msg = msg
class MaxDownloadsReached(Exception):
""" --max-downloads limit has been reached. """
pass
class UnavailableVideoError(Exception):
"""Unavailable Format exception.
This exception will be thrown when a video is requested
in a format that is not available for that video.
"""
pass
class ContentTooShortError(Exception):
"""Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating
the connection was probably interrupted.
"""
def __init__(self, downloaded, expected):
# Both in bytes
self.downloaded = downloaded
self.expected = expected
def _create_http_connection(ydl_handler, http_class, is_https, *args, **kwargs):
hc = http_class(*args, **kwargs)
source_address = ydl_handler._params.get('source_address')
if source_address is not None:
sa = (source_address, 0)
if hasattr(hc, 'source_address'): # Python 2.7+
hc.source_address = sa
else: # Python 2.6
def _hc_connect(self, *args, **kwargs):
sock = compat_socket_create_connection(
(self.host, self.port), self.timeout, sa)
if is_https:
self.sock = ssl.wrap_socket(
sock, self.key_file, self.cert_file,
ssl_version=ssl.PROTOCOL_TLSv1)
else:
self.sock = sock
hc.connect = functools.partial(_hc_connect, hc)
return hc
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
"""Handler for HTTP requests and responses.
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-No-Compression", which will be
removed before making the real request.
Part of this code was copied from:
http://techknack.net/python-urllib2-handlers/
Andrew Rowls, the author of that code, agreed to release it to the
public domain.
"""
def __init__(self, params, *args, **kwargs):
compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
self._params = params
def http_open(self, req):
return self.do_open(functools.partial(
_create_http_connection, self, compat_http_client.HTTPConnection, False),
req)
@staticmethod
def deflate(data):
try:
return zlib.decompress(data, -zlib.MAX_WBITS)
except zlib.error:
return zlib.decompress(data)
@staticmethod
def addinfourl_wrapper(stream, headers, url, code):
if hasattr(compat_urllib_request.addinfourl, 'getcode'):
return compat_urllib_request.addinfourl(stream, headers, url, code)
ret = compat_urllib_request.addinfourl(stream, headers, url)
ret.code = code
return ret
def http_request(self, req):
for h, v in std_headers.items():
# Capitalize is needed because of Python bug 2275: http://bugs.python.org/issue2275
# The dict keys are capitalized because of this bug by urllib
if h.capitalize() not in req.headers:
req.add_header(h, v)
if 'Youtubedl-no-compression' in req.headers:
if 'Accept-encoding' in req.headers:
del req.headers['Accept-encoding']
del req.headers['Youtubedl-no-compression']
if sys.version_info < (2, 7) and '#' in req.get_full_url():
# Python 2.6 is brain-dead when it comes to fragments
req._Request__original = req._Request__original.partition('#')[0]
req._Request__r_type = req._Request__r_type.partition('#')[0]
return req
def http_response(self, req, resp):
old_resp = resp
# gzip
if resp.headers.get('Content-encoding', '') == 'gzip':
content = resp.read()
gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
try:
uncompressed = io.BytesIO(gz.read())
except IOError as original_ioerror:
# There may be junk at the end of the file
# See http://stackoverflow.com/q/4928560/35070 for details
for i in range(1, 1024):
try:
gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
uncompressed = io.BytesIO(gz.read())
except IOError:
continue
break
else:
raise original_ioerror
resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
# deflate
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = io.BytesIO(self.deflate(resp.read()))
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
return resp
https_request = http_request
https_response = http_response
class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
def __init__(self, params, https_conn_class=None, *args, **kwargs):
compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
self._params = params
def https_open(self, req):
kwargs = {}
if hasattr(self, '_context'): # python > 2.6
kwargs['context'] = self._context
if hasattr(self, '_check_hostname'): # python 3.x
kwargs['check_hostname'] = self._check_hostname
return self.do_open(functools.partial(
_create_http_connection, self, self._https_conn_class, True),
req, **kwargs)
def parse_iso8601(date_str, delimiter='T', timezone=None):
""" Return a UNIX timestamp from the given date """
if date_str is None:
return None
if timezone is None:
m = re.search(
r'(\.[0-9]+)?(?:Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$)',
date_str)
if not m:
timezone = datetime.timedelta()
else:
date_str = date_str[:-len(m.group(0))]
if not m.group('sign'):
timezone = datetime.timedelta()
else:
sign = 1 if m.group('sign') == '+' else -1
timezone = datetime.timedelta(
hours=sign * int(m.group('hours')),
minutes=sign * int(m.group('minutes')))
date_format = '%Y-%m-%d{0}%H:%M:%S'.format(delimiter)
dt = datetime.datetime.strptime(date_str, date_format) - timezone
return calendar.timegm(dt.timetuple())
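# Example (hand-checked against the code above):
#
#     parse_iso8601('2014-03-23T23:04:26+0100') -> 1395612266
#     (i.e. 2014-03-23 22:04:26 UTC as a UNIX timestamp)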
def unified_strdate(date_str, day_first=True):
"""Return a string with the date in the format YYYYMMDD"""
if date_str is None:
return None
upload_date = None
# Replace commas
date_str = date_str.replace(',', ' ')
# %z (UTC offset) is only supported in python>=3.2
if not re.match(r'^[0-9]{1,2}-[0-9]{1,2}-[0-9]{4}$', date_str):
date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)
# Remove AM/PM + timezone
date_str = re.sub(r'(?i)\s*(?:AM|PM)(?:\s+[A-Z]+)?', '', date_str)
format_expressions = [
'%d %B %Y',
'%d %b %Y',
'%B %d %Y',
'%b %d %Y',
'%b %dst %Y %I:%M%p',
'%b %dnd %Y %I:%M%p',
'%b %dth %Y %I:%M%p',
'%Y %m %d',
'%Y-%m-%d',
'%Y/%m/%d',
'%Y/%m/%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S',
'%Y-%m-%d %H:%M:%S.%f',
'%d.%m.%Y %H:%M',
'%d.%m.%Y %H.%M',
'%Y-%m-%dT%H:%M:%SZ',
'%Y-%m-%dT%H:%M:%S.%fZ',
'%Y-%m-%dT%H:%M:%S.%f0Z',
'%Y-%m-%dT%H:%M:%S',
'%Y-%m-%dT%H:%M:%S.%f',
'%Y-%m-%dT%H:%M',
]
if day_first:
format_expressions.extend([
'%d-%m-%Y',
'%d.%m.%Y',
'%d/%m/%Y',
'%d/%m/%y',
'%d/%m/%Y %H:%M:%S',
])
else:
format_expressions.extend([
'%m-%d-%Y',
'%m.%d.%Y',
'%m/%d/%Y',
'%m/%d/%y',
'%m/%d/%Y %H:%M:%S',
])
for expression in format_expressions:
try:
upload_date = datetime.datetime.strptime(date_str, expression).strftime('%Y%m%d')
except ValueError:
pass
if upload_date is None:
timetuple = email.utils.parsedate_tz(date_str)
if timetuple:
upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
return upload_date
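# Examples (hand-checked against the format expressions above):
#
#     unified_strdate('December 21, 2014') -> '20141221'
#     unified_strdate('8/7/2009')          -> '20090708'   # day_first=True by default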
def determine_ext(url, default_ext='unknown_video'):
if url is None:
return default_ext
guess = url.partition('?')[0].rpartition('.')[2]
if re.match(r'^[A-Za-z0-9]+$', guess):
return guess
else:
return default_ext
def subtitles_filename(filename, sub_lang, sub_format):
return filename.rsplit('.', 1)[0] + '.' + sub_lang + '.' + sub_format
def date_from_str(date_str):
"""
Return a date object from a string in the format YYYYMMDD or
(now|today)[+-][0-9](day|week|month|year)(s)?"""
today = datetime.date.today()
if date_str in ('now', 'today'):
return today
if date_str == 'yesterday':
return today - datetime.timedelta(days=1)
match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
if match is not None:
sign = match.group('sign')
time = int(match.group('time'))
if sign == '-':
time = -time
unit = match.group('unit')
# A rough approximation: a month is treated as 30 days and a year as 365 days
if unit == 'month':
unit = 'day'
time *= 30
elif unit == 'year':
unit = 'day'
time *= 365
unit += 's'
delta = datetime.timedelta(**{unit: time})
return today + delta
return datetime.datetime.strptime(date_str, "%Y%m%d").date()
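# Illustrative usage (expected values are a best-effort sketch):
# date_from_str('now-1week') # -> today's date minus seven days
# date_from_str('20150315') # -> datetime.date(2015, 3, 15)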
def hyphenate_date(date_str):
"""
Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
if match is not None:
return '-'.join(match.groups())
else:
return date_str
class DateRange(object):
"""Represents a time interval between two dates"""
def __init__(self, start=None, end=None):
"""start and end must be strings in the format accepted by date"""
if start is not None:
self.start = date_from_str(start)
else:
self.start = datetime.datetime.min.date()
if end is not None:
self.end = date_from_str(end)
else:
self.end = datetime.datetime.max.date()
if self.start > self.end:
raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
@classmethod
def day(cls, day):
"""Returns a range that only contains the given day"""
return cls(day, day)
def __contains__(self, date):
"""Check if the date is in the range"""
if not isinstance(date, datetime.date):
date = date_from_str(date)
return self.start <= date <= self.end
def __str__(self):
return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
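# Illustrative usage (expected values are a best-effort sketch):
# '20150315' in DateRange('20150101', '20151231') # -> True
# '20160101' in DateRange('20150101', '20151231') # -> False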
def platform_name():
""" Returns the platform name as a compat_str """
res = platform.platform()
if isinstance(res, bytes):
res = res.decode(preferredencoding())
assert isinstance(res, compat_str)
return res
def _windows_write_string(s, out):
""" Returns True if the string was written using special methods,
False if it has yet to be written out."""
# Adapted from http://stackoverflow.com/a/3259271/35070
import ctypes
import ctypes.wintypes
WIN_OUTPUT_IDS = {
1: -11,
2: -12,
}
try:
fileno = out.fileno()
except AttributeError:
# If the output stream doesn't have a fileno, it's virtual
return False
except io.UnsupportedOperation:
# Some strange Windows pseudo files?
return False
if fileno not in WIN_OUTPUT_IDS:
return False
GetStdHandle = ctypes.WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
(b"GetStdHandle", ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
(b"GetConsoleMode", ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
if handle == INVALID_HANDLE_VALUE or handle is None:
return True
return ((GetFileType(handle) & ~FILE_TYPE_REMOTE) != FILE_TYPE_CHAR or
GetConsoleMode(handle, ctypes.byref(ctypes.wintypes.DWORD())) == 0)
if not_a_console(h):
return False
def next_nonbmp_pos(s):
try:
return next(i for i, c in enumerate(s) if ord(c) > 0xffff)
except StopIteration:
return len(s)
while s:
count = min(next_nonbmp_pos(s), 1024)
ret = WriteConsoleW(
h, s, count if count else 2, ctypes.byref(written), None)
if ret == 0:
raise OSError('Failed to write string')
if not count: # We just wrote a non-BMP character
assert written.value == 2
s = s[1:]
else:
assert written.value > 0
s = s[written.value:]
return True
def write_string(s, out=None, encoding=None):
if out is None:
out = sys.stderr
assert type(s) == compat_str
if sys.platform == 'win32' and encoding is None and hasattr(out, 'fileno'):
if _windows_write_string(s, out):
return
if ('b' in getattr(out, 'mode', '') or
sys.version_info[0] < 3): # Python 2 lies about mode of sys.stderr
byt = s.encode(encoding or preferredencoding(), 'ignore')
out.write(byt)
elif hasattr(out, 'buffer'):
enc = encoding or getattr(out, 'encoding', None) or preferredencoding()
byt = s.encode(enc, 'ignore')
out.buffer.write(byt)
else:
out.write(s)
out.flush()
def bytes_to_intlist(bs):
if not bs:
return []
if isinstance(bs[0], int): # Python 3
return list(bs)
else:
return [ord(c) for c in bs]
def intlist_to_bytes(xs):
if not xs:
return b''
return struct_pack('%dB' % len(xs), *xs)
# Cross-platform file locking
if sys.platform == 'win32':
import ctypes.wintypes
import msvcrt
class OVERLAPPED(ctypes.Structure):
_fields_ = [
('Internal', ctypes.wintypes.LPVOID),
('InternalHigh', ctypes.wintypes.LPVOID),
('Offset', ctypes.wintypes.DWORD),
('OffsetHigh', ctypes.wintypes.DWORD),
('hEvent', ctypes.wintypes.HANDLE),
]
kernel32 = ctypes.windll.kernel32
LockFileEx = kernel32.LockFileEx
LockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwFlags
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
LockFileEx.restype = ctypes.wintypes.BOOL
UnlockFileEx = kernel32.UnlockFileEx
UnlockFileEx.argtypes = [
ctypes.wintypes.HANDLE, # hFile
ctypes.wintypes.DWORD, # dwReserved
ctypes.wintypes.DWORD, # nNumberOfBytesToLockLow
ctypes.wintypes.DWORD, # nNumberOfBytesToLockHigh
ctypes.POINTER(OVERLAPPED) # Overlapped
]
UnlockFileEx.restype = ctypes.wintypes.BOOL
whole_low = 0xffffffff
whole_high = 0x7fffffff
def _lock_file(f, exclusive):
overlapped = OVERLAPPED()
overlapped.Offset = 0
overlapped.OffsetHigh = 0
overlapped.hEvent = 0
f._lock_file_overlapped_p = ctypes.pointer(overlapped)
handle = msvcrt.get_osfhandle(f.fileno())
if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Locking file failed: %r' % ctypes.FormatError())
def _unlock_file(f):
assert f._lock_file_overlapped_p
handle = msvcrt.get_osfhandle(f.fileno())
if not UnlockFileEx(handle, 0,
whole_low, whole_high, f._lock_file_overlapped_p):
raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
import fcntl
def _lock_file(f, exclusive):
fcntl.flock(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)
def _unlock_file(f):
fcntl.flock(f, fcntl.LOCK_UN)
class locked_file(object):
def __init__(self, filename, mode, encoding=None):
assert mode in ['r', 'a', 'w']
self.f = io.open(filename, mode, encoding=encoding)
self.mode = mode
def __enter__(self):
exclusive = self.mode != 'r'
try:
_lock_file(self.f, exclusive)
except IOError:
self.f.close()
raise
return self
def __exit__(self, etype, value, traceback):
try:
_unlock_file(self.f)
finally:
self.f.close()
def __iter__(self):
return iter(self.f)
def write(self, *args):
return self.f.write(*args)
def read(self, *args):
return self.f.read(*args)
def get_filesystem_encoding():
encoding = sys.getfilesystemencoding()
return encoding if encoding is not None else 'utf-8'
def shell_quote(args):
quoted_args = []
encoding = get_filesystem_encoding()
for a in args:
if isinstance(a, bytes):
# We may get a filename encoded with 'encodeFilename'
a = a.decode(encoding)
quoted_args.append(pipes.quote(a))
return ' '.join(quoted_args)
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
sdata = compat_urllib_parse.urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
def unsmuggle_url(smug_url, default=None):
if '#__youtubedl_smuggle' not in smug_url:
return smug_url, default
url, _, sdata = smug_url.rpartition('#')
jsond = compat_parse_qs(sdata)['__youtubedl_smuggle'][0]
data = json.loads(jsond)
return url, data
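# Illustrative round trip (a best-effort sketch; the URL is a made-up example):
# url = smuggle_url('http://example.com/video', {'force_videoid': '123'})
# unsmuggle_url(url) # -> ('http://example.com/video', {'force_videoid': '123'})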
def format_bytes(bytes):
if bytes is None:
return 'N/A'
if type(bytes) is str:
bytes = float(bytes)
if bytes == 0.0:
exponent = 0
else:
exponent = int(math.log(bytes, 1024.0))
suffix = ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB', 'YiB'][exponent]
converted = float(bytes) / float(1024 ** exponent)
return '%.2f%s' % (converted, suffix)
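# Illustrative usage (expected values are a best-effort sketch):
# format_bytes(1024) # -> '1.00KiB'
# format_bytes(1536) # -> '1.50KiB'
# format_bytes(None) # -> 'N/A'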
def parse_filesize(s):
if s is None:
return None
# The lower-case forms are of course incorrect and unofficial,
# but we support those too
_UNIT_TABLE = {
'B': 1,
'b': 1,
'KiB': 1024,
'KB': 1000,
'kB': 1024,
'Kb': 1000,
'MiB': 1024 ** 2,
'MB': 1000 ** 2,
'mB': 1024 ** 2,
'Mb': 1000 ** 2,
'GiB': 1024 ** 3,
'GB': 1000 ** 3,
'gB': 1024 ** 3,
'Gb': 1000 ** 3,
'TiB': 1024 ** 4,
'TB': 1000 ** 4,
'tB': 1024 ** 4,
'Tb': 1000 ** 4,
'PiB': 1024 ** 5,
'PB': 1000 ** 5,
'pB': 1024 ** 5,
'Pb': 1000 ** 5,
'EiB': 1024 ** 6,
'EB': 1000 ** 6,
'eB': 1024 ** 6,
'Eb': 1000 ** 6,
'ZiB': 1024 ** 7,
'ZB': 1000 ** 7,
'zB': 1024 ** 7,
'Zb': 1000 ** 7,
'YiB': 1024 ** 8,
'YB': 1000 ** 8,
'yB': 1024 ** 8,
'Yb': 1000 ** 8,
}
units_re = '|'.join(re.escape(u) for u in _UNIT_TABLE)
m = re.match(
r'(?P<num>[0-9]+(?:[,.][0-9]*)?)\s*(?P<unit>%s)' % units_re, s)
if not m:
return None
num_str = m.group('num').replace(',', '.')
mult = _UNIT_TABLE[m.group('unit')]
return int(float(num_str) * mult)
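# Illustrative usage (expected values are a best-effort sketch):
# parse_filesize('5 MiB') # -> 5242880
# parse_filesize('1,5 KB') # -> 1500 (decimal comma accepted, KB treated as 1000 bytes)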
def month_by_name(name):
""" Return the number of a month by (locale-independently) English name """
try:
return ENGLISH_MONTH_NAMES.index(name) + 1
except ValueError:
return None
def month_by_abbreviation(abbrev):
""" Return the number of a month by (locale-independently) English
abbreviations """
try:
return [s[:3] for s in ENGLISH_MONTH_NAMES].index(abbrev) + 1
except ValueError:
return None
def fix_xml_ampersands(xml_str):
"""Replace all the '&' by '&' in XML"""
return re.sub(
r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)',
'&',
xml_str)
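# Illustrative usage (expected value is a best-effort sketch):
# fix_xml_ampersands('Tom & Jerry &amp; friends') # -> 'Tom &amp; Jerry &amp; friends'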
def setproctitle(title):
assert isinstance(title, compat_str)
try:
libc = ctypes.cdll.LoadLibrary("libc.so.6")
except OSError:
return
title_bytes = title.encode('utf-8')
buf = ctypes.create_string_buffer(len(title_bytes))
buf.value = title_bytes
try:
libc.prctl(15, buf, 0, 0, 0)
except AttributeError:
return # Strange libc, just skip this
def remove_start(s, start):
if s.startswith(start):
return s[len(start):]
return s
def remove_end(s, end):
if s.endswith(end):
return s[:-len(end)]
return s
def url_basename(url):
path = compat_urlparse.urlparse(url).path
return path.strip('/').split('/')[-1]
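# Illustrative usage (the URL is a made-up example):
# url_basename('http://media.example.com/videos/clip.mp4?token=abc') # -> 'clip.mp4'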
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
return "HEAD"
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
if get_attr:
if v is not None:
v = getattr(v, get_attr, None)
if v == '':
v = None
return default if v is None else (int(v) * invscale // scale)
def str_or_none(v, default=None):
return default if v is None else compat_str(v)
def str_to_int(int_str):
""" A more relaxed version of int_or_none """
if int_str is None:
return None
int_str = re.sub(r'[,\.\+]', '', int_str)
return int(int_str)
def float_or_none(v, scale=1, invscale=1, default=None):
return default if v is None else (float(v) * invscale / scale)
def parse_duration(s):
if not isinstance(s, compat_basestring):
return None
s = s.strip()
m = re.match(
r'''(?ix)(?:P?T)?
(?:
(?P<only_mins>[0-9.]+)\s*(?:mins?\.?|minutes?)\s*|
(?P<only_hours>[0-9.]+)\s*(?:hours?)|
\s*(?P<hours_reversed>[0-9]+)\s*(?:[:h]|hours?)\s*(?P<mins_reversed>[0-9]+)\s*(?:[:m]|mins?\.?|minutes?)\s*|
(?:
(?:
(?:(?P<days>[0-9]+)\s*(?:[:d]|days?)\s*)?
(?P<hours>[0-9]+)\s*(?:[:h]|hours?)\s*
)?
(?P<mins>[0-9]+)\s*(?:[:m]|mins?|minutes?)\s*
)?
(?P<secs>[0-9]+)(?P<ms>\.[0-9]+)?\s*(?:s|secs?|seconds?)?
)$''', s)
if not m:
return None
res = 0
if m.group('only_mins'):
return float_or_none(m.group('only_mins'), invscale=60)
if m.group('only_hours'):
return float_or_none(m.group('only_hours'), invscale=60 * 60)
if m.group('secs'):
res += int(m.group('secs'))
if m.group('mins_reversed'):
res += int(m.group('mins_reversed')) * 60
if m.group('mins'):
res += int(m.group('mins')) * 60
if m.group('hours'):
res += int(m.group('hours')) * 60 * 60
if m.group('hours_reversed'):
res += int(m.group('hours_reversed')) * 60 * 60
if m.group('days'):
res += int(m.group('days')) * 24 * 60 * 60
if m.group('ms'):
res += float(m.group('ms'))
return res
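# Illustrative usage (expected values are a best-effort sketch):
# parse_duration('1:30') # -> 90
# parse_duration('02:03:04') # -> 7384
# parse_duration('3 min') # -> 180.0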
def prepend_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return (
'{0}.{1}{2}'.format(name, ext, real_ext)
if not expected_real_ext or real_ext[1:] == expected_real_ext
else '{0}.{1}'.format(filename, ext))
def replace_extension(filename, ext, expected_real_ext=None):
name, real_ext = os.path.splitext(filename)
return '{0}.{1}'.format(
name if not expected_real_ext or real_ext[1:] == expected_real_ext else filename,
ext)
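# Illustrative usage (expected values are a best-effort sketch):
# prepend_extension('video.mp4', 'temp') # -> 'video.temp.mp4'
# replace_extension('video.mp4', 'part') # -> 'video.part'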
def check_executable(exe, args=[]):
""" Checks if the given binary is installed somewhere in PATH, and returns its name.
args can be a list of arguments for a short output (like -version) """
try:
subprocess.Popen([exe] + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
except OSError:
return False
return exe
def get_exe_version(exe, args=['--version'],
version_re=None, unrecognized='present'):
""" Returns the version of the specified executable,
or False if the executable is not present """
try:
out, _ = subprocess.Popen(
[encodeArgument(exe)] + args,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
except OSError:
return False
if isinstance(out, bytes): # Python 2.x
out = out.decode('ascii', 'ignore')
return detect_exe_version(out, version_re, unrecognized)
def detect_exe_version(output, version_re=None, unrecognized='present'):
assert isinstance(output, compat_str)
if version_re is None:
version_re = r'version\s+([-0-9._a-zA-Z]+)'
m = re.search(version_re, output)
if m:
return m.group(1)
else:
return unrecognized
class PagedList(object):
def __len__(self):
# This is only useful for tests
return len(self.getslice())
class OnDemandPagedList(PagedList):
def __init__(self, pagefunc, pagesize):
self._pagefunc = pagefunc
self._pagesize = pagesize
def getslice(self, start=0, end=None):
res = []
for pagenum in itertools.count(start // self._pagesize):
firstid = pagenum * self._pagesize
nextfirstid = pagenum * self._pagesize + self._pagesize
if start >= nextfirstid:
continue
page_results = list(self._pagefunc(pagenum))
startv = (
start % self._pagesize
if firstid <= start < nextfirstid
else 0)
endv = (
((end - 1) % self._pagesize) + 1
if (end is not None and firstid <= end <= nextfirstid)
else None)
if startv != 0 or endv is not None:
page_results = page_results[startv:endv]
res.extend(page_results)
# A little optimization - if the current page is not "full", i.e. does
# not contain page_size videos then we can assume that this page
# is the last one - there are no more ids on further pages -
# i.e. no need to query again.
if len(page_results) + startv < self._pagesize:
break
# If we got the whole page, but the next page is not interesting,
# break out early as well
if end == nextfirstid:
break
return res
class InAdvancePagedList(PagedList):
def __init__(self, pagefunc, pagecount, pagesize):
self._pagefunc = pagefunc
self._pagecount = pagecount
self._pagesize = pagesize
def getslice(self, start=0, end=None):
res = []
start_page = start // self._pagesize
end_page = (
self._pagecount if end is None else (end // self._pagesize + 1))
skip_elems = start - start_page * self._pagesize
only_more = None if end is None else end - start
for pagenum in range(start_page, end_page):
page = list(self._pagefunc(pagenum))
if skip_elems:
page = page[skip_elems:]
skip_elems = None
if only_more is not None:
if len(page) < only_more:
only_more -= len(page)
else:
page = page[:only_more]
res.extend(page)
break
res.extend(page)
return res
def uppercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\U[0-9a-fA-F]{8}',
lambda m: unicode_escape(m.group(0))[0],
s)
def lowercase_escape(s):
unicode_escape = codecs.getdecoder('unicode_escape')
return re.sub(
r'\\u[0-9a-fA-F]{4}',
lambda m: unicode_escape(m.group(0))[0],
s)
def escape_rfc3986(s):
"""Escape non-ASCII characters as suggested by RFC 3986"""
if sys.version_info < (3, 0) and isinstance(s, compat_str):
s = s.encode('utf-8')
return compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")
def escape_url(url):
"""Escape URL as suggested by RFC 3986"""
url_parsed = compat_urllib_parse_urlparse(url)
return url_parsed._replace(
path=escape_rfc3986(url_parsed.path),
params=escape_rfc3986(url_parsed.params),
query=escape_rfc3986(url_parsed.query),
fragment=escape_rfc3986(url_parsed.fragment)
).geturl()
try:
struct.pack('!I', 0)
except TypeError:
# In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
def struct_pack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.pack(spec, *args)
def struct_unpack(spec, *args):
if isinstance(spec, compat_str):
spec = spec.encode('ascii')
return struct.unpack(spec, *args)
else:
struct_pack = struct.pack
struct_unpack = struct.unpack
def read_batch_urls(batch_fd):
def fixup(url):
if not isinstance(url, compat_str):
url = url.decode('utf-8', 'replace')
BOM_UTF8 = '\xef\xbb\xbf'
if url.startswith(BOM_UTF8):
url = url[len(BOM_UTF8):]
url = url.strip()
if url.startswith(('#', ';', ']')):
return False
return url
with contextlib.closing(batch_fd) as fd:
return [url for url in map(fixup, fd) if url]
def urlencode_postdata(*args, **kargs):
return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
try:
etree_iter = xml.etree.ElementTree.Element.iter
except AttributeError: # Python <=2.6
etree_iter = lambda n: n.findall('.//*')
def parse_xml(s):
class TreeBuilder(xml.etree.ElementTree.TreeBuilder):
def doctype(self, name, pubid, system):
pass # Ignore doctypes
parser = xml.etree.ElementTree.XMLParser(target=TreeBuilder())
kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
tree = xml.etree.ElementTree.XML(s.encode('utf-8'), **kwargs)
# Fix up XML parser in Python 2.x
if sys.version_info < (3, 0):
for n in etree_iter(tree):
if n.text is not None:
if not isinstance(n.text, compat_str):
n.text = n.text.decode('utf-8')
return tree
US_RATINGS = {
'G': 0,
'PG': 10,
'PG-13': 13,
'R': 16,
'NC': 18,
}
def parse_age_limit(s):
if s is None:
return None
m = re.match(r'^(?P<age>\d{1,2})\+?$', s)
return int(m.group('age')) if m else US_RATINGS.get(s, None)
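# Illustrative usage (expected values are a best-effort sketch):
# parse_age_limit('18+') # -> 18
# parse_age_limit('PG-13') # -> 13 (falls back to the US_RATINGS table)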
def strip_jsonp(code):
return re.sub(
r'(?s)^[a-zA-Z0-9_]+\s*\(\s*(.*)\);?\s*?(?://[^\n]*)*$', r'\1', code)
def js_to_json(code):
def fix_kv(m):
v = m.group(0)
if v in ('true', 'false', 'null'):
return v
if v.startswith('"'):
return v
if v.startswith("'"):
v = v[1:-1]
v = re.sub(r"\\\\|\\'|\"", lambda m: {
'\\\\': '\\\\',
"\\'": "'",
'"': '\\"',
}[m.group(0)], v)
return '"%s"' % v
res = re.sub(r'''(?x)
"(?:[^"\\]*(?:\\\\|\\['"nu]))*[^"\\]*"|
'(?:[^'\\]*(?:\\\\|\\['"nu]))*[^'\\]*'|
[a-zA-Z_][.a-zA-Z_0-9]*
''', fix_kv, code)
res = re.sub(r',(\s*[\]}])', lambda m: m.group(1), res)
return res
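# Illustrative usage (expected values are a best-effort sketch):
# js_to_json("{'key': true}") # -> '{"key": true}'
# js_to_json('{"abc": [1, 2, 3,]}') # -> '{"abc": [1, 2, 3]}' (trailing comma dropped)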
def qualities(quality_ids):
""" Get a numeric quality value out of a list of possible values """
def q(qid):
try:
return quality_ids.index(qid)
except ValueError:
return -1
return q
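# Illustrative usage (expected values are a best-effort sketch):
# q = qualities(['240p', '360p', '720p'])
# q('720p') # -> 2
# q('unknown') # -> -1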
DEFAULT_OUTTMPL = '%(title)s-%(id)s.%(ext)s'
def limit_length(s, length):
""" Add ellipses to overly long strings """
if s is None:
return None
ELLIPSES = '...'
if len(s) > length:
return s[:length - len(ELLIPSES)] + ELLIPSES
return s
def version_tuple(v):
return tuple(int(e) for e in re.split(r'[-.]', v))
def is_outdated_version(version, limit, assume_new=True):
if not version:
return not assume_new
try:
return version_tuple(version) < version_tuple(limit)
except ValueError:
return not assume_new
def ytdl_is_updateable():
""" Returns if youtube-dl can be updated with -U """
from zipimport import zipimporter
return isinstance(globals().get('__loader__'), zipimporter) or hasattr(sys, 'frozen')
def args_to_str(args):
# Get a short string representation for a subprocess command
return ' '.join(shlex_quote(a) for a in args)
def mimetype2ext(mt):
_, _, res = mt.rpartition('/')
return {
'x-ms-wmv': 'wmv',
'x-mp4-fragmented': 'mp4',
'ttml+xml': 'ttml',
}.get(res, res)
def urlhandle_detect_ext(url_handle):
try:
url_handle.headers
getheader = lambda h: url_handle.headers[h]
except AttributeError: # Python < 3
getheader = url_handle.info().getheader
cd = getheader('Content-Disposition')
if cd:
m = re.match(r'attachment;\s*filename="(?P<filename>[^"]+)"', cd)
if m:
e = determine_ext(m.group('filename'), default_ext=None)
if e:
return e
return mimetype2ext(getheader('Content-Type'))
def age_restricted(content_limit, age_limit):
""" Returns True iff the content should be blocked """
if age_limit is None: # No limit set
return False
if content_limit is None:
return False # Content available for everyone
return age_limit < content_limit
def is_html(first_bytes):
""" Detect whether a file contains HTML by examining its first bytes. """
BOMS = [
(b'\xef\xbb\xbf', 'utf-8'),
(b'\x00\x00\xfe\xff', 'utf-32-be'),
(b'\xff\xfe\x00\x00', 'utf-32-le'),
(b'\xff\xfe', 'utf-16-le'),
(b'\xfe\xff', 'utf-16-be'),
]
for bom, enc in BOMS:
if first_bytes.startswith(bom):
s = first_bytes[len(bom):].decode(enc, 'replace')
break
else:
s = first_bytes.decode('utf-8', 'replace')
return re.match(r'^\s*<', s)
def determine_protocol(info_dict):
protocol = info_dict.get('protocol')
if protocol is not None:
return protocol
url = info_dict['url']
if url.startswith('rtmp'):
return 'rtmp'
elif url.startswith('mms'):
return 'mms'
elif url.startswith('rtsp'):
return 'rtsp'
ext = determine_ext(url)
if ext == 'm3u8':
return 'm3u8'
elif ext == 'f4m':
return 'f4m'
return compat_urllib_parse_urlparse(url).scheme
def render_table(header_row, data):
""" Render a list of rows, each as a list of values """
table = [header_row] + data
max_lens = [max(len(compat_str(v)) for v in col) for col in zip(*table)]
format_str = ' '.join('%-' + compat_str(ml + 1) + 's' for ml in max_lens[:-1]) + '%s'
return '\n'.join(format_str % tuple(row) for row in table)
def _match_one(filter_part, dct):
COMPARISON_OPERATORS = {
'<': operator.lt,
'<=': operator.le,
'>': operator.gt,
'>=': operator.ge,
'=': operator.eq,
'!=': operator.ne,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<key>[a-z_]+)
\s*(?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
(?:
(?P<intval>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)|
(?P<strval>(?![0-9.])[a-z0-9A-Z]*)
)
\s*$
''' % '|'.join(map(re.escape, COMPARISON_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = COMPARISON_OPERATORS[m.group('op')]
if m.group('strval') is not None:
if m.group('op') not in ('=', '!='):
raise ValueError(
'Operator %s does not support string values!' % m.group('op'))
comparison_value = m.group('strval')
else:
try:
comparison_value = int(m.group('intval'))
except ValueError:
comparison_value = parse_filesize(m.group('intval'))
if comparison_value is None:
comparison_value = parse_filesize(m.group('intval') + 'B')
if comparison_value is None:
raise ValueError(
'Invalid integer value %r in filter part %r' % (
m.group('intval'), filter_part))
actual_value = dct.get(m.group('key'))
if actual_value is None:
return m.group('none_inclusive')
return op(actual_value, comparison_value)
UNARY_OPERATORS = {
'': lambda v: v is not None,
'!': lambda v: v is None,
}
operator_rex = re.compile(r'''(?x)\s*
(?P<op>%s)\s*(?P<key>[a-z_]+)
\s*$
''' % '|'.join(map(re.escape, UNARY_OPERATORS.keys())))
m = operator_rex.search(filter_part)
if m:
op = UNARY_OPERATORS[m.group('op')]
actual_value = dct.get(m.group('key'))
return op(actual_value)
raise ValueError('Invalid filter part %r' % filter_part)
def match_str(filter_str, dct):
""" Filter a dictionary with a simple string syntax. Returns True (=passes filter) or false """
return all(
_match_one(filter_part, dct) for filter_part in filter_str.split('&'))
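# Illustrative usage (expected values are a best-effort sketch):
# match_str('like_count > 100', {'like_count': 190}) # -> True
# match_str('duration < 30', {'duration': 30}) # -> False
# match_str('is_live', {'is_live': None}) # -> False (a bare key requires the field to be set)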
def match_filter_func(filter_str):
def _match_func(info_dict):
if match_str(filter_str, info_dict):
return None
else:
video_title = info_dict.get('title', info_dict.get('id', 'video'))
return '%s does not pass filter %s, skipping ..' % (video_title, filter_str)
return _match_func
def parse_dfxp_time_expr(time_expr):
if not time_expr:
return 0.0
mobj = re.match(r'^(?P<time_offset>\d+(?:\.\d+)?)s?$', time_expr)
if mobj:
return float(mobj.group('time_offset'))
mobj = re.match(r'^(\d+):(\d\d):(\d\d(?:\.\d+)?)$', time_expr)
if mobj:
return 3600 * int(mobj.group(1)) + 60 * int(mobj.group(2)) + float(mobj.group(3))
def srt_subtitles_timecode(seconds):
return '%02d:%02d:%02d,%03d' % (seconds / 3600, (seconds % 3600) / 60, seconds % 60, (seconds % 1) * 1000)
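# Illustrative usage (expected values are a best-effort sketch):
# srt_subtitles_timecode(1.1) # -> '00:00:01,100'
# srt_subtitles_timecode(3661.5) # -> '01:01:01,500'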
def dfxp2srt(dfxp_data):
_x = functools.partial(xpath_with_ns, ns_map={
'ttml': 'http://www.w3.org/ns/ttml',
'ttaf1': 'http://www.w3.org/2006/10/ttaf1',
})
def parse_node(node):
str_or_empty = functools.partial(str_or_none, default='')
out = str_or_empty(node.text)
for child in node:
if child.tag in (_x('ttml:br'), _x('ttaf1:br'), 'br'):
out += '\n' + str_or_empty(child.tail)
elif child.tag in (_x('ttml:span'), _x('ttaf1:span'), 'span'):
out += str_or_empty(parse_node(child))
else:
out += str_or_empty(xml.etree.ElementTree.tostring(child))
return out
dfxp = xml.etree.ElementTree.fromstring(dfxp_data.encode('utf-8'))
out = []
paras = dfxp.findall(_x('.//ttml:p')) or dfxp.findall(_x('.//ttaf1:p')) or dfxp.findall('.//p')
if not paras:
raise ValueError('Invalid dfxp/TTML subtitle')
for para, index in zip(paras, itertools.count(1)):
begin_time = parse_dfxp_time_expr(para.attrib['begin'])
end_time = parse_dfxp_time_expr(para.attrib.get('end'))
if not end_time:
end_time = begin_time + parse_dfxp_time_expr(para.attrib['dur'])
out.append('%d\n%s --> %s\n%s\n\n' % (
index,
srt_subtitles_timecode(begin_time),
srt_subtitles_timecode(end_time),
parse_node(para)))
return ''.join(out)
class ISO639Utils(object):
# See http://www.loc.gov/standards/iso639-2/ISO-639-2_utf-8.txt
_lang_map = {
'aa': 'aar',
'ab': 'abk',
'ae': 'ave',
'af': 'afr',
'ak': 'aka',
'am': 'amh',
'an': 'arg',
'ar': 'ara',
'as': 'asm',
'av': 'ava',
'ay': 'aym',
'az': 'aze',
'ba': 'bak',
'be': 'bel',
'bg': 'bul',
'bh': 'bih',
'bi': 'bis',
'bm': 'bam',
'bn': 'ben',
'bo': 'bod',
'br': 'bre',
'bs': 'bos',
'ca': 'cat',
'ce': 'che',
'ch': 'cha',
'co': 'cos',
'cr': 'cre',
'cs': 'ces',
'cu': 'chu',
'cv': 'chv',
'cy': 'cym',
'da': 'dan',
'de': 'deu',
'dv': 'div',
'dz': 'dzo',
'ee': 'ewe',
'el': 'ell',
'en': 'eng',
'eo': 'epo',
'es': 'spa',
'et': 'est',
'eu': 'eus',
'fa': 'fas',
'ff': 'ful',
'fi': 'fin',
'fj': 'fij',
'fo': 'fao',
'fr': 'fra',
'fy': 'fry',
'ga': 'gle',
'gd': 'gla',
'gl': 'glg',
'gn': 'grn',
'gu': 'guj',
'gv': 'glv',
'ha': 'hau',
'he': 'heb',
'hi': 'hin',
'ho': 'hmo',
'hr': 'hrv',
'ht': 'hat',
'hu': 'hun',
'hy': 'hye',
'hz': 'her',
'ia': 'ina',
'id': 'ind',
'ie': 'ile',
'ig': 'ibo',
'ii': 'iii',
'ik': 'ipk',
'io': 'ido',
'is': 'isl',
'it': 'ita',
'iu': 'iku',
'ja': 'jpn',
'jv': 'jav',
'ka': 'kat',
'kg': 'kon',
'ki': 'kik',
'kj': 'kua',
'kk': 'kaz',
'kl': 'kal',
'km': 'khm',
'kn': 'kan',
'ko': 'kor',
'kr': 'kau',
'ks': 'kas',
'ku': 'kur',
'kv': 'kom',
'kw': 'cor',
'ky': 'kir',
'la': 'lat',
'lb': 'ltz',
'lg': 'lug',
'li': 'lim',
'ln': 'lin',
'lo': 'lao',
'lt': 'lit',
'lu': 'lub',
'lv': 'lav',
'mg': 'mlg',
'mh': 'mah',
'mi': 'mri',
'mk': 'mkd',
'ml': 'mal',
'mn': 'mon',
'mr': 'mar',
'ms': 'msa',
'mt': 'mlt',
'my': 'mya',
'na': 'nau',
'nb': 'nob',
'nd': 'nde',
'ne': 'nep',
'ng': 'ndo',
'nl': 'nld',
'nn': 'nno',
'no': 'nor',
'nr': 'nbl',
'nv': 'nav',
'ny': 'nya',
'oc': 'oci',
'oj': 'oji',
'om': 'orm',
'or': 'ori',
'os': 'oss',
'pa': 'pan',
'pi': 'pli',
'pl': 'pol',
'ps': 'pus',
'pt': 'por',
'qu': 'que',
'rm': 'roh',
'rn': 'run',
'ro': 'ron',
'ru': 'rus',
'rw': 'kin',
'sa': 'san',
'sc': 'srd',
'sd': 'snd',
'se': 'sme',
'sg': 'sag',
'si': 'sin',
'sk': 'slk',
'sl': 'slv',
'sm': 'smo',
'sn': 'sna',
'so': 'som',
'sq': 'sqi',
'sr': 'srp',
'ss': 'ssw',
'st': 'sot',
'su': 'sun',
'sv': 'swe',
'sw': 'swa',
'ta': 'tam',
'te': 'tel',
'tg': 'tgk',
'th': 'tha',
'ti': 'tir',
'tk': 'tuk',
'tl': 'tgl',
'tn': 'tsn',
'to': 'ton',
'tr': 'tur',
'ts': 'tso',
'tt': 'tat',
'tw': 'twi',
'ty': 'tah',
'ug': 'uig',
'uk': 'ukr',
'ur': 'urd',
'uz': 'uzb',
've': 'ven',
'vi': 'vie',
'vo': 'vol',
'wa': 'wln',
'wo': 'wol',
'xh': 'xho',
'yi': 'yid',
'yo': 'yor',
'za': 'zha',
'zh': 'zho',
'zu': 'zul',
}
@classmethod
def short2long(cls, code):
"""Convert language code from ISO 639-1 to ISO 639-2/T"""
return cls._lang_map.get(code[:2])
@classmethod
def long2short(cls, code):
"""Convert language code from ISO 639-2/T to ISO 639-1"""
for short_name, long_name in cls._lang_map.items():
if long_name == code:
return short_name
class ISO3166Utils(object):
# From http://data.okfn.org/data/core/country-list
_country_map = {
'AF': 'Afghanistan',
'AX': 'Åland Islands',
'AL': 'Albania',
'DZ': 'Algeria',
'AS': 'American Samoa',
'AD': 'Andorra',
'AO': 'Angola',
'AI': 'Anguilla',
'AQ': 'Antarctica',
'AG': 'Antigua and Barbuda',
'AR': 'Argentina',
'AM': 'Armenia',
'AW': 'Aruba',
'AU': 'Australia',
'AT': 'Austria',
'AZ': 'Azerbaijan',
'BS': 'Bahamas',
'BH': 'Bahrain',
'BD': 'Bangladesh',
'BB': 'Barbados',
'BY': 'Belarus',
'BE': 'Belgium',
'BZ': 'Belize',
'BJ': 'Benin',
'BM': 'Bermuda',
'BT': 'Bhutan',
'BO': 'Bolivia, Plurinational State of',
'BQ': 'Bonaire, Sint Eustatius and Saba',
'BA': 'Bosnia and Herzegovina',
'BW': 'Botswana',
'BV': 'Bouvet Island',
'BR': 'Brazil',
'IO': 'British Indian Ocean Territory',
'BN': 'Brunei Darussalam',
'BG': 'Bulgaria',
'BF': 'Burkina Faso',
'BI': 'Burundi',
'KH': 'Cambodia',
'CM': 'Cameroon',
'CA': 'Canada',
'CV': 'Cape Verde',
'KY': 'Cayman Islands',
'CF': 'Central African Republic',
'TD': 'Chad',
'CL': 'Chile',
'CN': 'China',
'CX': 'Christmas Island',
'CC': 'Cocos (Keeling) Islands',
'CO': 'Colombia',
'KM': 'Comoros',
'CG': 'Congo',
'CD': 'Congo, the Democratic Republic of the',
'CK': 'Cook Islands',
'CR': 'Costa Rica',
'CI': 'Côte d\'Ivoire',
'HR': 'Croatia',
'CU': 'Cuba',
'CW': 'Curaçao',
'CY': 'Cyprus',
'CZ': 'Czech Republic',
'DK': 'Denmark',
'DJ': 'Djibouti',
'DM': 'Dominica',
'DO': 'Dominican Republic',
'EC': 'Ecuador',
'EG': 'Egypt',
'SV': 'El Salvador',
'GQ': 'Equatorial Guinea',
'ER': 'Eritrea',
'EE': 'Estonia',
'ET': 'Ethiopia',
'FK': 'Falkland Islands (Malvinas)',
'FO': 'Faroe Islands',
'FJ': 'Fiji',
'FI': 'Finland',
'FR': 'France',
'GF': 'French Guiana',
'PF': 'French Polynesia',
'TF': 'French Southern Territories',
'GA': 'Gabon',
'GM': 'Gambia',
'GE': 'Georgia',
'DE': 'Germany',
'GH': 'Ghana',
'GI': 'Gibraltar',
'GR': 'Greece',
'GL': 'Greenland',
'GD': 'Grenada',
'GP': 'Guadeloupe',
'GU': 'Guam',
'GT': 'Guatemala',
'GG': 'Guernsey',
'GN': 'Guinea',
'GW': 'Guinea-Bissau',
'GY': 'Guyana',
'HT': 'Haiti',
'HM': 'Heard Island and McDonald Islands',
'VA': 'Holy See (Vatican City State)',
'HN': 'Honduras',
'HK': 'Hong Kong',
'HU': 'Hungary',
'IS': 'Iceland',
'IN': 'India',
'ID': 'Indonesia',
'IR': 'Iran, Islamic Republic of',
'IQ': 'Iraq',
'IE': 'Ireland',
'IM': 'Isle of Man',
'IL': 'Israel',
'IT': 'Italy',
'JM': 'Jamaica',
'JP': 'Japan',
'JE': 'Jersey',
'JO': 'Jordan',
'KZ': 'Kazakhstan',
'KE': 'Kenya',
'KI': 'Kiribati',
'KP': 'Korea, Democratic People\'s Republic of',
'KR': 'Korea, Republic of',
'KW': 'Kuwait',
'KG': 'Kyrgyzstan',
'LA': 'Lao People\'s Democratic Republic',
'LV': 'Latvia',
'LB': 'Lebanon',
'LS': 'Lesotho',
'LR': 'Liberia',
'LY': 'Libya',
'LI': 'Liechtenstein',
'LT': 'Lithuania',
'LU': 'Luxembourg',
'MO': 'Macao',
'MK': 'Macedonia, the Former Yugoslav Republic of',
'MG': 'Madagascar',
'MW': 'Malawi',
'MY': 'Malaysia',
'MV': 'Maldives',
'ML': 'Mali',
'MT': 'Malta',
'MH': 'Marshall Islands',
'MQ': 'Martinique',
'MR': 'Mauritania',
'MU': 'Mauritius',
'YT': 'Mayotte',
'MX': 'Mexico',
'FM': 'Micronesia, Federated States of',
'MD': 'Moldova, Republic of',
'MC': 'Monaco',
'MN': 'Mongolia',
'ME': 'Montenegro',
'MS': 'Montserrat',
'MA': 'Morocco',
'MZ': 'Mozambique',
'MM': 'Myanmar',
'NA': 'Namibia',
'NR': 'Nauru',
'NP': 'Nepal',
'NL': 'Netherlands',
'NC': 'New Caledonia',
'NZ': 'New Zealand',
'NI': 'Nicaragua',
'NE': 'Niger',
'NG': 'Nigeria',
'NU': 'Niue',
'NF': 'Norfolk Island',
'MP': 'Northern Mariana Islands',
'NO': 'Norway',
'OM': 'Oman',
'PK': 'Pakistan',
'PW': 'Palau',
'PS': 'Palestine, State of',
'PA': 'Panama',
'PG': 'Papua New Guinea',
'PY': 'Paraguay',
'PE': 'Peru',
'PH': 'Philippines',
'PN': 'Pitcairn',
'PL': 'Poland',
'PT': 'Portugal',
'PR': 'Puerto Rico',
'QA': 'Qatar',
'RE': 'Réunion',
'RO': 'Romania',
'RU': 'Russian Federation',
'RW': 'Rwanda',
'BL': 'Saint Barthélemy',
'SH': 'Saint Helena, Ascension and Tristan da Cunha',
'KN': 'Saint Kitts and Nevis',
'LC': 'Saint Lucia',
'MF': 'Saint Martin (French part)',
'PM': 'Saint Pierre and Miquelon',
'VC': 'Saint Vincent and the Grenadines',
'WS': 'Samoa',
'SM': 'San Marino',
'ST': 'Sao Tome and Principe',
'SA': 'Saudi Arabia',
'SN': 'Senegal',
'RS': 'Serbia',
'SC': 'Seychelles',
'SL': 'Sierra Leone',
'SG': 'Singapore',
'SX': 'Sint Maarten (Dutch part)',
'SK': 'Slovakia',
'SI': 'Slovenia',
'SB': 'Solomon Islands',
'SO': 'Somalia',
'ZA': 'South Africa',
'GS': 'South Georgia and the South Sandwich Islands',
'SS': 'South Sudan',
'ES': 'Spain',
'LK': 'Sri Lanka',
'SD': 'Sudan',
'SR': 'Suriname',
'SJ': 'Svalbard and Jan Mayen',
'SZ': 'Swaziland',
'SE': 'Sweden',
'CH': 'Switzerland',
'SY': 'Syrian Arab Republic',
'TW': 'Taiwan, Province of China',
'TJ': 'Tajikistan',
'TZ': 'Tanzania, United Republic of',
'TH': 'Thailand',
'TL': 'Timor-Leste',
'TG': 'Togo',
'TK': 'Tokelau',
'TO': 'Tonga',
'TT': 'Trinidad and Tobago',
'TN': 'Tunisia',
'TR': 'Turkey',
'TM': 'Turkmenistan',
'TC': 'Turks and Caicos Islands',
'TV': 'Tuvalu',
'UG': 'Uganda',
'UA': 'Ukraine',
'AE': 'United Arab Emirates',
'GB': 'United Kingdom',
'US': 'United States',
'UM': 'United States Minor Outlying Islands',
'UY': 'Uruguay',
'UZ': 'Uzbekistan',
'VU': 'Vanuatu',
'VE': 'Venezuela, Bolivarian Republic of',
'VN': 'Viet Nam',
'VG': 'Virgin Islands, British',
'VI': 'Virgin Islands, U.S.',
'WF': 'Wallis and Futuna',
'EH': 'Western Sahara',
'YE': 'Yemen',
'ZM': 'Zambia',
'ZW': 'Zimbabwe',
}
@classmethod
def short2full(cls, code):
"""Convert an ISO 3166-2 country code to the corresponding full name"""
return cls._country_map.get(code.upper())
class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
def __init__(self, proxies=None):
# Set default handlers
for type in ('http', 'https'):
setattr(self, '%s_open' % type,
lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
meth(r, proxy, type))
return compat_urllib_request.ProxyHandler.__init__(self, proxies)
def proxy_open(self, req, proxy, type):
req_proxy = req.headers.get('Ytdl-request-proxy')
if req_proxy is not None:
proxy = req_proxy
del req.headers['Ytdl-request-proxy']
if proxy == '__noproxy__':
return None # No Proxy
return compat_urllib_request.ProxyHandler.proxy_open(
self, req, proxy, type)
| unlicense |
jmartinm/inspire-next | inspire/testsuite/test_workflows.py | 1 | 11909 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Tests for workflows."""
from __future__ import print_function, absolute_import
import httpretty
import os
import pkg_resources
import tempfile
from invenio.celery import celery
from invenio.testsuite import make_test_suite, run_test_suite
from .helpers import WorkflowTasksTestCase
class WorkflowTest(WorkflowTasksTestCase):
"""Test the Payload class.
The Payload class, derived from the Deposition class representing a user
submission, can be used to represent the result of harvesting a record in
a workflow.
These two classes share their data model and have similar APIs.
"""
def setUp(self):
"""Setup tests."""
from invenio_knowledge.api import add_kb
from inspire.modules.workflows.receivers import precache_holdingpen_row
from invenio_workflows.receivers import index_holdingpen_record
from invenio_workflows.signals import (
workflow_halted,
workflow_object_saved
)
# Disable the holdingpen caching receiver.
workflow_halted.disconnect(precache_holdingpen_row)
workflow_object_saved.disconnect(index_holdingpen_record)
self.create_registries()
self.record_oai_arxiv_plots = pkg_resources.resource_string(
'inspire.testsuite',
os.path.join(
'workflows',
'fixtures',
'oai_arxiv_record_with_plots.xml'
)
)
self.some_record = pkg_resources.resource_string(
'inspire.testsuite',
os.path.join(
'workflows',
'fixtures',
'some_record.xml'
)
)
self.arxiv_tarball = pkg_resources.resource_stream(
'inspire.testsuite',
os.path.join(
'workflows',
'fixtures',
'1407.7587v1'
)
)
self.arxiv_pdf = pkg_resources.resource_stream(
'inspire.testsuite',
os.path.join(
'workflows',
'fixtures',
'1407.7587v1.pdf'
)
)
# Add temp KB
add_kb('harvesting_fixture_kb')
def tearDown(self):
"""Clean up created objects."""
from invenio_knowledge.api import delete_kb
from invenio_workflows.models import Workflow
from inspire.modules.workflows.receivers import precache_holdingpen_row
from invenio_workflows.receivers import index_holdingpen_record
from invenio_workflows.signals import (
workflow_halted,
workflow_object_saved
)
workflow_halted.connect(precache_holdingpen_row)
workflow_object_saved.connect(index_holdingpen_record)
self.delete_objects(
Workflow.get(Workflow.module_name == 'unit_tests').all())
self.cleanup_registries()
delete_kb('harvesting_fixture_kb')
def test_payload_creation(self):
"""A Payload can be created."""
from invenio_workflows.api import start
from invenio_workflows.engine import WorkflowStatus
workflow = start('payload_fixture',
data=[self.some_record],
module_name="unit_tests")
self.assertEqual(WorkflowStatus.COMPLETED, workflow.status)
self.assertTrue(len(workflow.completed_objects) == 1)
modified_object = workflow.completed_objects[0]
for l in ['files', 'sips', 'type', 'drafts', 'title']:
self.assertIn(l, modified_object.data)
def test_payload_sip_creation(self):
"""A Payload has a sip."""
from invenio_workflows.api import start
from inspire.modules.workflows.models import Payload
workflow = start('payload_fixture',
data=[self.some_record],
module_name="unit_tests")
modified_object = workflow.completed_objects[0]
p = Payload(modified_object)
sip = p.get_latest_sip()
self.assertTrue(sip.metadata)
# self.assertTrue(sip.package)
def test_payload_model_creation(self):
"""A workflow can specify a model to encapsulate behaviour."""
from invenio_workflows.api import start
workflow = start('payload_model_fixture',
data=[self.some_record],
module_name="unit_tests")
modified_object = workflow.completed_objects[0]
p = workflow.workflow_definition.model(modified_object)
sip = p.get_latest_sip()
self.assertTrue(sip.metadata)
# self.assertTrue(sip.package)
def test_payload_file_creation(self):
"""Can add a file to a Payload."""
from invenio_workflows.models import BibWorkflowObject
from inspire.modules.workflows.models import Payload
from inspire.utils.helpers import (
get_file_by_name,
add_file_by_name,
)
obj = BibWorkflowObject.create_object()
obj.save()
obj.data = obj.get_data() # FIXME hack until workflow 2.0
payload = Payload.create(workflow_object=obj, type="payload_fixture")
payload.save()
fd, filename = tempfile.mkstemp()
os.close(fd)
newpath = add_file_by_name(payload, filename)
self.assertTrue(newpath)
self.assertTrue(get_file_by_name(payload,
os.path.basename(filename)))
BibWorkflowObject.delete(obj)
@httpretty.activate
def test_harvesting_workflow_with_match(self):
"""Test an harvesting workflow when the record already exists."""
from invenio_base.globals import cfg
from invenio_workflows.api import start
httpretty.HTTPretty.allow_net_connect = False
httpretty.register_uri(
httpretty.GET,
cfg['WORKFLOWS_MATCH_REMOTE_SERVER_URL'],
body='[1212]',
status=200
)
workflow = start('harvesting_fixture',
data=[self.record_oai_arxiv_plots],
module_name='unit_tests')
# XXX(jacquerie): find a better check
self.assertEqual(workflow.objects, [])
@httpretty.activate
def test_harvesting_workflow_without_match(self):
"""Test a full harvesting workflow."""
from invenio_base.globals import cfg
from invenio_workflows.api import start
from inspire.utils.helpers import (
get_record_from_obj,
)
httpretty.HTTPretty.allow_net_connect = False
httpretty.register_uri(
httpretty.GET,
cfg['WORKFLOWS_MATCH_REMOTE_SERVER_URL'],
body='[]',
status=200
)
httpretty.register_uri(
httpretty.GET,
'http://arxiv.org/e-print/1407.7587',
content_type="application/x-eprint-tar",
body=self.arxiv_tarball.read(),
status=200,
adding_headers={
"Content-Encoding": 'x-gzip',
}
)
httpretty.register_uri(
httpretty.GET,
'http://arxiv.org/pdf/1407.7587.pdf',
content_type="application/pdf",
body=self.arxiv_pdf.read(),
status=200,
)
robotupload_url = os.path.join(
cfg.get("CFG_ROBOTUPLOAD_SUBMISSION_BASEURL"),
"batchuploader/robotupload/insert"
)
httpretty.register_uri(
httpretty.POST,
robotupload_url,
body="[INFO] bibupload batchupload --insert /dummy/file/path\n",
status=200,
)
workflow = start('harvesting_fixture',
data=[self.record_oai_arxiv_plots],
module_name='unit_tests')
# Let's get the record metadata and check contents
obj = workflow.halted_objects[0]
record = get_record_from_obj(obj, workflow)
# Files should have been attached (tarball + pdf)
# self.assertTrue(len(obj.data["files"]) == 2)
# Some plots/files should have been added to FFTs
# self.assertTrue(record.get('fft'))
# A publication note should have been extracted
self.assertTrue(record.get('publication_info'))
# A prediction should have been made
self.assertTrue(obj.get_tasks_results().get("arxiv_guessing"))
record = get_record_from_obj(obj, workflow)
# This one is not yet CORE
self.assertFalse("CORE" in record.get("collections.primary"))
# Now let's resolve it as accepted and continue
obj.remove_action()
obj.extra_data["approved"] = True
obj.extra_data["core"] = True
obj.set_extra_data(obj.extra_data)
obj.save()
workflow = obj.continue_workflow()
record = get_record_from_obj(obj, workflow)
# Now it is CORE
self.assertTrue("CORE" in record.get("collections.primary"))
class AgnosticTest(WorkflowTasksTestCase):
"""Test that the data model can still accept a deposition."""
def setUp(self):
"""Setup tests."""
from invenio_deposit.models import Deposition, DepositionType
from invenio_deposit.registry import deposit_types, \
deposit_default_type
from invenio_deposit.form import WebDepositForm
from invenio_deposit.tasks import prefill_draft, \
prepare_sip
celery.conf['CELERY_ALWAYS_EAGER'] = True
def agnostic_task(obj, eng):
data_model = eng.workflow_definition.model(obj)
sip = data_model.get_latest_sip()
print(sip.metadata)
class DefaultType(DepositionType):
pass
class SimpleRecordTestForm(WebDepositForm):
pass
class DepositModelTest(DepositionType):
"""A test workflow for the model."""
model = Deposition
draft_definitions = {
'default': SimpleRecordTestForm,
}
workflow = [
prefill_draft(draft_id='default'),
prepare_sip(),
agnostic_task,
]
deposit_types.register(DefaultType)
deposit_types.register(DepositModelTest)
deposit_default_type.register(DefaultType)
def teardown(self):
"""Clean up created objects."""
self.cleanup_registries()
def test_agnostic_deposit(self):
"""A deposition still has the same data model."""
from invenio_deposit.models import Deposition
from invenio_ext.login.legacy_user import UserInfo
u = UserInfo(uid=1)
d = Deposition.create(u, type='DepositModelTest')
d.save()
d.run_workflow()
completed_object = d.engine.completed_objects[0]
for l in ['files', 'sips', 'type', 'drafts', 'title']:
self.assertIn(l, completed_object.data)
TEST_SUITE = make_test_suite(AgnosticTest, WorkflowTest)
if __name__ == "__main__":
run_test_suite(TEST_SUITE)
| gpl-2.0 |
kdwink/intellij-community | plugins/hg4idea/testData/bin/hgext/children.py | 93 | 1556 | # Mercurial extension to provide the 'hg children' command
#
# Copyright 2007 by Intevation GmbH <[email protected]>
#
# Author(s):
# Thomas Arendsen Hein <[email protected]>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''command to display child changesets (DEPRECATED)
This extension is deprecated. You should use :hg:`log -r
"children(REV)"` instead.
'''
from mercurial import cmdutil, commands
from mercurial.commands import templateopts
from mercurial.i18n import _
testedwith = 'internal'
def children(ui, repo, file_=None, **opts):
"""show the children of the given or working directory revision
Print the children of the working directory's revisions. If a
revision is given via -r/--rev, the children of that revision will
be printed. If a file argument is given, revision in which the
file was last changed (after the working directory revision or the
argument to --rev if given) is printed.
"""
rev = opts.get('rev')
if file_:
ctx = repo.filectx(file_, changeid=rev)
else:
ctx = repo[rev]
displayer = cmdutil.show_changeset(ui, repo, opts)
for cctx in ctx.children():
displayer.show(cctx)
displayer.close()
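# Typical invocation (illustrative): "hg children -r 42" prints the child
# changesets of revision 42; with a FILE argument, the children of the revision
# that last changed that file are shown instead.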
cmdtable = {
"children":
(children,
[('r', 'rev', '',
_('show children of the specified revision'), _('REV')),
] + templateopts,
_('hg children [-r REV] [FILE]')),
}
commands.inferrepo += " children"
| apache-2.0 |
batermj/algorithm-challenger | code-analysis/programming_anguage/python/source_codes/Python3.5.9/Python-3.5.9/Lib/tkinter/test/runtktests.py | 9 | 2257 | """
Use this module to get and run all tk tests.
tkinter tests should live in a package inside the directory where this file
lives, like test_tkinter.
Extensions also should live in packages following the same rule as above.
"""
import os
import sys
import unittest
import importlib
import test.support
this_dir_path = os.path.abspath(os.path.dirname(__file__))
def is_package(path):
for name in os.listdir(path):
if name in ('__init__.py', '__init__.pyc'):
return True
return False
def get_tests_modules(basepath=this_dir_path, gui=True, packages=None):
"""This will import and yield modules whose names start with test_
and are inside packages found in the path starting at basepath.
If packages is specified it should contain package names that
want their tests collected.
"""
py_ext = '.py'
for dirpath, dirnames, filenames in os.walk(basepath):
for dirname in list(dirnames):
if dirname[0] == '.':
dirnames.remove(dirname)
if is_package(dirpath) and filenames:
pkg_name = dirpath[len(basepath) + len(os.sep):].replace('/', '.')
if packages and pkg_name not in packages:
continue
filenames = filter(
lambda x: x.startswith('test_') and x.endswith(py_ext),
filenames)
for name in filenames:
try:
yield importlib.import_module(
".%s.%s" % (pkg_name, name[:-len(py_ext)]),
"tkinter.test")
except test.support.ResourceDenied:
if gui:
raise
def get_tests(text=True, gui=True, packages=None):
"""Yield all the tests in the modules found by get_tests_modules.
If gui is False, only tests that do not require a GUI will be
returned."""
attrs = []
if text:
attrs.append('tests_nogui')
if gui:
attrs.append('tests_gui')
for module in get_tests_modules(gui=gui, packages=packages):
for attr in attrs:
for test in getattr(module, attr, ()):
yield test
if __name__ == "__main__":
test.support.run_unittest(*get_tests())
| apache-2.0 |
barbarahui/harvester | scripts/queue_image_harvest_for_doc_ids.py | 3 | 3728 | # -*- coding: utf-8 -*-
#! /bin/env python
import sys
import os
from harvester.post_processing.couchdb_runner import CouchDBJobEnqueue
from harvester.image_harvest import harvest_image_for_doc
EMAIL_RETURN_ADDRESS = os.environ.get('EMAIL_RETURN_ADDRESS',
'[email protected]')
# csv delim email addresses
EMAIL_SYS_ADMIN = os.environ.get('EMAIL_SYS_ADMINS', None)
IMAGE_HARVEST_TIMEOUT = 144000
def def_args():
import argparse
parser = argparse.ArgumentParser(description='Harvest a collection')
parser.add_argument('user_email', type=str, help='user email')
parser.add_argument('rq_queue', type=str, help='RQ Queue to put job in')
parser.add_argument('--object_auth', nargs='?',
help='HTTP Auth needed to download images - username:password')
parser.add_argument('--url_couchdb', nargs='?',
help='Override url to couchdb')
parser.add_argument('--timeout', nargs='?',
help='set image harvest timeout in sec (14400 - 4hrs default)')
parser.add_argument('doc_ids', type=str,
help='Comma separated CouchDB document ids')
return parser
def main(doc_ids, **kwargs):
enq = CouchDBJobEnqueue(rq_queue=kwargs['rq_queue'])
timeout = 10000
if 'rq_queue' in kwargs:
del kwargs['rq_queue']
if 'timeout' in kwargs:
if type(kwargs['timeout']) == int:
timeout = kwargs['timeout']
del kwargs['timeout']
if 'object_auth' in kwargs:
kwargs['object_auth'] = (kwargs['object_auth'].split(':')[0],
kwargs['object_auth'].split(':')[1])
enq.queue_list_of_ids(doc_ids,
timeout,
harvest_image_for_doc,
force=True,
**kwargs
)
if __name__ == '__main__':
parser = def_args()
args = parser.parse_args(sys.argv[1:])
if not args.rq_queue or not args.doc_ids:
parser.print_help()
sys.exit(27)
id_list = [s for s in args.doc_ids.split(',')]
kwargs = vars(args)
del kwargs['doc_ids']
main(id_list,
**kwargs)
# Copyright © 2017, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| bsd-3-clause |
espadrine/opera | chromium/src/tools/telemetry/telemetry/core/chrome/browser_backend.py | 3 | 8702 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import urllib2
import httplib
import socket
import json
import re
import sys
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.core import user_agent
from telemetry.core import wpr_modes
from telemetry.core import wpr_server
from telemetry.core.chrome import extension_dict_backend
from telemetry.core.chrome import tab_list_backend
from telemetry.core.chrome import tracing_backend
from telemetry.core.chrome import misc_web_contents_backend
from telemetry.unittest import options_for_unittests
class ExtensionsNotSupportedException(Exception):
pass
class BrowserBackend(object):
"""A base class for browser backends. Provides basic functionality
once a remote-debugger port has been established."""
WEBPAGEREPLAY_HOST = '127.0.0.1'
def __init__(self, is_content_shell, supports_extensions, options):
self.browser_type = options.browser_type
self.is_content_shell = is_content_shell
self._supports_extensions = supports_extensions
self.options = options
self._browser = None
self._port = None
self._inspector_protocol_version = 0
self._chrome_branch_number = 0
self._tracing_backend = None
self.webpagereplay_local_http_port = util.GetAvailableLocalPort()
self.webpagereplay_local_https_port = util.GetAvailableLocalPort()
self.webpagereplay_remote_http_port = self.webpagereplay_local_http_port
self.webpagereplay_remote_https_port = self.webpagereplay_local_https_port
if options.dont_override_profile and not options_for_unittests.AreSet():
sys.stderr.write('Warning: Not overriding profile. This can cause '
'unexpected effects due to profile-specific settings, '
'such as about:flags settings, cookies, and '
'extensions.\n')
self._misc_web_contents_backend = (
misc_web_contents_backend.MiscWebContentsBackend(self))
self._tab_list_backend = tab_list_backend.TabListBackend(self)
self._extension_dict_backend = None
if supports_extensions:
self._extension_dict_backend = (
extension_dict_backend.ExtensionDictBackend(self))
def SetBrowser(self, browser):
self._browser = browser
self._tab_list_backend.Init()
@property
def browser(self):
return self._browser
@property
def supports_extensions(self):
"""True if this browser backend supports extensions."""
return self._supports_extensions
@property
def misc_web_contents_backend(self):
"""Access to chrome://oobe/login page which is neither a tab nor an
extension."""
return self._misc_web_contents_backend
@property
def tab_list_backend(self):
return self._tab_list_backend
@property
def extension_dict_backend(self):
return self._extension_dict_backend
def GetBrowserStartupArgs(self):
args = []
args.extend(self.options.extra_browser_args)
args.append('--disable-background-networking')
args.append('--metrics-recording-only')
args.append('--no-first-run')
if self.options.wpr_mode != wpr_modes.WPR_OFF:
args.extend(wpr_server.GetChromeFlags(
self.WEBPAGEREPLAY_HOST,
self.webpagereplay_remote_http_port,
self.webpagereplay_remote_https_port))
args.extend(user_agent.GetChromeUserAgentArgumentFromType(
self.options.browser_user_agent_type))
extensions = [extension.local_path for extension in
self.options.extensions_to_load if not extension.is_component]
extension_str = ','.join(extensions)
if len(extensions) > 0:
args.append('--load-extension=%s' % extension_str)
component_extensions = [extension.local_path for extension in
self.options.extensions_to_load if extension.is_component]
component_extension_str = ','.join(component_extensions)
if len(component_extensions) > 0:
args.append('--load-component-extension=%s' % component_extension_str)
if self.options.no_proxy_server:
args.append('--no-proxy-server')
return args
@property
def wpr_mode(self):
return self.options.wpr_mode
def _WaitForBrowserToComeUp(self, timeout=None):
def IsBrowserUp():
try:
self.Request('', timeout=timeout)
except (socket.error, httplib.BadStatusLine, urllib2.URLError):
return False
else:
return True
try:
util.WaitFor(IsBrowserUp, timeout=30)
except util.TimeoutException:
raise exceptions.BrowserGoneException()
def AllExtensionsLoaded():
# Extension pages are loaded from an about:blank page,
# so we need to check that the document URL is the extension
# page in addition to the ready state.
extension_ready_js = """
document.URL.lastIndexOf('chrome-extension://%s/', 0) == 0 &&
(document.readyState == 'complete' ||
document.readyState == 'interactive')
"""
for e in self.options.extensions_to_load:
if not e.extension_id in self._extension_dict_backend:
return False
extension_object = self._extension_dict_backend[e.extension_id]
res = extension_object.EvaluateJavaScript(
extension_ready_js % e.extension_id)
if not res:
return False
return True
if self._supports_extensions:
util.WaitFor(AllExtensionsLoaded, timeout=30)
def _PostBrowserStartupInitialization(self):
# Detect version information.
data = self.Request('version')
resp = json.loads(data)
if 'Protocol-Version' in resp:
self._inspector_protocol_version = resp['Protocol-Version']
if 'Browser' in resp:
branch_number_match = re.search('Chrome/\d+\.\d+\.(\d+)\.\d+',
resp['Browser'])
else:
branch_number_match = re.search(
'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
resp['User-Agent'])
if branch_number_match:
self._chrome_branch_number = int(branch_number_match.group(1))
else:
# Content Shell returns '' for Browser, for now we have to
# fall back and assume branch 1025.
self._chrome_branch_number = 1025
return
# Detection has failed: assume 18.0.1025.168 ~= Chrome Android.
self._inspector_protocol_version = 1.0
self._chrome_branch_number = 1025
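# --- Illustrative note (not part of the original source) ---
# The /json/version reply parsed above looks roughly like (values made up):
#
#     {"Browser": "Chrome/18.0.1025.168",
#      "Protocol-Version": "1.0",
#      "User-Agent": "Mozilla/5.0 ... Chrome/18.0.1025.168 Mobile Safari/535.19"}
#
# The third component of the version (1025 here) is taken as the branch
# number, with 1025 also used as the fallback when detection fails.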
def Request(self, path, timeout=None):
url = 'http://localhost:%i/json' % self._port
if path:
url += '/' + path
req = urllib2.urlopen(url, timeout=timeout)
return req.read()
@property
def chrome_branch_number(self):
return self._chrome_branch_number
@property
def supports_tab_control(self):
return self._chrome_branch_number >= 1303
@property
def supports_tracing(self):
return self.is_content_shell or self._chrome_branch_number >= 1385
def StartTracing(self, custom_categories=None):
""" custom_categories is an optional string containing a list of
comma separated categories that will be traced instead of the
default category set. Example: use
"webkit,cc,disabled-by-default-cc.debug" to trace only those three
event categories.
"""
if self._tracing_backend is None:
self._tracing_backend = tracing_backend.TracingBackend(self._port)
self._tracing_backend.BeginTracing(custom_categories)
def StopTracing(self):
self._tracing_backend.EndTracing()
def GetTraceResultAndReset(self):
return self._tracing_backend.GetTraceResultAndReset()
def GetProcessName(self, cmd_line):
"""Returns a user-friendly name for the process of the given |cmd_line|."""
if 'nacl_helper_bootstrap' in cmd_line:
return 'nacl_helper_bootstrap'
if ':sandboxed_process' in cmd_line:
return 'renderer'
m = re.match(r'.* --type=([^\s]*) .*', cmd_line)
if not m:
return 'browser'
return m.group(1)
def GetRemotePort(self, _):
return util.GetAvailableLocalPort()
def Close(self):
if self._tracing_backend:
self._tracing_backend.Close()
self._tracing_backend = None
def CreateForwarder(self, *port_pairs):
raise NotImplementedError()
def IsBrowserRunning(self):
raise NotImplementedError()
def GetStandardOutput(self):
raise NotImplementedError()
class DoNothingForwarder(object):
def __init__(self, *port_pairs):
self._host_port = port_pairs[0].local_port
@property
def url(self):
assert self._host_port
return 'http://127.0.0.1:%i' % self._host_port
def Close(self):
self._host_port = None
| bsd-3-clause |
toshywoshy/ansible | lib/ansible/modules/cloud/vmware/vsphere_copy.py | 17 | 7469 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2015, Dag Wieers (@dagwieers) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: vsphere_copy
short_description: Copy a file to a VMware datastore
description:
- Upload files to a VMware datastore through a vCenter REST API.
version_added: 2.0
author:
- Dag Wieers (@dagwieers)
options:
hostname:
version_added: "2.9"
aliases: ['host']
port:
version_added: "2.9"
username:
version_added: "2.9"
aliases: ['login']
src:
description:
- The file to push to vCenter.
required: true
type: str
datacenter:
description:
- The datacenter on the vCenter server that holds the datastore.
required: false
type: str
datastore:
description:
- The datastore to push files to.
required: true
type: str
path:
description:
- The file to push to the datastore.
required: true
type: str
timeout:
description:
- The timeout in seconds for the upload to the datastore.
default: 10
type: int
version_added: "2.8"
notes:
- "This module ought to be run from a system that can access the vCenter or the ESXi directly and has the file to transfer.
It can be the normal remote target or you can change it either by using C(transport: local) or using C(delegate_to)."
- Tested on vSphere 5.5 and ESXi 6.7
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Copy file to datastore using delegate_to
vsphere_copy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
src: /some/local/file
datacenter: DC1 Someplace
datastore: datastore1
path: some/remote/file
delegate_to: localhost
- name: Copy file to datastore when datacenter is inside folder called devel
vsphere_copy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
src: /some/local/file
datacenter: devel/DC1
datastore: datastore1
path: some/remote/file
delegate_to: localhost
- name: Copy file to datastore using other_system
vsphere_copy:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
src: /other/local/file
datacenter: DC2 Someplace
datastore: datastore2
path: other/remote/file
delegate_to: other_system
'''
import atexit
import errno
import mmap
import os
import socket
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlencode, quote
from ansible.module_utils._text import to_native
from ansible.module_utils.urls import open_url
from ansible.module_utils.vmware import vmware_argument_spec
def vmware_path(datastore, datacenter, path):
''' Constructs a URL path that vSphere accepts reliably '''
path = "/folder/%s" % quote(path.lstrip("/"))
# Due to a software bug in vSphere, it fails to handle ampersands in datacenter names.
# The solution is to do what vSphere does (when browsing) and double-encode ampersands, and possibly other characters.
if not path.startswith("/"):
path = "/" + path
params = dict(dsName=datastore)
if datacenter:
datacenter = datacenter.replace('&', '%26')
params["dcPath"] = datacenter
params = urlencode(params)
return "%s?%s" % (path, params)
def main():
argument_spec = vmware_argument_spec()
argument_spec.update(dict(
hostname=dict(required=False, aliases=['host']),
username=dict(required=False, aliases=['login']),
src=dict(required=True, aliases=['name']),
datacenter=dict(required=False),
datastore=dict(required=True),
dest=dict(required=True, aliases=['path']),
timeout=dict(default=10, type='int'))
)
module = AnsibleModule(
argument_spec=argument_spec,
# Implementing check-mode using HEAD is impossible, since size/date is not 100% reliable
supports_check_mode=False,
)
if module.params.get('host'):
module.deprecate("The 'host' option is being replaced by 'hostname'", version='2.12')
if module.params.get('login'):
module.deprecate("The 'login' option is being replaced by 'username'", version='2.12')
hostname = module.params['hostname']
username = module.params['username']
password = module.params.get('password')
src = module.params.get('src')
datacenter = module.params.get('datacenter')
datastore = module.params.get('datastore')
dest = module.params.get('dest')
validate_certs = module.params.get('validate_certs')
timeout = module.params.get('timeout')
try:
fd = open(src, "rb")
atexit.register(fd.close)
except Exception as e:
module.fail_json(msg="Failed to open src file %s" % to_native(e))
if os.stat(src).st_size == 0:
data = ''
else:
data = mmap.mmap(fd.fileno(), 0, access=mmap.ACCESS_READ)
atexit.register(data.close)
remote_path = vmware_path(datastore, datacenter, dest)
if not all([hostname, username, password]):
module.fail_json(msg="One of following parameter is missing - hostname, username, password")
url = 'https://%s%s' % (hostname, remote_path)
headers = {
"Content-Type": "application/octet-stream",
"Content-Length": str(len(data)),
}
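# --- Illustrative note (not part of the original module) ---
# The request below is roughly equivalent to:
#
#     curl -k -u user:pass -T /some/local/file \
#         'https://vcenter.example.com/folder/some/remote/file?dsName=datastore1&dcPath=DC1'
#
# i.e. an HTTP PUT of the raw file body with basic authentication; the
# host, credentials and paths shown are placeholders.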
try:
r = open_url(url, data=data, headers=headers, method='PUT', timeout=timeout,
url_username=username, url_password=password, validate_certs=validate_certs,
force_basic_auth=True)
except socket.error as e:
if isinstance(e.args, tuple):
if len(e.args) > 0:
if e.args[0] == errno.ECONNRESET:
# vSphere resets the connection if the file is in use and cannot be replaced
module.fail_json(msg='Failed to upload, image probably in use', status=None, errno=e.args[0], reason=to_native(e), url=url)
else:
module.fail_json(msg=to_native(e))
else:
module.fail_json(msg=str(e), status=None, errno=e[0], reason=str(e),
url=url, exception=traceback.format_exc())
except Exception as e:
error_code = -1
try:
if isinstance(e[0], int):
error_code = e[0]
except (KeyError, TypeError):
pass
module.fail_json(msg=to_native(e), status=None, errno=error_code,
reason=to_native(e), url=url, exception=traceback.format_exc())
status = r.getcode()
if 200 <= status < 300:
module.exit_json(changed=True, status=status, reason=r.msg, url=url)
else:
length = r.headers.get('content-length', None)
if r.headers.get('transfer-encoding', '').lower() == 'chunked':
chunked = 1
else:
chunked = 0
module.fail_json(msg='Failed to upload', errno=None, status=status, reason=r.msg, length=length, headers=dict(r.headers), chunked=chunked, url=url)
if __name__ == '__main__':
main()
| gpl-3.0 |
newville/scikit-image | skimage/filter/__init__.py | 5 | 2789 | from .._shared.utils import skimage_deprecation
from warnings import warn
global _import_warned
warn(skimage_deprecation('The `skimage.filter` module has been renamed '
'to `skimage.filters`. This placeholder module '
'will be removed in v0.13.'))
_import_warned = True
del warn
del skimage_deprecation
from ..filters.lpi_filter import inverse, wiener, LPIFilter2D
from ..filters._gaussian import gaussian_filter
from ..filters.edges import (sobel, hsobel, vsobel, sobel_h, sobel_v,
scharr, hscharr, vscharr, scharr_h, scharr_v,
prewitt, hprewitt, vprewitt, prewitt_h, prewitt_v,
roberts, roberts_positive_diagonal,
roberts_negative_diagonal, roberts_pos_diag,
roberts_neg_diag)
from ..filters._rank_order import rank_order
from ..filters._gabor import gabor_kernel, gabor_filter
from ..filters.thresholding import (threshold_adaptive, threshold_otsu, threshold_yen,
threshold_isodata)
from ..filters import rank
from ..filters.rank import median
from skimage._shared.utils import deprecated
from skimage import restoration
denoise_bilateral = deprecated('skimage.restoration.denoise_bilateral')\
(restoration.denoise_bilateral)
denoise_tv_bregman = deprecated('skimage.restoration.denoise_tv_bregman')\
(restoration.denoise_tv_bregman)
denoise_tv_chambolle = deprecated('skimage.restoration.denoise_tv_chambolle')\
(restoration.denoise_tv_chambolle)
# Backward compatibility v<0.11
@deprecated('skimage.feature.canny')
def canny(*args, **kwargs):
# Hack to avoid circular import
from skimage.feature._canny import canny as canny_
return canny_(*args, **kwargs)
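# --- Illustrative note (not part of the original module) ---
# Old code such as
#
#     from skimage.filter import sobel
#
# keeps working: importing this shim emits the deprecation warning above
# and the name resolves to the implementation in skimage.filters.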
__all__ = ['inverse',
'wiener',
'LPIFilter2D',
'gaussian_filter',
'median',
'canny',
'sobel',
'hsobel',
'vsobel',
'sobel_h',
'sobel_v',
'scharr',
'hscharr',
'vscharr',
'scharr_h',
'scharr_v',
'prewitt',
'hprewitt',
'vprewitt',
'prewitt_h',
'prewitt_v',
'roberts',
'roberts_positive_diagonal',
'roberts_negative_diagonal',
'roberts_pos_diag',
'roberts_neg_diag',
'denoise_tv_chambolle',
'denoise_bilateral',
'denoise_tv_bregman',
'rank_order',
'gabor_kernel',
'gabor_filter',
'threshold_adaptive',
'threshold_otsu',
'threshold_yen',
'threshold_isodata',
'rank']
| bsd-3-clause |
guitarmanj/king-phisher | tests/server/database/manager.py | 4 | 3528 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# tests/server/database/manager.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import unittest
from king_phisher import testing
from king_phisher.server.database import manager as db_manager
from king_phisher.server.database import models as db_models
from king_phisher.utilities import random_string
class DatabaseManagerTests(testing.KingPhisherTestCase):
def _init_db(self):
try:
db_manager.init_database('sqlite://')
except Exception as error:
self.fail("failed to initialize the database (error: {0})".format(error.__class__.__name__))
def test_create_database(self):
self._init_db()
def test_get_meta_data(self):
self._init_db()
database_driver = db_manager.get_metadata('database_driver')
self.assertEqual(database_driver, 'sqlite')
schema_version = db_manager.get_metadata('schema_version')
self.assertEqual(schema_version, db_models.SCHEMA_VERSION)
def test_get_row_by_id(self):
self._init_db()
session = db_manager.Session()
user = db_models.User(name='alice')
session.add(user)
campaign_name = random_string(10)
campaign = db_models.Campaign(name=campaign_name, user=user)
session.add(campaign)
session.commit()
self.assertIsNotNone(campaign.id)
campaign_id = campaign.id
del campaign
row = db_manager.get_row_by_id(session, db_models.Campaign, campaign_id)
self.assertEqual(row.id, campaign_id)
self.assertEqual(row.name, campaign_name)
def test_set_meta_data(self):
self._init_db()
# set a new value
key = random_string(10)
value = random_string(20)
db_manager.set_metadata(key, value)
self.assertEqual(db_manager.get_metadata(key), value)
# update an existing value
value = random_string(30)
db_manager.set_metadata(key, value)
self.assertEqual(db_manager.get_metadata(key), value)
def test_models_convert_to_dictionaries(self):
model = db_models.User(name='alice')
dictionary = model.to_dict()
self.assertIsInstance(dictionary, dict)
self.assertIn('name', dictionary)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
scith/htpc-manager_ynh | sources/libs/formencode/util/doctest24.py | 19 | 99378 | # Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters ([email protected]).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0. Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
# 1. Utility Functions
'is_private',
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Tester
'Tester',
# 8. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 9. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import sys, traceback, inspect, linecache, os, re, types
import unittest, difflib, pdb, tempfile
import warnings
from StringIO import StringIO
# Don't whine about the deprecated is_private function in this
# module's tests.
warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
__name__, 0)
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}
def register_optionflag(name):
flag = 1 << len(OPTIONFLAGS_BY_NAME)
OPTIONFLAGS_BY_NAME[name] = flag
return flag
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
DONT_ACCEPT_BLANKLINE |
NORMALIZE_WHITESPACE |
ELLIPSIS |
IGNORE_EXCEPTION_DETAIL)
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
REPORTING_FLAGS = (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF |
REPORT_ONLY_FIRST_FAILURE)
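# --- Illustrative note (not part of the original module) ---
# register_optionflag() hands out successive powers of two, so flags can be
# combined with bitwise OR and tested with bitwise AND, e.g.:
#
#     flags = ELLIPSIS | NORMALIZE_WHITESPACE
#     if flags & ELLIPSIS:
#         ... # ellipsis matching is enabled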
# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Tester Class -- for backwards compatibility
# 8. Unittest Support
# 9. Debugging Support
# 10. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def is_private(prefix, base):
"""prefix, base -> true iff name prefix + "." + base is "private".
Prefix may be an empty string, and base does not contain a period.
Prefix is ignored (although functions you write conforming to this
protocol may make use of it).
Return true iff base begins with an (at least one) underscore, but
does not both begin and end with (at least) two underscores.
>>> is_private("a.b", "my_func")
False
>>> is_private("____", "_my_func")
True
>>> is_private("someclass", "__init__")
False
>>> is_private("sometypo", "__init_")
True
>>> is_private("x.y.z", "_")
True
>>> is_private("_x.y.z", "__")
False
>>> is_private("", "") # senseless but consistent
False
"""
warnings.warn("is_private is deprecated; it wasn't useful; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning, stacklevel=2)
return base[:1] == "_" and not base[:2] == "__" == base[-2:]
def _extract_future_flags(globs):
"""
Return the compiler-flags associated with the future features that
have been imported into the given namespace (globs).
"""
flags = 0
for fname in __future__.all_feature_names:
feature = globs.get(fname, None)
if feature is getattr(__future__, fname):
flags |= feature.compiler_flag
return flags
def _normalize_module(module, depth=2):
"""
Return the module specified by `module`. In particular:
- If `module` is a module, then return module.
- If `module` is a string, then import and return the
module with that name.
- If `module` is None, then return the calling module.
The calling module is assumed to be the module of
the stack frame at the given depth in the call stack.
"""
if inspect.ismodule(module):
return module
elif isinstance(module, (str, unicode)):
return __import__(module, globals(), locals(), ["*"])
elif module is None:
return sys.modules[sys._getframe(depth).f_globals['__name__']]
else:
raise TypeError("Expected a module, string, or None")
def _indent(s, indent=4):
"""
Add the given number of space characters to the beginning every
non-blank line in `s`, and return the result.
"""
# This regexp matches the start of non-blank lines:
return re.sub('(?m)^(?!$)', indent*' ', s)
def _exception_traceback(exc_info):
"""
Return a string containing a traceback message for the given
exc_info tuple (as returned by sys.exc_info()).
"""
# Get a traceback message.
excout = StringIO()
exc_type, exc_val, exc_tb = exc_info
traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
return excout.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
def getvalue(self):
result = StringIO.getvalue(self)
# If anything at all was written, make sure there's a trailing
# newline. There's no way for the expected output to indicate
# that a trailing newline is missing.
if result and not result.endswith("\n"):
result += "\n"
# Prevent softspace from screwing up the next test case, in
# case they used print with a trailing comma in an example.
if hasattr(self, "softspace"):
del self.softspace
return result
def truncate(self, size=None):
StringIO.truncate(self, size)
if hasattr(self, "softspace"):
del self.softspace
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
"""
Essentially the only subtle case:
>>> _ellipsis_match('aa...aa', 'aaa')
False
"""
if ELLIPSIS_MARKER not in want:
return want == got
# Find "the real" strings.
ws = want.split(ELLIPSIS_MARKER)
assert len(ws) >= 2
# Deal with exact matches possibly needed at one or both ends.
startpos, endpos = 0, len(got)
w = ws[0]
if w: # starts with exact match
if got.startswith(w):
startpos = len(w)
del ws[0]
else:
return False
w = ws[-1]
if w: # ends with exact match
if got.endswith(w):
endpos -= len(w)
del ws[-1]
else:
return False
if startpos > endpos:
# Exact end matches required more characters than we have, as in
# _ellipsis_match('aa...aa', 'aaa')
return False
# For the rest, we only need to find the leftmost non-overlapping
# match for each piece. If there's no overall match that way alone,
# there's no overall match period.
for w in ws:
# w may be '' at times, if there are consecutive ellipses, or
# due to an ellipsis at the start or end of `want`. That's OK.
# Search for an empty string succeeds, and doesn't change startpos.
startpos = got.find(w, startpos, endpos)
if startpos < 0:
return False
startpos += len(w)
return True
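# --- Illustrative note (not part of the original module) ---
# A worked example of the matching above: for want 'a...c...e' and got
# 'abcde', the exact prefix 'a' and suffix 'e' are pinned first, and the
# remaining piece 'c' only needs a leftmost match inside 'bcd', so
# _ellipsis_match('a...c...e', 'abcde') returns True.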
def _comment_line(line):
"Return a commented form of the given line"
line = line.rstrip()
if line:
return '# '+line
else:
return '#'
class _OutputRedirectingPdb(pdb.Pdb):
"""
A specialized version of the python debugger that redirects stdout
to a given stream when interacting with the user. Stdout is *not*
redirected when traced code is executed.
"""
def __init__(self, out):
self.__out = out
pdb.Pdb.__init__(self)
def trace_dispatch(self, *args):
# Redirect stdout to the given stream.
save_stdout = sys.stdout
sys.stdout = self.__out
# Call Pdb's trace dispatch method.
try:
return pdb.Pdb.trace_dispatch(self, *args)
finally:
sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
if not inspect.ismodule(module):
raise TypeError, 'Expected a module: %r' % module
if path.startswith('/'):
raise ValueError, 'Module-relative files may not have absolute paths'
# Find the base directory for the path.
if hasattr(module, '__file__'):
# A normal module/package
basedir = os.path.split(module.__file__)[0]
elif module.__name__ == '__main__':
# An interactive session.
if len(sys.argv)>0 and sys.argv[0] != '':
basedir = os.path.split(sys.argv[0])[0]
else:
basedir = os.curdir
else:
# A module w/o __file__ (this includes builtins)
raise ValueError("Can't resolve paths relative to the module " +
module + " (it has no __file__)")
# Combine the base directory and the path.
return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
"""
A single doctest example, consisting of source code and expected
output. `Example` defines the following attributes:
- source: A single Python statement, always ending with a newline.
The constructor adds a newline if needed.
- want: The expected output from running the source code (either
from stdout, or a traceback in case of exception). `want` ends
with a newline unless it's empty, in which case it's an empty
string. The constructor adds a newline if needed.
- exc_msg: The exception message generated by the example, if
the example is expected to generate an exception; or `None` if
it is not expected to generate an exception. This exception
message is compared against the return value of
`traceback.format_exception_only()`. `exc_msg` ends with a
newline unless it's `None`. The constructor adds a newline
if needed.
- lineno: The line number within the DocTest string containing
this Example where the Example begins. This line number is
zero-based, with respect to the beginning of the DocTest.
- indent: The example's indentation in the DocTest string.
I.e., the number of space characters that precede the
example's first prompt.
- options: A dictionary mapping from option flags to True or
False, which is used to override default options for this
example. Any option flags not contained in this dictionary
are left at their default value (as specified by the
DocTestRunner's optionflags). By default, no options are set.
"""
def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
options=None):
# Normalize inputs.
if not source.endswith('\n'):
source += '\n'
if want and not want.endswith('\n'):
want += '\n'
if exc_msg is not None and not exc_msg.endswith('\n'):
exc_msg += '\n'
# Store properties.
self.source = source
self.want = want
self.lineno = lineno
self.indent = indent
if options is None: options = {}
self.options = options
self.exc_msg = exc_msg
class DocTest:
"""
A collection of doctest examples that should be run in a single
namespace. Each `DocTest` defines the following attributes:
- examples: the list of examples.
- globs: The namespace (aka globals) that the examples should
be run in.
- name: A name identifying the DocTest (typically, the name of
the object whose docstring this DocTest was extracted from).
- filename: The name of the file that this DocTest was extracted
from, or `None` if the filename is unknown.
- lineno: The line number within filename where this DocTest
begins, or `None` if the line number is unavailable. This
line number is zero-based, with respect to the beginning of
the file.
- docstring: The string that the examples were extracted from,
or `None` if the string is unavailable.
"""
def __init__(self, examples, globs, name, filename, lineno, docstring):
"""
Create a new DocTest containing the given examples. The
DocTest's globals are initialized with a copy of `globs`.
"""
assert not isinstance(examples, basestring), \
"DocTest no longer accepts str; use DocTestParser instead"
self.examples = examples
self.docstring = docstring
self.globs = globs.copy()
self.name = name
self.filename = filename
self.lineno = lineno
def __repr__(self):
if len(self.examples) == 0:
examples = 'no examples'
elif len(self.examples) == 1:
examples = '1 example'
else:
examples = '%d examples' % len(self.examples)
return ('<DocTest %s from %s:%s (%s)>' %
(self.name, self.filename, self.lineno, examples))
# This lets us sort tests by name:
def __cmp__(self, other):
if not isinstance(other, DocTest):
return -1
return cmp((self.name, self.filename, self.lineno, id(self)),
(other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
"""
A class used to parse strings containing doctest examples.
"""
# This regular expression is used to find doctest examples in a
# string. It defines three groups: `source` is the source code
# (including leading indentation and prompts); `indent` is the
# indentation of the first (PS1) line of the source code; and
# `want` is the expected output (including leading indentation).
_EXAMPLE_RE = re.compile(r'''
# Source consists of a PS1 line followed by zero or more PS2 lines.
(?P<source>
(?:^(?P<indent> [ ]*) >>> .*) # PS1 line
(?:\n [ ]* \.\.\. .*)*) # PS2 lines
\n?
# Want consists of any non-blank lines that do not start with PS1.
(?P<want> (?:(?![ ]*$) # Not a blank line
(?![ ]*>>>) # Not a line starting with PS1
.*$\n? # But any other line
)*)
''', re.MULTILINE | re.VERBOSE)
# A regular expression for handling `want` strings that contain
# expected exceptions. It divides `want` into three pieces:
# - the traceback header line (`hdr`)
# - the traceback stack (`stack`)
# - the exception message (`msg`), as generated by
# traceback.format_exception_only()
# `msg` may have multiple lines. We assume/require that the
# exception message is the first non-indented line starting with a word
# character following the traceback header line.
_EXCEPTION_RE = re.compile(r"""
# Grab the traceback header. Different versions of Python have
# said different things on the first traceback line.
^(?P<hdr> Traceback\ \(
(?: most\ recent\ call\ last
| innermost\ last
) \) :
)
\s* $ # toss trailing whitespace on the header.
(?P<stack> .*?) # don't blink: absorb stuff until...
^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
""", re.VERBOSE | re.MULTILINE | re.DOTALL)
# A callable returning a true value iff its argument is a blank line
# or contains a single comment.
_IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
def parse(self, string, name='<string>'):
"""
Divide the given string into examples and intervening text,
and return them as a list of alternating Examples and strings.
Line numbers for the Examples are 0-based. The optional
argument `name` is a name identifying this string, and is only
used for error messages.
"""
string = string.expandtabs()
# If all lines begin with the same indentation, then strip it.
min_indent = self._min_indent(string)
if min_indent > 0:
string = '\n'.join([l[min_indent:] for l in string.split('\n')])
output = []
charno, lineno = 0, 0
# Find all doctest examples in the string:
for m in self._EXAMPLE_RE.finditer(string):
# Add the pre-example text to `output`.
output.append(string[charno:m.start()])
# Update lineno (lines before this example)
lineno += string.count('\n', charno, m.start())
# Extract info from the regexp match.
(source, options, want, exc_msg) = \
self._parse_example(m, name, lineno)
# Create an Example, and add it to the list.
if not self._IS_BLANK_OR_COMMENT(source):
output.append( Example(source, want, exc_msg,
lineno=lineno,
indent=min_indent+len(m.group('indent')),
options=options) )
# Update lineno (lines inside this example)
lineno += string.count('\n', m.start(), m.end())
# Update charno.
charno = m.end()
# Add any remaining post-example text to `output`.
output.append(string[charno:])
return output
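# --- Illustrative note (not part of the original module) ---
# For a tiny docstring such as ">>> 1+1\n2\n", parse() returns an
# alternating list roughly of the form
#
#     ['', Example(source='1+1\n', want='2\n', lineno=0), '']
#
# i.e. the text before the example, the Example itself, and the text
# after it (both empty here).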
def get_doctest(self, string, globs, name, filename, lineno):
"""
Extract all doctest examples from the given string, and
collect them into a `DocTest` object.
`globs`, `name`, `filename`, and `lineno` are attributes for
the new `DocTest` object. See the documentation for `DocTest`
for more information.
"""
return DocTest(self.get_examples(string, name), globs,
name, filename, lineno, string)
def get_examples(self, string, name='<string>'):
"""
Extract all doctest examples from the given string, and return
them as a list of `Example` objects. Line numbers are
0-based, because it's most common in doctests that nothing
interesting appears on the same line as opening triple-quote,
and so the first interesting line is called \"line 1\" then.
The optional argument `name` is a name identifying this
string, and is only used for error messages.
"""
return [x for x in self.parse(string, name)
if isinstance(x, Example)]
def _parse_example(self, m, name, lineno):
"""
Given a regular expression match from `_EXAMPLE_RE` (`m`),
return a pair `(source, want)`, where `source` is the matched
example's source code (with prompts and indentation stripped);
and `want` is the example's expected output (with indentation
stripped).
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
# Get the example's indentation level.
indent = len(m.group('indent'))
# Divide source into lines; check that they're properly
# indented; and then strip their indentation & prompts.
source_lines = m.group('source').split('\n')
self._check_prompt_blank(source_lines, indent, name, lineno)
self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
source = '\n'.join([sl[indent+4:] for sl in source_lines])
# Divide want into lines; check that it's properly indented; and
# then strip the indentation. Spaces before the last newline should
# be preserved, so plain rstrip() isn't good enough.
want = m.group('want')
want_lines = want.split('\n')
if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
del want_lines[-1] # forget final newline & spaces after it
self._check_prefix(want_lines, ' '*indent, name,
lineno + len(source_lines))
want = '\n'.join([wl[indent:] for wl in want_lines])
# If `want` contains a traceback message, then extract it.
m = self._EXCEPTION_RE.match(want)
if m:
exc_msg = m.group('msg')
else:
exc_msg = None
# Extract options from the source.
options = self._find_options(source, name, lineno)
return source, options, want, exc_msg
# This regular expression looks for option directives in the
# source code of an example. Option directives are comments
# starting with "doctest:". Warning: this may give false
# positives for string-literals that contain the string
# "#doctest:". Eliminating these false positives would require
# actually parsing the string; but we limit them by ignoring any
# line containing "#doctest:" that is *followed* by a quote mark.
_OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
re.MULTILINE)
def _find_options(self, source, name, lineno):
"""
Return a dictionary containing option overrides extracted from
option directives in the given source string.
`name` is the string's name, and `lineno` is the line number
where the example starts; both are used for error messages.
"""
options = {}
# (note: with the current regexp, this will match at most once:)
for m in self._OPTION_DIRECTIVE_RE.finditer(source):
option_strings = m.group(1).replace(',', ' ').split()
for option in option_strings:
if (option[0] not in '+-' or
option[1:] not in OPTIONFLAGS_BY_NAME):
raise ValueError('line %r of the doctest for %s '
'has an invalid option: %r' %
(lineno+1, name, option))
flag = OPTIONFLAGS_BY_NAME[option[1:]]
options[flag] = (option[0] == '+')
if options and self._IS_BLANK_OR_COMMENT(source):
raise ValueError('line %r of the doctest for %s has an option '
'directive on a line with no example: %r' %
(lineno, name, source))
return options
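# --- Illustrative note (not part of the original module) ---
# For example, the example source line
#
#     print range(5) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
#
# yields the overrides {ELLIPSIS: True, NORMALIZE_WHITESPACE: True}, while
# a leading '-' on a directive maps the corresponding flag to False.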
# This regular expression finds the indentation of every non-blank
# line in a string.
_INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
def _min_indent(self, s):
"Return the minimum indentation of any non-blank line in `s`"
indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
if len(indents) > 0:
return min(indents)
else:
return 0
def _check_prompt_blank(self, lines, indent, name, lineno):
"""
Given the lines of a source string (including prompts and
leading indentation), check to make sure that every prompt is
followed by a space character. If any line is not followed by
a space character, then raise ValueError.
"""
for i, line in enumerate(lines):
if len(line) >= indent+4 and line[indent+3] != ' ':
raise ValueError('line %r of the docstring for %s '
'lacks blank after %s: %r' %
(lineno+i+1, name,
line[indent:indent+3], line))
def _check_prefix(self, lines, prefix, name, lineno):
"""
Check that every line in the given list starts with the given
prefix; if any line does not, then raise a ValueError.
"""
for i, line in enumerate(lines):
if line and not line.startswith(prefix):
raise ValueError('line %r of the docstring for %s has '
'inconsistent leading whitespace: %r' %
(lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
def __init__(self, verbose=False, parser=DocTestParser(),
recurse=True, _namefilter=None, exclude_empty=True):
"""
Create a new doctest finder.
The optional argument `parser` specifies a class or
function that should be used to create new DocTest objects (or
objects that implement the same interface as DocTest). The
signature for this factory function should match the signature
of the DocTest constructor.
If the optional argument `recurse` is false, then `find` will
only examine the given object, and not any contained objects.
If the optional argument `exclude_empty` is false, then `find`
will include tests for objects with empty docstrings.
"""
self._parser = parser
self._verbose = verbose
self._recurse = recurse
self._exclude_empty = exclude_empty
# _namefilter is undocumented, and exists only for temporary backward-
# compatibility support of testmod's deprecated isprivate mess.
self._namefilter = _namefilter
def find(self, obj, name=None, module=None, globs=None,
extraglobs=None):
"""
Return a list of the DocTests that are defined by the given
object's docstring, or by any of its contained objects'
docstrings.
The optional parameter `module` is the module that contains
the given object. If the module is not specified or is None, then
the test finder will attempt to automatically determine the
correct module. The object's module is used:
- As a default namespace, if `globs` is not specified.
- To prevent the DocTestFinder from extracting DocTests
from objects that are imported from other modules.
- To find the name of the file containing the object.
- To help find the line number of the object within its
file.
Contained objects whose module does not match `module` are ignored.
If `module` is False, no attempt to find the module will be made.
This is obscure, of use mostly in tests: if `module` is False, or
is None but cannot be found automatically, then all objects are
considered to belong to the (non-existent) module, so all contained
objects will (recursively) be searched for doctests.
The globals for each DocTest is formed by combining `globs`
and `extraglobs` (bindings in `extraglobs` override bindings
in `globs`). A new copy of the globals dictionary is created
for each DocTest. If `globs` is not specified, then it
defaults to the module's `__dict__`, if specified, or {}
otherwise. If `extraglobs` is not specified, then it defaults
to {}.
"""
# If name was not specified, then extract it from the object.
if name is None:
name = getattr(obj, '__name__', None)
if name is None:
raise ValueError("DocTestFinder.find: name must be given "
"when obj.__name__ doesn't exist: %r" %
(type(obj),))
# Find the module that contains the given object (if obj is
# a module, then module=obj.). Note: this may fail, in which
# case module will be None.
if module is False:
module = None
elif module is None:
module = inspect.getmodule(obj)
# Read the module's source code. This is used by
# DocTestFinder._find_lineno to find the line number for a
# given object's docstring.
try:
file = inspect.getsourcefile(obj) or inspect.getfile(obj)
source_lines = linecache.getlines(file)
if not source_lines:
source_lines = None
except TypeError:
source_lines = None
# Initialize globals, and merge in extraglobs.
if globs is None:
if module is None:
globs = {}
else:
globs = module.__dict__.copy()
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
# Recursively explore `obj`, extracting DocTests.
tests = []
self._find(tests, obj, name, module, source_lines, globs, {})
return tests
def _filter(self, obj, prefix, base):
"""
Return true if the given object should not be examined.
"""
return (self._namefilter is not None and
self._namefilter(prefix, base))
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.isfunction(object):
return module.__dict__ is object.func_globals
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way to be sure.
else:
raise ValueError("object must be a class or function")
def _find(self, tests, obj, name, module, source_lines, globs, seen):
"""
Find tests for the given object and any contained objects, and
add them to `tests`.
"""
if self._verbose:
print 'Finding tests in %s' % name
# If we've already processed this object, then ignore it.
if id(obj) in seen:
return
seen[id(obj)] = 1
# Find a test for this object, and add it to the list of tests.
test = self._get_test(obj, name, module, globs, source_lines)
if test is not None:
tests.append(test)
# Look for tests in a module's contained objects.
if inspect.ismodule(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
valname = '%s.%s' % (name, valname)
# Recurse to functions & classes.
if ((inspect.isfunction(val) or inspect.isclass(val)) and
self._from_module(module, val)):
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a module's __test__ dictionary.
if inspect.ismodule(obj) and self._recurse:
for valname, val in getattr(obj, '__test__', {}).items():
if not isinstance(valname, basestring):
raise ValueError("DocTestFinder.find: __test__ keys "
"must be strings: %r" %
(type(valname),))
if not (inspect.isfunction(val) or inspect.isclass(val) or
inspect.ismethod(val) or inspect.ismodule(val) or
isinstance(val, basestring)):
raise ValueError("DocTestFinder.find: __test__ values "
"must be strings, functions, methods, "
"classes, or modules: %r" %
(type(val),))
valname = '%s.__test__.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
# Look for tests in a class's contained objects.
if inspect.isclass(obj) and self._recurse:
for valname, val in obj.__dict__.items():
# Check if this contained object should be ignored.
if self._filter(val, name, valname):
continue
# Special handling for staticmethod/classmethod.
if isinstance(val, staticmethod):
val = getattr(obj, valname)
if isinstance(val, classmethod):
val = getattr(obj, valname).im_func
# Recurse to methods, properties, and nested classes.
if ((inspect.isfunction(val) or inspect.isclass(val) or
isinstance(val, property)) and
self._from_module(module, val)):
valname = '%s.%s' % (name, valname)
self._find(tests, val, valname, module, source_lines,
globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, basestring):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, basestring):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.im_func
if inspect.isfunction(obj): obj = obj.func_code
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
"""
A class used to run DocTest test cases, and accumulate statistics.
The `run` method is used to process a single DocTest case. It
returns a tuple `(f, t)`, where `t` is the number of test cases
tried, and `f` is the number of test cases that failed.
>>> tests = DocTestFinder().find(_TestClass)
>>> runner = DocTestRunner(verbose=False)
>>> for test in tests:
... print runner.run(test)
(0, 2)
(0, 1)
(0, 2)
(0, 2)
The `summarize` method prints a summary of all the test cases that
have been run by the runner, and returns an aggregated `(f, t)`
tuple:
>>> runner.summarize(verbose=1)
4 items passed all tests:
2 tests in _TestClass
2 tests in _TestClass.__init__
2 tests in _TestClass.get
1 tests in _TestClass.square
7 tests in 4 items.
7 passed and 0 failed.
Test passed.
(0, 7)
The aggregated number of tried examples and failed examples is
also available via the `tries` and `failures` attributes:
>>> runner.tries
7
>>> runner.failures
0
The comparison between expected outputs and actual outputs is done
by an `OutputChecker`. This comparison may be customized with a
number of option flags; see the documentation for `testmod` for
more information. If the option flags are insufficient, then the
comparison may also be customized by passing a subclass of
`OutputChecker` to the constructor.
The test runner's display output can be controlled in two ways.
First, an output function (`out`) can be passed to
`DocTestRunner.run`; this function will be called with strings that
should be displayed. It defaults to `sys.stdout.write`. If
capturing the output is not sufficient, then the display output
can be also customized by subclassing DocTestRunner, and
overriding the methods `report_start`, `report_success`,
`report_unexpected_exception`, and `report_failure`.
"""
# This divider string is used to separate failure messages, and to
# separate sections of the summary.
DIVIDER = "*" * 70
def __init__(self, checker=None, verbose=None, optionflags=0):
"""
Create a new test runner.
Optional keyword arg `checker` is the `OutputChecker` that
should be used to compare the expected outputs and actual
outputs of doctest examples.
Optional keyword arg 'verbose' prints lots of stuff if true,
only failures if false; by default, it's true iff '-v' is in
sys.argv.
Optional argument `optionflags` can be used to control how the
test runner compares expected output to actual output, and how
it displays failures. See the documentation for `testmod` for
more information.
"""
self._checker = checker or OutputChecker()
if verbose is None:
verbose = '-v' in sys.argv
self._verbose = verbose
self.optionflags = optionflags
self.original_optionflags = optionflags
# Keep track of the examples we've run.
self.tries = 0
self.failures = 0
self._name2ft = {}
# Create a fake output target for capturing doctest output.
self._fakeout = _SpoofOut()
#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def report_start(self, out, test, example):
"""
Report that the test runner is about to process the given
example. (Only displays a message if verbose=True)
"""
if self._verbose:
if example.want:
out('Trying:\n' + _indent(example.source) +
'Expecting:\n' + _indent(example.want))
else:
out('Trying:\n' + _indent(example.source) +
'Expecting nothing\n')
def report_success(self, out, test, example, got):
"""
Report that the given example ran successfully. (Only
displays a message if verbose=True)
"""
if self._verbose:
out("ok\n")
def report_failure(self, out, test, example, got):
"""
Report that the given example failed.
"""
out(self._failure_header(test, example) +
self._checker.output_difference(example, got, self.optionflags))
def report_unexpected_exception(self, out, test, example, exc_info):
"""
Report that the given example raised an unexpected exception.
"""
out(self._failure_header(test, example) +
'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
def _failure_header(self, test, example):
out = [self.DIVIDER]
if test.filename:
if test.lineno is not None and example.lineno is not None:
lineno = test.lineno + example.lineno + 1
else:
lineno = '?'
out.append('File "%s", line %s, in %s' %
(test.filename, lineno, test.name))
else:
out.append('Line %s, in %s' % (example.lineno+1, test.name))
out.append('Failed example:')
source = example.source
out.append(_indent(source))
return '\n'.join(out)
#/////////////////////////////////////////////////////////////////
# DocTest Running
#/////////////////////////////////////////////////////////////////
def __run(self, test, compileflags, out):
"""
Run the examples in `test`. Write the outcome of each example
with one of the `DocTestRunner.report_*` methods, using the
writer function `out`. `compileflags` is the set of compiler
flags that should be used to execute examples. Return a tuple
`(f, t)`, where `t` is the number of examples tried, and `f`
is the number of examples that failed. The examples are run
in the namespace `test.globs`.
"""
# Keep track of the number of failures and tries.
failures = tries = 0
# Save the option flags (since option directives can be used
# to modify them).
original_optionflags = self.optionflags
SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
check = self._checker.check_output
# Process each example.
for examplenum, example in enumerate(test.examples):
# If REPORT_ONLY_FIRST_FAILURE is set, then suppress
# reporting after the first failure.
quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
failures > 0)
# Merge in the example's options.
self.optionflags = original_optionflags
if example.options:
for (optionflag, val) in example.options.items():
if val:
self.optionflags |= optionflag
else:
self.optionflags &= ~optionflag
# Record that we started this example.
tries += 1
if not quiet:
self.report_start(out, test, example)
# Use a special filename for compile(), so we can retrieve
# the source code during interactive debugging (see
# __patched_linecache_getlines).
filename = '<doctest %s[%d]>' % (test.name, examplenum)
# Run the example in the given context (globs), and record
# any exception that gets raised. (But don't intercept
# keyboard interrupts.)
try:
# Don't blink! This is where the user's code gets run.
exec compile(example.source, filename, "single",
compileflags, 1) in test.globs
self.debugger.set_continue() # ==== Example Finished ====
exception = None
except KeyboardInterrupt:
raise
except:
exception = sys.exc_info()
self.debugger.set_continue() # ==== Example Finished ====
got = self._fakeout.getvalue() # the actual output
self._fakeout.truncate(0)
outcome = FAILURE # guilty until proved innocent or insane
# If the example executed without raising any exceptions,
# verify its output.
if exception is None:
if check(example.want, got, self.optionflags):
outcome = SUCCESS
# The example raised an exception: check if it was expected.
else:
exc_info = sys.exc_info()
exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
if not quiet:
got += _exception_traceback(exc_info)
# If `example.exc_msg` is None, then we weren't expecting
# an exception.
if example.exc_msg is None:
outcome = BOOM
# We expected an exception: see whether it matches.
elif check(example.exc_msg, exc_msg, self.optionflags):
outcome = SUCCESS
# Another chance if they didn't care about the detail.
elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
m1 = re.match(r'[^:]*:', example.exc_msg)
m2 = re.match(r'[^:]*:', exc_msg)
if m1 and m2 and check(m1.group(0), m2.group(0),
self.optionflags):
outcome = SUCCESS
# Report the outcome.
if outcome is SUCCESS:
if not quiet:
self.report_success(out, test, example, got)
elif outcome is FAILURE:
if not quiet:
self.report_failure(out, test, example, got)
failures += 1
elif outcome is BOOM:
if not quiet:
self.report_unexpected_exception(out, test, example,
exc_info)
failures += 1
else:
assert False, ("unknown outcome", outcome)
# Restore the option flags (in case they were modified)
self.optionflags = original_optionflags
# Record and return the number of failures and tries.
self.__record_outcome(test, failures, tries)
return failures, tries
def __record_outcome(self, test, f, t):
"""
Record the fact that the given DocTest (`test`) generated `f`
failures out of `t` tried examples.
"""
f2, t2 = self._name2ft.get(test.name, (0,0))
self._name2ft[test.name] = (f+f2, t+t2)
self.failures += f
self.tries += t
__LINECACHE_FILENAME_RE = re.compile(r'<doctest '
r'(?P<name>[\w\.]+)'
r'\[(?P<examplenum>\d+)\]>$')
def __patched_linecache_getlines(self, filename):
m = self.__LINECACHE_FILENAME_RE.match(filename)
if m and m.group('name') == self.test.name:
example = self.test.examples[int(m.group('examplenum'))]
return example.source.splitlines(True)
else:
return self.save_linecache_getlines(filename)
def run(self, test, compileflags=None, out=None, clear_globs=True):
"""
Run the examples in `test`, and display the results using the
writer function `out`.
The examples are run in the namespace `test.globs`. If
`clear_globs` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use `clear_globs=False`.
`compileflags` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to `globs`.
The output of each example is checked using
`DocTestRunner.check_output`, and the results are formatted by
the `DocTestRunner.report_*` methods.
"""
self.test = test
if compileflags is None:
compileflags = _extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = _OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
#/////////////////////////////////////////////////////////////////
# Summarization
#/////////////////////////////////////////////////////////////////
def summarize(self, verbose=None):
"""
Print a summary of all the test cases that have been run by
this DocTestRunner, and return a tuple `(f, t)`, where `f` is
the total number of failed examples, and `t` is the total
number of tried examples.
The optional `verbose` argument controls how detailed the
summary is. If the verbosity is not specified, then the
DocTestRunner's verbosity is used.
"""
if verbose is None:
verbose = self._verbose
notests = []
passed = []
failed = []
totalt = totalf = 0
for x in self._name2ft.items():
name, (f, t) = x
assert f <= t
totalt += t
totalf += f
if t == 0:
notests.append(name)
elif f == 0:
passed.append( (name, t) )
else:
failed.append(x)
if verbose:
if notests:
print len(notests), "items had no tests:"
notests.sort()
for thing in notests:
print " ", thing
if passed:
print len(passed), "items passed all tests:"
passed.sort()
for thing, count in passed:
print " %3d tests in %s" % (count, thing)
if failed:
print self.DIVIDER
print len(failed), "items had failures:"
failed.sort()
for thing, (f, t) in failed:
print " %3d of %3d in %s" % (f, t, thing)
if verbose:
print totalt, "tests in", len(self._name2ft), "items."
print totalt - totalf, "passed and", totalf, "failed."
if totalf:
print "***Test Failed***", totalf, "failures."
elif verbose:
print "Test passed."
return totalf, totalt
#/////////////////////////////////////////////////////////////////
# Backward compatibility cruft to maintain doctest.master.
#/////////////////////////////////////////////////////////////////
def merge(self, other):
d = self._name2ft
for name, (f, t) in other._name2ft.items():
if name in d:
print "*** DocTestRunner.merge: '" + name + "' in both" \
" testers; summing outcomes."
f2, t2 = d[name]
f = f + f2
t = t + t2
d[name] = f, t
class OutputChecker:
"""
    A class used to check whether the actual output from a doctest
example matches the expected output. `OutputChecker` defines two
methods: `check_output`, which compares a given pair of outputs,
and returns true if they match; and `output_difference`, which
returns a string describing the differences between two outputs.
"""
def check_output(self, want, got, optionflags):
"""
Return True iff the actual output from an example (`got`)
matches the expected output (`want`). These strings are
always considered to match if they are identical; but
depending on what option flags the test runner is using,
several non-exact match types are also possible. See the
documentation for `TestRunner` for more information about
option flags.
"""
# Handle the common case first, for efficiency:
# if they're string-identical, always return true.
if got == want:
return True
# The values True and False replaced 1 and 0 as the return
# value for boolean comparisons in Python 2.3.
if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
if (got,want) == ("True\n", "1\n"):
return True
if (got,want) == ("False\n", "0\n"):
return True
# <BLANKLINE> can be used as a special sequence to signify a
# blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
# Replace <BLANKLINE> in want with a blank line.
want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
'', want)
# If a line in got contains only spaces, then remove the
# spaces.
got = re.sub('(?m)^\s*?$', '', got)
if got == want:
return True
# This flag causes doctest to ignore any differences in the
# contents of whitespace strings. Note that this can be used
# in conjunction with the ELLIPSIS flag.
if optionflags & NORMALIZE_WHITESPACE:
got = ' '.join(got.split())
want = ' '.join(want.split())
if got == want:
return True
# The ELLIPSIS flag says to let the sequence "..." in `want`
# match any substring in `got`.
if optionflags & ELLIPSIS:
if _ellipsis_match(want, got):
return True
# We didn't find any match; return false.
return False
# Should we do a fancy diff?
def _do_a_fancy_diff(self, want, got, optionflags):
# Not unless they asked for a fancy diff.
if not optionflags & (REPORT_UDIFF |
REPORT_CDIFF |
REPORT_NDIFF):
return False
# If expected output uses ellipsis, a meaningful fancy diff is
# too hard ... or maybe not. In two real-life failures Tim saw,
# a diff was a major help anyway, so this is commented out.
# [todo] _ellipsis_match() knows which pieces do and don't match,
# and could be the basis for a kick-ass diff in this case.
##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
## return False
# ndiff does intraline difference marking, so can be useful even
# for 1-line differences.
if optionflags & REPORT_NDIFF:
return True
# The other diff types need at least a few lines to be helpful.
return want.count('\n') > 2 and got.count('\n') > 2
def output_difference(self, example, got, optionflags):
"""
Return a string describing the differences between the
expected output for a given example (`example`) and the actual
output (`got`). `optionflags` is the set of option flags used
to compare `want` and `got`.
"""
want = example.want
# If <BLANKLINE>s are being used, then replace blank lines
# with <BLANKLINE> in the actual output string.
if not (optionflags & DONT_ACCEPT_BLANKLINE):
got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
# Check if we should use diff.
if self._do_a_fancy_diff(want, got, optionflags):
# Split want & got into lines.
want_lines = want.splitlines(True) # True == keep line ends
got_lines = got.splitlines(True)
# Use difflib to find their differences.
if optionflags & REPORT_UDIFF:
diff = difflib.unified_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'unified diff with -expected +actual'
elif optionflags & REPORT_CDIFF:
diff = difflib.context_diff(want_lines, got_lines, n=2)
diff = list(diff)[2:] # strip the diff header
kind = 'context diff with expected followed by actual'
elif optionflags & REPORT_NDIFF:
engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
diff = list(engine.compare(want_lines, got_lines))
kind = 'ndiff with -expected +actual'
else:
assert 0, 'Bad diff option'
# Remove trailing whitespace on diff output.
diff = [line.rstrip() + '\n' for line in diff]
return 'Differences (%s):\n' % kind + _indent(''.join(diff))
# If we're not using diff, then simply list the expected
# output followed by the actual output.
if want and got:
return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
elif want:
return 'Expected:\n%sGot nothing\n' % _indent(want)
elif got:
return 'Expected nothing\nGot:\n%s' % _indent(got)
else:
return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
"""A DocTest example has failed in debugging mode.
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- got: the actual output
"""
def __init__(self, test, example, got):
self.test = test
self.example = example
self.got = got
def __str__(self):
return str(self.test)
class UnexpectedException(Exception):
"""A DocTest example has encountered an unexpected exception
The exception instance has variables:
- test: the DocTest object being run
    - example: the Example object that failed
- exc_info: the exception info
"""
def __init__(self, test, example, exc_info):
self.test = test
self.example = example
self.exc_info = exc_info
def __str__(self):
return str(self.test)
class DebugRunner(DocTestRunner):
r"""Run doc tests but raise an exception as soon as there is a failure.
If an unexpected exception occurs, an UnexpectedException is raised.
It contains the test, the example, and the original exception:
>>> runner = DebugRunner(verbose=False)
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except UnexpectedException, failure:
... pass
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
We wrap the original exception to give the calling application
access to the test and example information.
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> try:
... runner.run(test)
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
If a failure or error occurs, the globals are left intact:
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 1}
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... >>> raise KeyError
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
Traceback (most recent call last):
...
UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
>>> del test.globs['__builtins__']
>>> test.globs
{'x': 2}
But the globals are cleared if there is no error:
>>> test = DocTestParser().get_doctest('''
... >>> x = 2
... ''', {}, 'foo', 'foo.py', 0)
>>> runner.run(test)
(0, 1)
>>> test.globs
{}
"""
def run(self, test, compileflags=None, out=None, clear_globs=True):
r = DocTestRunner.run(self, test, compileflags, out, False)
if clear_globs:
test.globs.clear()
return r
def report_unexpected_exception(self, out, test, example, exc_info):
raise UnexpectedException(test, example, exc_info)
def report_failure(self, out, test, example, got):
raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
master = None
def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None,
raise_on_error=False, exclude_empty=False):
"""m=None, name=None, globs=None, verbose=None, isprivate=None,
report=True, optionflags=0, extraglobs=None, raise_on_error=False,
exclude_empty=False
Test examples in docstrings in functions and classes reachable
from module m (or the current module if m is not supplied), starting
with m.__doc__. Unless isprivate is specified, private names
are not skipped.
Also test examples reachable from dict m.__test__ if it exists and is
not None. m.__test__ maps names to functions, classes and strings;
function and class docstrings are tested even if the name is private;
strings are tested directly, as if they were docstrings.
Return (#failures, #tests).
See doctest.__doc__ for an overview.
Optional keyword arg "name" gives the name of the module; by default
use m.__name__.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use m.__dict__. A copy of this
dict is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used. This is new in 2.4.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. This is new in 2.3. Possible values (see the
docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Deprecated in Python 2.4:
Optional keyword arg "isprivate" specifies a function used to
    determine whether a name is private. The default function treats
    all functions as public. Optionally, "isprivate" can be
set to doctest.is_private to skip over functions marked as private
using the underscore naming convention; see its docs for details.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if isprivate is not None:
warnings.warn("the isprivate argument is deprecated; "
"examine DocTestFinder.find() lists instead",
DeprecationWarning)
# If no module was given, then use __main__.
if m is None:
# DWA - m will still be None if this wasn't invoked from the command
# line, in which case the following TypeError is about as good an error
# as we should expect
m = sys.modules.get('__main__')
# Check that we were actually given a module.
if not inspect.ismodule(m):
raise TypeError("testmod: module required; %r" % (m,))
# If no name was given, then use the module's name.
if name is None:
name = m.__name__
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
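# A minimal usage sketch (illustrative only, not part of the module): the
# usual idiom is to call testmod() from a module's __main__ guard so that the
# module's own docstring examples are checked when it is run as a script.
def _example_testmod_usage():
    # Roughly equivalent to:
    #     if __name__ == "__main__":
    #         import doctest
    #         doctest.testmod()
    failures, tried = testmod(verbose=False, report=True)
    return failures, tried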
def testfile(filename, module_relative=True, name=None, package=None,
globs=None, verbose=None, report=True, optionflags=0,
extraglobs=None, raise_on_error=False, parser=DocTestParser()):
"""
Test examples in the given file. Return (#failures, #tests).
Optional keyword arg "module_relative" specifies how filenames
should be interpreted:
- If "module_relative" is True (the default), then "filename"
specifies a module-relative path. By default, this path is
relative to the calling module's directory; but if the
"package" argument is specified, then it is relative to that
package. To ensure os-independence, "filename" should use
"/" characters to separate path segments, and should not
be an absolute path (i.e., it may not begin with "/").
- If "module_relative" is False, then "filename" specifies an
os-specific path. The path may be absolute or relative (to
the current working directory).
Optional keyword arg "name" gives the name of the test; by default
use the file's basename.
Optional keyword argument "package" is a Python package or the
name of a Python package whose directory should be used as the
base directory for a module relative filename. If no package is
specified, then the calling module's directory is used as the base
directory for module relative filenames. It is an error to
specify "package" if "module_relative" is False.
Optional keyword arg "globs" gives a dict to be used as the globals
when executing examples; by default, use {}. A copy of this dict
is actually used for each docstring, so that each docstring's
examples start with a clean slate.
Optional keyword arg "extraglobs" gives a dictionary that should be
merged into the globals that are used to execute examples. By
default, no extra globals are used.
Optional keyword arg "verbose" prints lots of stuff if true, prints
only failures if false; by default, it's true iff "-v" is in sys.argv.
Optional keyword arg "report" prints a summary at the end when true,
else prints nothing at the end. In verbose mode, the summary is
detailed, else very brief (in fact, empty if all tests passed).
Optional keyword arg "optionflags" or's together module constants,
and defaults to 0. Possible values (see the docs for details):
DONT_ACCEPT_TRUE_FOR_1
DONT_ACCEPT_BLANKLINE
NORMALIZE_WHITESPACE
ELLIPSIS
IGNORE_EXCEPTION_DETAIL
REPORT_UDIFF
REPORT_CDIFF
REPORT_NDIFF
REPORT_ONLY_FIRST_FAILURE
Optional keyword arg "raise_on_error" raises an exception on the
first unexpected exception or failure. This allows failures to be
post-mortem debugged.
Optional keyword arg "parser" specifies a DocTestParser (or
subclass) that should be used to extract tests from the files.
Advanced tomfoolery: testmod runs methods of a local instance of
class doctest.Tester, then merges the results into (or creates)
global Tester instance doctest.master. Methods of doctest.master
can be called directly too, if you want to do something unusual.
Passing report=0 to testmod is especially useful then, to delay
displaying a summary. Invoke doctest.master.summarize(verbose)
when you're done fiddling.
"""
global master
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path
if module_relative:
package = _normalize_module(package)
filename = _module_relative_path(package, filename)
# If no name was given, then use the file's name.
if name is None:
name = os.path.basename(filename)
# Assemble the globals.
if globs is None:
globs = {}
else:
globs = globs.copy()
if extraglobs is not None:
globs.update(extraglobs)
if raise_on_error:
runner = DebugRunner(verbose=verbose, optionflags=optionflags)
else:
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
# Read the file, convert it to a test, and run it.
s = open(filename).read()
test = parser.get_doctest(s, globs, name, filename, 0)
runner.run(test)
if report:
runner.summarize()
if master is None:
master = runner
else:
master.merge(runner)
return runner.failures, runner.tries
def run_docstring_examples(f, globs, verbose=False, name="NoName",
compileflags=None, optionflags=0):
"""
Test examples in the given object's docstring (`f`), using `globs`
as globals. Optional argument `name` is used in failure messages.
If the optional argument `verbose` is true, then generate output
even if there are no failures.
`compileflags` gives the set of flags that should be used by the
Python compiler when running the examples. If not specified, then
it will default to the set of future-import flags that apply to
`globs`.
Optional keyword arg `optionflags` specifies options for the
testing and output. See the documentation for `testmod` for more
information.
"""
# Find, parse, and run all tests in the given module.
finder = DocTestFinder(verbose=verbose, recurse=False)
runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
for test in finder.find(f, name, globs=globs):
runner.run(test, compileflags=compileflags)
######################################################################
## 7. Tester
######################################################################
# This is provided only for backwards compatibility. It's not
# actually used in any way.
class Tester:
def __init__(self, mod=None, globs=None, verbose=None,
isprivate=None, optionflags=0):
warnings.warn("class Tester is deprecated; "
"use class doctest.DocTestRunner instead",
DeprecationWarning, stacklevel=2)
if mod is None and globs is None:
raise TypeError("Tester.__init__: must specify mod or globs")
if mod is not None and not inspect.ismodule(mod):
raise TypeError("Tester.__init__: mod must be a module; %r" %
(mod,))
if globs is None:
globs = mod.__dict__
self.globs = globs
self.verbose = verbose
self.isprivate = isprivate
self.optionflags = optionflags
self.testfinder = DocTestFinder(_namefilter=isprivate)
self.testrunner = DocTestRunner(verbose=verbose,
optionflags=optionflags)
def runstring(self, s, name):
test = DocTestParser().get_doctest(s, self.globs, name, None, None)
if self.verbose:
print "Running string", name
(f,t) = self.testrunner.run(test)
if self.verbose:
print f, "of", t, "examples failed in string", name
return (f,t)
def rundoc(self, object, name=None, module=None):
f = t = 0
tests = self.testfinder.find(object, name, module=module,
globs=self.globs)
for test in tests:
(f2, t2) = self.testrunner.run(test)
(f,t) = (f+f2, t+t2)
return (f,t)
def rundict(self, d, name, module=None):
import new
m = new.module(name)
m.__dict__.update(d)
if module is None:
module = False
return self.rundoc(m, name, module)
def run__test__(self, d, name):
import new
m = new.module(name)
m.__test__ = d
return self.rundoc(m, name)
def summarize(self, verbose=None):
return self.testrunner.summarize(verbose)
def merge(self, other):
self.testrunner.merge(other.testrunner)
######################################################################
## 8. Unittest Support
######################################################################
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
"""Sets the unittest option flags.
The old flag is returned so that a runner could restore the old
value if it wished to:
>>> old = _unittest_reportflags
>>> set_unittest_reportflags(REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE) == old
True
>>> import doctest
>>> doctest._unittest_reportflags == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
Only reporting flags can be set:
>>> set_unittest_reportflags(ELLIPSIS)
Traceback (most recent call last):
...
ValueError: ('Only reporting flags allowed', 8)
>>> set_unittest_reportflags(old) == (REPORT_NDIFF |
... REPORT_ONLY_FIRST_FAILURE)
True
"""
global _unittest_reportflags
if (flags & REPORTING_FLAGS) != flags:
raise ValueError("Only reporting flags allowed", flags)
old = _unittest_reportflags
_unittest_reportflags = flags
return old
class DocTestCase(unittest.TestCase):
def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
checker=None):
unittest.TestCase.__init__(self)
self._dt_optionflags = optionflags
self._dt_checker = checker
self._dt_test = test
self._dt_setUp = setUp
self._dt_tearDown = tearDown
def setUp(self):
test = self._dt_test
if self._dt_setUp is not None:
self._dt_setUp(test)
def tearDown(self):
test = self._dt_test
if self._dt_tearDown is not None:
self._dt_tearDown(test)
test.globs.clear()
def runTest(self):
test = self._dt_test
old = sys.stdout
new = StringIO()
optionflags = self._dt_optionflags
if not (optionflags & REPORTING_FLAGS):
# The option flags don't include any reporting flags,
# so add the default reporting flags
optionflags |= _unittest_reportflags
runner = DocTestRunner(optionflags=optionflags,
checker=self._dt_checker, verbose=False)
try:
runner.DIVIDER = "-"*70
failures, tries = runner.run(
test, out=new.write, clear_globs=False)
finally:
sys.stdout = old
if failures:
raise self.failureException(self.format_failure(new.getvalue()))
def format_failure(self, err):
test = self._dt_test
if test.lineno is None:
lineno = 'unknown line number'
else:
lineno = '%s' % test.lineno
lname = '.'.join(test.name.split('.')[-1:])
return ('Failed doctest test for %s\n'
' File "%s", line %s, in %s\n\n%s'
% (test.name, test.filename, lineno, lname, err)
)
def debug(self):
r"""Run the test case without results and without catching exceptions
The unit test framework includes a debug method on test cases
and test suites to support post-mortem debugging. The test code
is run in such a way that errors are not caught. This way a
caller can catch the errors and initiate post-mortem debugging.
The DocTestCase provides a debug method that raises
        UnexpectedException errors if there is an unexpected
exception:
>>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
... {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except UnexpectedException, failure:
... pass
The UnexpectedException contains the test, the example, and
the original exception:
>>> failure.test is test
True
>>> failure.example.want
'42\n'
>>> exc_info = failure.exc_info
>>> raise exc_info[0], exc_info[1], exc_info[2]
Traceback (most recent call last):
...
KeyError
If the output doesn't match, then a DocTestFailure is raised:
>>> test = DocTestParser().get_doctest('''
... >>> x = 1
... >>> x
... 2
... ''', {}, 'foo', 'foo.py', 0)
>>> case = DocTestCase(test)
>>> try:
... case.debug()
... except DocTestFailure, failure:
... pass
DocTestFailure objects provide access to the test:
>>> failure.test is test
True
As well as to the example:
>>> failure.example.want
'2\n'
and the actual output:
>>> failure.got
'1\n'
"""
self.setUp()
runner = DebugRunner(optionflags=self._dt_optionflags,
checker=self._dt_checker, verbose=False)
runner.run(self._dt_test)
self.tearDown()
def id(self):
return self._dt_test.name
def __repr__(self):
name = self._dt_test.name.split('.')
return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
__str__ = __repr__
def shortDescription(self):
return "Doctest: " + self._dt_test.name
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
**options):
"""
Convert doctest tests for a module to a unittest test suite.
This converts each documentation string in a module that
contains doctest tests to a unittest test case. If any of the
tests in a doc string fail, then the test case fails. An exception
is raised showing the name of the file containing the test and a
(sometimes approximate) line number.
The `module` argument provides the module to be tested. The argument
can be either a module or a module name.
If no argument is given, the calling module is used.
A number of options may be provided as keyword arguments:
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
"""
if test_finder is None:
test_finder = DocTestFinder()
module = _normalize_module(module)
tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
if globs is None:
globs = module.__dict__
if not tests:
# Why do we want to do this? Because it reveals a bug that might
# otherwise be hidden.
raise ValueError(module, "has no tests")
tests.sort()
suite = unittest.TestSuite()
for test in tests:
if len(test.examples) == 0:
continue
if not test.filename:
filename = module.__file__
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
test.filename = filename
suite.addTest(DocTestCase(test, **options))
return suite
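# Illustrative sketch (the module name "mypackage.mymodule" is hypothetical):
# wrapping a module's doctests in a unittest suite and running it.
def _example_doctest_suite():
    suite = DocTestSuite('mypackage.mymodule',
                         optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)
    return unittest.TextTestRunner(verbosity=2).run(suite)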
class DocFileCase(DocTestCase):
def id(self):
return '_'.join(self._dt_test.name.split('.'))
def __repr__(self):
return self._dt_test.filename
__str__ = __repr__
def format_failure(self, err):
return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
% (self._dt_test.name, self._dt_test.filename, err)
)
def DocFileTest(path, module_relative=True, package=None,
globs=None, parser=DocTestParser(), **options):
if globs is None:
globs = {}
if package and not module_relative:
raise ValueError("Package may only be specified for module-"
"relative paths.")
# Relativize the path.
if module_relative:
package = _normalize_module(package)
path = _module_relative_path(package, path)
# Find the file and read it.
name = os.path.basename(path)
doc = open(path).read()
# Convert it to a test, and wrap it in a DocFileCase.
test = parser.get_doctest(doc, globs, name, path, 0)
return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
"""A unittest suite for one or more doctest files.
The path to each doctest file is given as a string; the
interpretation of that string depends on the keyword argument
"module_relative".
A number of options may be provided as keyword arguments:
module_relative
If "module_relative" is True, then the given file paths are
interpreted as os-independent module-relative paths. By
default, these paths are relative to the calling module's
directory; but if the "package" argument is specified, then
they are relative to that package. To ensure os-independence,
"filename" should use "/" characters to separate path
segments, and may not be an absolute path (i.e., it may not
begin with "/").
If "module_relative" is False, then the given file paths are
interpreted as os-specific paths. These paths may be absolute
or relative (to the current working directory).
package
A Python package or the name of a Python package whose directory
should be used as the base directory for module relative paths.
If "package" is not specified, then the calling module's
directory is used as the base directory for module relative
filenames. It is an error to specify "package" if
"module_relative" is False.
setUp
A set-up function. This is called before running the
tests in each file. The setUp function will be passed a DocTest
object. The setUp function can access the test globals as the
globs attribute of the test passed.
tearDown
A tear-down function. This is called after running the
tests in each file. The tearDown function will be passed a DocTest
object. The tearDown function can access the test globals as the
globs attribute of the test passed.
globs
A dictionary containing initial global variables for the tests.
optionflags
A set of doctest option flags expressed as an integer.
parser
A DocTestParser (or subclass) that should be used to extract
tests from the files.
"""
suite = unittest.TestSuite()
# We do this here so that _normalize_module is called at the right
# level. If it were called in DocFileTest, then this function
# would be the caller and we might guess the package incorrectly.
if kw.get('module_relative', True):
kw['package'] = _normalize_module(kw.get('package'))
for path in paths:
suite.addTest(DocFileTest(path, **kw))
return suite
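# Illustrative sketch (the file names are hypothetical): collecting doctest
# files that live alongside the calling module into a single suite.
def _example_docfile_suite():
    return DocFileSuite('docs/usage.txt', 'docs/errors.txt',
                        module_relative=True,
                        optionflags=REPORT_ONLY_FIRST_FAILURE)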
######################################################################
## 9. Debugging Support
######################################################################
def script_from_examples(s):
r"""Extract script from text with examples.
Converts text with examples to a Python script. Example input is
converted to regular code. Example output and all other words
are converted to comments:
>>> text = '''
... Here are examples of simple math.
...
... Python has super accurate integer addition
...
... >>> 2 + 2
... 5
...
... And very friendly error messages:
...
... >>> 1/0
... To Infinity
... And
... Beyond
...
... You can use logic if you want:
...
... >>> if 0:
... ... blah
... ... blah
... ...
...
... Ho hum
... '''
>>> print script_from_examples(text)
# Here are examples of simple math.
#
# Python has super accurate integer addition
#
2 + 2
# Expected:
## 5
#
# And very friendly error messages:
#
1/0
# Expected:
## To Infinity
## And
## Beyond
#
# You can use logic if you want:
#
if 0:
blah
blah
#
# Ho hum
"""
output = []
for piece in DocTestParser().parse(s):
if isinstance(piece, Example):
# Add the example's source code (strip trailing NL)
output.append(piece.source[:-1])
# Add the expected output:
want = piece.want
if want:
output.append('# Expected:')
output += ['## '+l for l in want.split('\n')[:-1]]
else:
# Add non-example text.
output += [_comment_line(l)
for l in piece.split('\n')[:-1]]
# Trim junk on both ends.
while output and output[-1] == '#':
output.pop()
while output and output[0] == '#':
output.pop(0)
# Combine the output, and return it.
return '\n'.join(output)
def testsource(module, name):
"""Extract the test sources from a doctest docstring as a script.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the doc string with tests to be debugged.
"""
module = _normalize_module(module)
tests = DocTestFinder().find(module)
test = [t for t in tests if t.name == name]
if not test:
raise ValueError(name, "not found in tests")
test = test[0]
testsrc = script_from_examples(test.docstring)
return testsrc
def debug_src(src, pm=False, globs=None):
"""Debug a single doctest docstring, in argument `src`'"""
testsrc = script_from_examples(src)
debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
"Debug a test script. `src` is the script, as a string."
import pdb
    # Note that tempfile.NamedTemporaryFile() cannot be used. As the
# docs say, a file so created cannot be opened by name a second time
# on modern Windows boxes, and execfile() needs to open it.
srcfilename = tempfile.mktemp(".py", "doctestdebug")
f = open(srcfilename, 'w')
f.write(src)
f.close()
try:
if globs:
globs = globs.copy()
else:
globs = {}
if pm:
try:
execfile(srcfilename, globs, globs)
except:
print sys.exc_info()[1]
pdb.post_mortem(sys.exc_info()[2])
else:
# Note that %r is vital here. '%s' instead can, e.g., cause
# backslashes to get treated as metacharacters on Windows.
pdb.run("execfile(%r)" % srcfilename, globs, globs)
finally:
os.remove(srcfilename)
def debug(module, name, pm=False):
"""Debug a single doctest docstring.
Provide the module (or dotted name of the module) containing the
test to be debugged and the name (within the module) of the object
with the docstring with tests to be debugged.
"""
module = _normalize_module(module)
testsrc = testsource(module, name)
debug_script(testsrc, pm, module.__dict__)
######################################################################
## 10. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print t.get()
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print x.get()
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print 'foo\n\nbar\n'
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print range(1000) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print range(30) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
r = unittest.TextTestRunner()
r.run(DocTestSuite())
if __name__ == "__main__":
_test()
| gpl-3.0 |
kbkailashbagaria/subliminal | subliminal/converters/tvsubtitles.py | 7 | 1140 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from babelfish import LanguageReverseConverter, language_converters
class TVsubtitlesConverter(LanguageReverseConverter):
def __init__(self):
self.alpha2_converter = language_converters['alpha2']
self.from_tvsubtitles = {'br': ('por', 'BR'), 'ua': ('ukr',), 'gr': ('ell',), 'cn': ('zho',), 'jp': ('jpn',),
'cz': ('ces',)}
self.to_tvsubtitles = {v: k for k, v in self.from_tvsubtitles.items()}
self.codes = self.alpha2_converter.codes | set(self.from_tvsubtitles.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3, country) in self.to_tvsubtitles:
return self.to_tvsubtitles[(alpha3, country)]
if (alpha3,) in self.to_tvsubtitles:
return self.to_tvsubtitles[(alpha3,)]
return self.alpha2_converter.convert(alpha3, country, script)
def reverse(self, tvsubtitles):
if tvsubtitles in self.from_tvsubtitles:
return self.from_tvsubtitles[tvsubtitles]
return self.alpha2_converter.reverse(tvsubtitles)
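# Minimal usage sketch (illustrative only): round-tripping between alpha3
# codes and TVsubtitles' site-specific codes; codes outside the special-case
# table should fall back to the standard alpha2 converter.
def _example_usage():
    converter = TVsubtitlesConverter()
    assert converter.convert('por', 'BR') == 'br'   # Brazilian Portuguese
    assert converter.reverse('cz') == ('ces',)      # Czech
    assert converter.convert('eng') == 'en'         # alpha2 fallback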
| mit |
bdh1011/cupeye | venv/lib/python2.7/site-packages/pip/_vendor/distlib/util.py | 203 | 51453 | #
# Copyright (C) 2012-2014 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
import codecs
from collections import deque
import contextlib
import csv
from glob import iglob as std_iglob
import io
import json
import logging
import os
import py_compile
import re
import shutil
import socket
import ssl
import subprocess
import sys
import tarfile
import tempfile
try:
import threading
except ImportError:
import dummy_threading as threading
import time
from . import DistlibException
from .compat import (string_types, text_type, shutil, raw_input, StringIO,
cache_from_source, urlopen, httplib, xmlrpclib, splittype,
HTTPHandler, HTTPSHandler as BaseHTTPSHandler,
BaseConfigurator, valid_ident, Container, configparser,
URLError, match_hostname, CertificateError, ZipFile)
logger = logging.getLogger(__name__)
#
# Requirement parsing code for name + optional constraints + optional extras
#
# e.g. 'foo >= 1.2, < 2.0 [bar, baz]'
#
# The regex can seem a bit hairy, so we build it up out of smaller pieces
# which are manageable.
#
COMMA = r'\s*,\s*'
COMMA_RE = re.compile(COMMA)
IDENT = r'(\w|[.-])+'
EXTRA_IDENT = r'(\*|:(\*|\w+):|' + IDENT + ')'
VERSPEC = IDENT + r'\*?'
RELOP = '([<>=!~]=)|[<>]'
#
# The first relop is optional - if absent, will be taken as '~='
#
BARE_CONSTRAINTS = ('(' + RELOP + r')?\s*(' + VERSPEC + ')(' + COMMA + '(' +
RELOP + r')\s*(' + VERSPEC + '))*')
DIRECT_REF = '(from\s+(?P<diref>.*))'
#
# Either the bare constraints or the bare constraints in parentheses
#
CONSTRAINTS = (r'\(\s*(?P<c1>' + BARE_CONSTRAINTS + '|' + DIRECT_REF +
r')\s*\)|(?P<c2>' + BARE_CONSTRAINTS + '\s*)')
EXTRA_LIST = EXTRA_IDENT + '(' + COMMA + EXTRA_IDENT + ')*'
EXTRAS = r'\[\s*(?P<ex>' + EXTRA_LIST + r')?\s*\]'
REQUIREMENT = ('(?P<dn>' + IDENT + r')\s*(' + EXTRAS + r'\s*)?(\s*' +
CONSTRAINTS + ')?$')
REQUIREMENT_RE = re.compile(REQUIREMENT)
#
# Used to scan through the constraints
#
RELOP_IDENT = '(?P<op>' + RELOP + r')\s*(?P<vn>' + VERSPEC + ')'
RELOP_IDENT_RE = re.compile(RELOP_IDENT)
def parse_requirement(s):
def get_constraint(m):
d = m.groupdict()
return d['op'], d['vn']
result = None
m = REQUIREMENT_RE.match(s)
if m:
d = m.groupdict()
name = d['dn']
cons = d['c1'] or d['c2']
if not d['diref']:
url = None
else:
# direct reference
cons = None
url = d['diref'].strip()
if not cons:
cons = None
constr = ''
rs = d['dn']
else:
if cons[0] not in '<>!=':
cons = '~=' + cons
iterator = RELOP_IDENT_RE.finditer(cons)
cons = [get_constraint(m) for m in iterator]
rs = '%s (%s)' % (name, ', '.join(['%s %s' % con for con in cons]))
if not d['ex']:
extras = None
else:
extras = COMMA_RE.split(d['ex'])
result = Container(name=name, constraints=cons, extras=extras,
requirement=rs, source=s, url=url)
return result
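# Illustrative sketch (the project name 'foo' is a placeholder): two
# requirement forms that the regexes above accept.
def _example_parse_requirement():
    r1 = parse_requirement('foo (>= 1.2, < 2.0)')
    # r1.name -> 'foo'; r1.constraints -> [('>=', '1.2'), ('<', '2.0')]
    r2 = parse_requirement('foo [bar, baz] >= 1.2')
    # r2.extras -> ['bar', 'baz']; r2.constraints -> [('>=', '1.2')]
    return r1, r2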
def get_resources_dests(resources_root, rules):
"""Find destinations for resources files"""
def get_rel_path(base, path):
# normalizes and returns a lstripped-/-separated path
base = base.replace(os.path.sep, '/')
path = path.replace(os.path.sep, '/')
assert path.startswith(base)
return path[len(base):].lstrip('/')
destinations = {}
for base, suffix, dest in rules:
prefix = os.path.join(resources_root, base)
for abs_base in iglob(prefix):
abs_glob = os.path.join(abs_base, suffix)
for abs_path in iglob(abs_glob):
resource_file = get_rel_path(resources_root, abs_path)
if dest is None: # remove the entry if it was here
destinations.pop(resource_file, None)
else:
rel_path = get_rel_path(abs_base, abs_path)
rel_dest = dest.replace(os.path.sep, '/').rstrip('/')
destinations[resource_file] = rel_dest + '/' + rel_path
return destinations
def in_venv():
if hasattr(sys, 'real_prefix'):
# virtualenv venvs
result = True
else:
# PEP 405 venvs
result = sys.prefix != getattr(sys, 'base_prefix', sys.prefix)
return result
def get_executable():
# The __PYVENV_LAUNCHER__ dance is apparently no longer needed, as
# changes to the stub launcher mean that sys.executable always points
# to the stub on OS X
# if sys.platform == 'darwin' and ('__PYVENV_LAUNCHER__'
# in os.environ):
# result = os.environ['__PYVENV_LAUNCHER__']
# else:
# result = sys.executable
# return result
return sys.executable
def proceed(prompt, allowed_chars, error_prompt=None, default=None):
p = prompt
while True:
s = raw_input(p)
p = prompt
if not s and default:
s = default
if s:
c = s[0].lower()
if c in allowed_chars:
break
if error_prompt:
p = '%c: %s\n%s' % (c, error_prompt, prompt)
return c
def extract_by_key(d, keys):
if isinstance(keys, string_types):
keys = keys.split()
result = {}
for key in keys:
if key in d:
result[key] = d[key]
return result
def read_exports(stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
# Try to load as JSON, falling back on legacy format
data = stream.read()
stream = StringIO(data)
try:
data = json.load(stream)
result = data['extensions']['python.exports']['exports']
for group, entries in result.items():
for k, v in entries.items():
s = '%s = %s' % (k, v)
entry = get_export_entry(s)
assert entry is not None
entries[k] = entry
return result
except Exception:
stream.seek(0, 0)
cp = configparser.ConfigParser()
if hasattr(cp, 'read_file'):
cp.read_file(stream)
else:
cp.readfp(stream)
result = {}
for key in cp.sections():
result[key] = entries = {}
for name, value in cp.items(key):
s = '%s = %s' % (name, value)
entry = get_export_entry(s)
assert entry is not None
#entry.dist = self
entries[name] = entry
return result
def write_exports(exports, stream):
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getwriter('utf-8')(stream)
cp = configparser.ConfigParser()
for k, v in exports.items():
# TODO check k, v for valid values
cp.add_section(k)
for entry in v.values():
if entry.suffix is None:
s = entry.prefix
else:
s = '%s:%s' % (entry.prefix, entry.suffix)
if entry.flags:
s = '%s [%s]' % (s, ', '.join(entry.flags))
cp.set(k, entry.name, s)
cp.write(stream)
@contextlib.contextmanager
def tempdir():
td = tempfile.mkdtemp()
try:
yield td
finally:
shutil.rmtree(td)
@contextlib.contextmanager
def chdir(d):
cwd = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(cwd)
@contextlib.contextmanager
def socket_timeout(seconds=15):
cto = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(seconds)
yield
finally:
socket.setdefaulttimeout(cto)
class cached_property(object):
def __init__(self, func):
self.func = func
#for attr in ('__name__', '__module__', '__doc__'):
# setattr(self, attr, getattr(func, attr, None))
def __get__(self, obj, cls=None):
if obj is None:
return self
value = self.func(obj)
object.__setattr__(obj, self.func.__name__, value)
#obj.__dict__[self.func.__name__] = value = self.func(obj)
return value
def convert_path(pathname):
"""Return 'pathname' as a name that will work on the native filesystem.
The path is split on '/' and put back together again using the current
directory separator. Needed because filenames in the setup script are
always supplied in Unix style, and have to be converted to the local
convention before we can actually use them in the filesystem. Raises
ValueError on non-Unix-ish systems if 'pathname' either starts or
ends with a slash.
"""
if os.sep == '/':
return pathname
if not pathname:
return pathname
if pathname[0] == '/':
raise ValueError("path '%s' cannot be absolute" % pathname)
if pathname[-1] == '/':
raise ValueError("path '%s' cannot end with '/'" % pathname)
paths = pathname.split('/')
while os.curdir in paths:
paths.remove(os.curdir)
if not paths:
return os.curdir
return os.path.join(*paths)
class FileOperator(object):
def __init__(self, dry_run=False):
self.dry_run = dry_run
self.ensured = set()
self._init_record()
def _init_record(self):
self.record = False
self.files_written = set()
self.dirs_created = set()
def record_as_written(self, path):
if self.record:
self.files_written.add(path)
def newer(self, source, target):
"""Tell if the target is newer than the source.
Returns true if 'source' exists and is more recently modified than
'target', or if 'source' exists and 'target' doesn't.
Returns false if both exist and 'target' is the same age or younger
        than 'source'. Raise DistlibException if 'source' does not exist.
Note that this test is not very accurate: files created in the same
second will have the same "age".
"""
if not os.path.exists(source):
raise DistlibException("file '%r' does not exist" %
os.path.abspath(source))
if not os.path.exists(target):
return True
return os.stat(source).st_mtime > os.stat(target).st_mtime
def copy_file(self, infile, outfile, check=True):
"""Copy a file respecting dry-run and force flags.
"""
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying %s to %s', infile, outfile)
if not self.dry_run:
msg = None
if check:
if os.path.islink(outfile):
msg = '%s is a symlink' % outfile
elif os.path.exists(outfile) and not os.path.isfile(outfile):
msg = '%s is a non-regular file' % outfile
if msg:
raise ValueError(msg + ' which would be overwritten')
shutil.copyfile(infile, outfile)
self.record_as_written(outfile)
def copy_stream(self, instream, outfile, encoding=None):
assert not os.path.isdir(outfile)
self.ensure_dir(os.path.dirname(outfile))
logger.info('Copying stream %s to %s', instream, outfile)
if not self.dry_run:
if encoding is None:
outstream = open(outfile, 'wb')
else:
outstream = codecs.open(outfile, 'w', encoding=encoding)
try:
shutil.copyfileobj(instream, outstream)
finally:
outstream.close()
self.record_as_written(outfile)
def write_binary_file(self, path, data):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data)
self.record_as_written(path)
def write_text_file(self, path, data, encoding):
self.ensure_dir(os.path.dirname(path))
if not self.dry_run:
with open(path, 'wb') as f:
f.write(data.encode(encoding))
self.record_as_written(path)
def set_mode(self, bits, mask, files):
if os.name == 'posix':
# Set the executable bits (owner, group, and world) on
# all the files specified.
for f in files:
if self.dry_run:
logger.info("changing mode of %s", f)
else:
mode = (os.stat(f).st_mode | bits) & mask
logger.info("changing mode of %s to %o", f, mode)
os.chmod(f, mode)
set_executable_mode = lambda s, f: s.set_mode(0o555, 0o7777, f)
def ensure_dir(self, path):
path = os.path.abspath(path)
if path not in self.ensured and not os.path.exists(path):
self.ensured.add(path)
d, f = os.path.split(path)
self.ensure_dir(d)
logger.info('Creating %s' % path)
if not self.dry_run:
os.mkdir(path)
if self.record:
self.dirs_created.add(path)
def byte_compile(self, path, optimize=False, force=False, prefix=None):
dpath = cache_from_source(path, not optimize)
logger.info('Byte-compiling %s to %s', path, dpath)
if not self.dry_run:
if force or self.newer(path, dpath):
if not prefix:
diagpath = None
else:
assert path.startswith(prefix)
diagpath = path[len(prefix):]
py_compile.compile(path, dpath, diagpath, True) # raise error
self.record_as_written(dpath)
return dpath
def ensure_removed(self, path):
if os.path.exists(path):
if os.path.isdir(path) and not os.path.islink(path):
logger.debug('Removing directory tree at %s', path)
if not self.dry_run:
shutil.rmtree(path)
if self.record:
if path in self.dirs_created:
self.dirs_created.remove(path)
else:
if os.path.islink(path):
s = 'link'
else:
s = 'file'
logger.debug('Removing %s %s', s, path)
if not self.dry_run:
os.remove(path)
if self.record:
if path in self.files_written:
self.files_written.remove(path)
def is_writable(self, path):
result = False
while not result:
if os.path.exists(path):
result = os.access(path, os.W_OK)
break
parent = os.path.dirname(path)
if parent == path:
break
path = parent
return result
def commit(self):
"""
Commit recorded changes, turn off recording, return
changes.
"""
assert self.record
result = self.files_written, self.dirs_created
self._init_record()
return result
def rollback(self):
if not self.dry_run:
for f in list(self.files_written):
if os.path.exists(f):
os.remove(f)
# dirs should all be empty now, except perhaps for
# __pycache__ subdirs
# reverse so that subdirs appear before their parents
dirs = sorted(self.dirs_created, reverse=True)
for d in dirs:
flist = os.listdir(d)
if flist:
assert flist == ['__pycache__']
sd = os.path.join(d, flist[0])
os.rmdir(sd)
os.rmdir(d) # should fail if non-empty
self._init_record()
def resolve(module_name, dotted_path):
if module_name in sys.modules:
mod = sys.modules[module_name]
else:
mod = __import__(module_name)
if dotted_path is None:
result = mod
else:
parts = dotted_path.split('.')
result = getattr(mod, parts.pop(0))
for p in parts:
result = getattr(result, p)
return result
class ExportEntry(object):
def __init__(self, name, prefix, suffix, flags):
self.name = name
self.prefix = prefix
self.suffix = suffix
self.flags = flags
@cached_property
def value(self):
return resolve(self.prefix, self.suffix)
def __repr__(self):
return '<ExportEntry %s = %s:%s %s>' % (self.name, self.prefix,
self.suffix, self.flags)
def __eq__(self, other):
if not isinstance(other, ExportEntry):
result = False
else:
result = (self.name == other.name and
self.prefix == other.prefix and
self.suffix == other.suffix and
self.flags == other.flags)
return result
__hash__ = object.__hash__
ENTRY_RE = re.compile(r'''(?P<name>(\w|[-.])+)
\s*=\s*(?P<callable>(\w+)([:\.]\w+)*)
\s*(\[\s*(?P<flags>\w+(=\w+)?(,\s*\w+(=\w+)?)*)\s*\])?
''', re.VERBOSE)
def get_export_entry(specification):
m = ENTRY_RE.search(specification)
if not m:
result = None
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
else:
d = m.groupdict()
name = d['name']
path = d['callable']
colons = path.count(':')
if colons == 0:
prefix, suffix = path, None
else:
if colons != 1:
raise DistlibException('Invalid specification '
'%r' % specification)
prefix, suffix = path.split(':')
flags = d['flags']
if flags is None:
if '[' in specification or ']' in specification:
raise DistlibException('Invalid specification '
'%r' % specification)
flags = []
else:
flags = [f.strip() for f in flags.split(',')]
result = ExportEntry(name, prefix, suffix, flags)
return result
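# Illustrative sketch (the entry point is hypothetical): how a specification
# string maps onto an ExportEntry.
def _example_get_export_entry():
    entry = get_export_entry('frobnicate = mypkg.cli:main [extra1]')
    # entry.name -> 'frobnicate'; entry.prefix -> 'mypkg.cli'
    # entry.suffix -> 'main'; entry.flags -> ['extra1']
    return entry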
def get_cache_base(suffix=None):
"""
Return the default base location for distlib caches. If the directory does
not exist, it is created. Use the suffix provided for the base directory,
and default to '.distlib' if it isn't provided.
On Windows, if LOCALAPPDATA is defined in the environment, then it is
assumed to be a directory, and will be the parent directory of the result.
On POSIX, and on Windows if LOCALAPPDATA is not defined, the user's home
directory - using os.expanduser('~') - will be the parent directory of
the result.
The result is just the directory '.distlib' in the parent directory as
determined above, or with the name specified with ``suffix``.
"""
if suffix is None:
suffix = '.distlib'
if os.name == 'nt' and 'LOCALAPPDATA' in os.environ:
result = os.path.expandvars('$localappdata')
else:
# Assume posix, or old Windows
result = os.path.expanduser('~')
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if os.path.isdir(result):
usable = os.access(result, os.W_OK)
if not usable:
logger.warning('Directory exists but is not writable: %s', result)
else:
try:
os.makedirs(result)
usable = True
except OSError:
logger.warning('Unable to create %s', result, exc_info=True)
usable = False
if not usable:
result = tempfile.mkdtemp()
logger.warning('Default location unusable, using %s', result)
return os.path.join(result, suffix)
def path_to_cache_dir(path):
"""
Convert an absolute path to a directory name for use in a cache.
The algorithm used is:
#. On Windows, any ``':'`` in the drive is replaced with ``'---'``.
#. Any occurrence of ``os.sep`` is replaced with ``'--'``.
#. ``'.cache'`` is appended.
"""
d, p = os.path.splitdrive(os.path.abspath(path))
if d:
d = d.replace(':', '---')
p = p.replace(os.sep, '--')
return d + p + '.cache'
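# Illustrative sketch (the path is hypothetical): the transformation
# described above, applied on a POSIX system.
def _example_path_to_cache_dir():
    # Expected to yield '--usr--share--data.cache'.
    return path_to_cache_dir('/usr/share/data')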
def ensure_slash(s):
if not s.endswith('/'):
return s + '/'
return s
def parse_credentials(netloc):
username = password = None
if '@' in netloc:
prefix, netloc = netloc.split('@', 1)
if ':' not in prefix:
username = prefix
else:
username, password = prefix.split(':', 1)
return username, password, netloc
def get_process_umask():
result = os.umask(0o22)
os.umask(result)
return result
def is_string_sequence(seq):
result = True
i = None
for i, s in enumerate(seq):
if not isinstance(s, string_types):
result = False
break
assert i is not None
return result
PROJECT_NAME_AND_VERSION = re.compile('([a-z0-9_]+([.-][a-z_][a-z0-9_]*)*)-'
'([a-z0-9_.+-]+)', re.I)
PYTHON_VERSION = re.compile(r'-py(\d\.?\d?)')
def split_filename(filename, project_name=None):
"""
Extract name, version, python version from a filename (no extension)
Return name, version, pyver or None
"""
result = None
pyver = None
m = PYTHON_VERSION.search(filename)
if m:
pyver = m.group(1)
filename = filename[:m.start()]
if project_name and len(filename) > len(project_name) + 1:
m = re.match(re.escape(project_name) + r'\b', filename)
if m:
n = m.end()
result = filename[:n], filename[n + 1:], pyver
if result is None:
m = PROJECT_NAME_AND_VERSION.match(filename)
if m:
result = m.group(1), m.group(3), pyver
return result
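# Illustrative sketch (the archive name is hypothetical): extracting name,
# version and Python version tag from a filename without its extension.
def _example_split_filename():
    # Expected to yield ('mydist', '1.0.2', '27').
    return split_filename('mydist-1.0.2-py27')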
# Allow spaces in name because of legacy dists like "Twisted Core"
NAME_VERSION_RE = re.compile(r'(?P<name>[\w .-]+)\s*'
r'\(\s*(?P<ver>[^\s)]+)\)$')
def parse_name_and_version(p):
"""
A utility method used to get name and version from a string.
From e.g. a Provides-Dist value.
:param p: A value in a form 'foo (1.0)'
:return: The name and version as a tuple.
"""
m = NAME_VERSION_RE.match(p)
if not m:
raise DistlibException('Ill-formed name/version string: \'%s\'' % p)
d = m.groupdict()
return d['name'].strip().lower(), d['ver']
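# Illustrative sketch (the name 'Foo' is a placeholder): parsing a
# Provides-Dist style value; note the returned name is lower-cased.
def _example_parse_name_and_version():
    # Expected to yield ('foo', '1.0').
    return parse_name_and_version('Foo (1.0)')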
def get_extras(requested, available):
result = set()
requested = set(requested or [])
available = set(available or [])
if '*' in requested:
requested.remove('*')
result |= available
for r in requested:
if r == '-':
result.add(r)
elif r.startswith('-'):
unwanted = r[1:]
if unwanted not in available:
logger.warning('undeclared extra: %s' % unwanted)
if unwanted in result:
result.remove(unwanted)
else:
if r not in available:
logger.warning('undeclared extra: %s' % r)
result.add(r)
return result
#
# Extended metadata functionality
#
def _get_external_data(url):
result = {}
try:
# urlopen might fail if it runs into redirections,
# because of Python issue #13696. Fixed in locators
# using a custom redirect handler.
resp = urlopen(url)
headers = resp.info()
if headers.get('Content-Type') != 'application/json':
logger.debug('Unexpected response for JSON request')
else:
reader = codecs.getreader('utf-8')(resp)
#data = reader.read().decode('utf-8')
#result = json.loads(data)
result = json.load(reader)
except Exception as e:
logger.exception('Failed to get external data for %s: %s', url, e)
return result
def get_project_data(name):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/project.json' % (name[0].upper(), name))
result = _get_external_data(url)
return result
def get_package_data(name, version):
url = ('https://www.red-dove.com/pypi/projects/'
'%s/%s/package-%s.json' % (name[0].upper(), name, version))
return _get_external_data(url)
class Cache(object):
"""
A class implementing a cache for resources that need to live in the file system
e.g. shared libraries. This class was moved from resources to here because it
could be used by other modules, e.g. the wheel module.
"""
def __init__(self, base):
"""
Initialise an instance.
:param base: The base directory where the cache should be located.
"""
# we use 'isdir' instead of 'exists', because we want to
# fail if there's a file with that name
if not os.path.isdir(base):
os.makedirs(base)
if (os.stat(base).st_mode & 0o77) != 0:
logger.warning('Directory \'%s\' is not private', base)
self.base = os.path.abspath(os.path.normpath(base))
def prefix_to_dir(self, prefix):
"""
Converts a resource prefix to a directory name in the cache.
"""
return path_to_cache_dir(prefix)
def clear(self):
"""
Clear the cache.
"""
not_removed = []
for fn in os.listdir(self.base):
fn = os.path.join(self.base, fn)
try:
if os.path.islink(fn) or os.path.isfile(fn):
os.remove(fn)
elif os.path.isdir(fn):
shutil.rmtree(fn)
except Exception:
not_removed.append(fn)
return not_removed
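# Usage sketch (illustrative; the base directory shown is an assumption):
#
#   cache = Cache(os.path.expanduser('~/.distlib/resource-cache'))
#   target_dir = os.path.join(cache.base, cache.prefix_to_dir('/some/prefix'))
#   not_removed = cache.clear()   # paths that could not be deleted, if any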
class EventMixin(object):
"""
A very simple publish/subscribe system.
"""
def __init__(self):
self._subscribers = {}
def add(self, event, subscriber, append=True):
"""
Add a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be added (and called when the
event is published).
:param append: Whether to append or prepend the subscriber to an
existing subscriber list for the event.
"""
subs = self._subscribers
if event not in subs:
subs[event] = deque([subscriber])
else:
sq = subs[event]
if append:
sq.append(subscriber)
else:
sq.appendleft(subscriber)
def remove(self, event, subscriber):
"""
Remove a subscriber for an event.
:param event: The name of an event.
:param subscriber: The subscriber to be removed.
"""
subs = self._subscribers
if event not in subs:
raise ValueError('No subscribers: %r' % event)
subs[event].remove(subscriber)
def get_subscribers(self, event):
"""
Return an iterator for the subscribers for an event.
:param event: The event to return subscribers for.
"""
return iter(self._subscribers.get(event, ()))
def publish(self, event, *args, **kwargs):
"""
Publish a event and return a list of values returned by its
subscribers.
:param event: The event to publish.
:param args: The positional arguments to pass to the event's
subscribers.
:param kwargs: The keyword arguments to pass to the event's
subscribers.
"""
result = []
for subscriber in self.get_subscribers(event):
try:
value = subscriber(event, *args, **kwargs)
except Exception:
logger.exception('Exception during event publication')
value = None
result.append(value)
logger.debug('publish %s: args = %s, kwargs = %s, result = %s',
event, args, kwargs, result)
return result
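# Usage sketch (illustrative): subscribers are called with the event name
# followed by whatever arguments are passed to publish().
#
#   bus = EventMixin()
#   bus.add('scaled', lambda event, value: value * 2)
#   bus.publish('scaled', 21)   # -> [42]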
#
# Simple sequencing
#
class Sequencer(object):
def __init__(self):
self._preds = {}
self._succs = {}
self._nodes = set() # nodes with no preds/succs
def add_node(self, node):
self._nodes.add(node)
def remove_node(self, node, edges=False):
if node in self._nodes:
self._nodes.remove(node)
if edges:
for p in set(self._preds.get(node, ())):
self.remove(p, node)
for s in set(self._succs.get(node, ())):
self.remove(node, s)
# Remove empties
for k, v in list(self._preds.items()):
if not v:
del self._preds[k]
for k, v in list(self._succs.items()):
if not v:
del self._succs[k]
def add(self, pred, succ):
assert pred != succ
self._preds.setdefault(succ, set()).add(pred)
self._succs.setdefault(pred, set()).add(succ)
def remove(self, pred, succ):
assert pred != succ
try:
preds = self._preds[succ]
succs = self._succs[pred]
except KeyError:
raise ValueError('%r not a successor of anything' % succ)
try:
preds.remove(pred)
succs.remove(succ)
except KeyError:
raise ValueError('%r not a successor of %r' % (succ, pred))
def is_step(self, step):
return (step in self._preds or step in self._succs or
step in self._nodes)
def get_steps(self, final):
if not self.is_step(final):
raise ValueError('Unknown: %r' % final)
result = []
todo = []
seen = set()
todo.append(final)
while todo:
step = todo.pop(0)
if step in seen:
# if a step was already seen,
# move it to the end (so it will appear earlier
# when reversed on return) ... but not for the
# final step, as that would be confusing for
# users
if step != final:
result.remove(step)
result.append(step)
else:
seen.add(step)
result.append(step)
preds = self._preds.get(step, ())
todo.extend(preds)
return reversed(result)
@property
def strong_connections(self):
#http://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm
index_counter = [0]
stack = []
lowlinks = {}
index = {}
result = []
graph = self._succs
def strongconnect(node):
# set the depth index for this node to the smallest unused index
index[node] = index_counter[0]
lowlinks[node] = index_counter[0]
index_counter[0] += 1
stack.append(node)
# Consider successors
try:
successors = graph[node]
except Exception:
successors = []
for successor in successors:
if successor not in lowlinks:
# Successor has not yet been visited
strongconnect(successor)
lowlinks[node] = min(lowlinks[node],lowlinks[successor])
elif successor in stack:
# the successor is in the stack and hence in the current
# strongly connected component (SCC)
lowlinks[node] = min(lowlinks[node],index[successor])
# If `node` is a root node, pop the stack and generate an SCC
if lowlinks[node] == index[node]:
connected_component = []
while True:
successor = stack.pop()
connected_component.append(successor)
if successor == node: break
component = tuple(connected_component)
# storing the result
result.append(component)
for node in graph:
if node not in lowlinks:
strongconnect(node)
return result
@property
def dot(self):
result = ['digraph G {']
for succ in self._preds:
preds = self._preds[succ]
for pred in preds:
result.append(' %s -> %s;' % (pred, succ))
for node in self._nodes:
result.append(' %s;' % node)
result.append('}')
return '\n'.join(result)
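# Usage sketch (illustrative): edges run from predecessor to successor, and
# get_steps() yields them in dependency order.
#
#   seq = Sequencer()
#   seq.add('build', 'test')
#   seq.add('test', 'release')
#   list(seq.get_steps('release'))   # -> ['build', 'test', 'release']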
#
# Unarchiving functionality for zip, tar, tgz, tbz, whl
#
ARCHIVE_EXTENSIONS = ('.tar.gz', '.tar.bz2', '.tar', '.zip',
'.tgz', '.tbz', '.whl')
def unarchive(archive_filename, dest_dir, format=None, check=True):
def check_path(path):
if not isinstance(path, text_type):
path = path.decode('utf-8')
p = os.path.abspath(os.path.join(dest_dir, path))
if not p.startswith(dest_dir) or p[plen] != os.sep:
raise ValueError('path outside destination: %r' % p)
dest_dir = os.path.abspath(dest_dir)
plen = len(dest_dir)
archive = None
if format is None:
if archive_filename.endswith(('.zip', '.whl')):
format = 'zip'
elif archive_filename.endswith(('.tar.gz', '.tgz')):
format = 'tgz'
mode = 'r:gz'
elif archive_filename.endswith(('.tar.bz2', '.tbz')):
format = 'tbz'
mode = 'r:bz2'
elif archive_filename.endswith('.tar'):
format = 'tar'
mode = 'r'
else:
raise ValueError('Unknown format for %r' % archive_filename)
try:
if format == 'zip':
archive = ZipFile(archive_filename, 'r')
if check:
names = archive.namelist()
for name in names:
check_path(name)
else:
archive = tarfile.open(archive_filename, mode)
if check:
names = archive.getnames()
for name in names:
check_path(name)
if format != 'zip' and sys.version_info[0] < 3:
# See Python issue 17153. If the dest path contains Unicode,
# tarfile extraction fails on Python 2.x if a member path name
# contains non-ASCII characters - it leads to an implicit
# bytes -> unicode conversion using ASCII to decode.
for tarinfo in archive.getmembers():
if not isinstance(tarinfo.name, text_type):
tarinfo.name = tarinfo.name.decode('utf-8')
archive.extractall(dest_dir)
finally:
if archive:
archive.close()
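# Usage sketch (illustrative paths): the archive format is inferred from the
# file extension, and with check=True member paths are validated so they
# cannot escape the destination directory.
#
#   unarchive('downloads/example-1.0.tar.gz', '/tmp/example-1.0')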
def zip_dir(directory):
"""zip a directory tree into a BytesIO object"""
result = io.BytesIO()
dlen = len(directory)
with ZipFile(result, "w") as zf:
for root, dirs, files in os.walk(directory):
for name in files:
full = os.path.join(root, name)
rel = root[dlen:]
dest = os.path.join(rel, name)
zf.write(full, dest)
return result
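# Usage sketch (illustrative): the returned BytesIO can be written to disk or
# uploaded directly.
#
#   buf = zip_dir('build/archive-contents')
#   with open('archive.zip', 'wb') as f:
#       f.write(buf.getvalue())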
#
# Simple progress bar
#
UNITS = ('', 'K', 'M', 'G','T','P')
class Progress(object):
unknown = 'UNKNOWN'
def __init__(self, minval=0, maxval=100):
assert maxval is None or maxval >= minval
self.min = self.cur = minval
self.max = maxval
self.started = None
self.elapsed = 0
self.done = False
def update(self, curval):
assert self.min <= curval
assert self.max is None or curval <= self.max
self.cur = curval
now = time.time()
if self.started is None:
self.started = now
else:
self.elapsed = now - self.started
def increment(self, incr):
assert incr >= 0
self.update(self.cur + incr)
def start(self):
self.update(self.min)
return self
def stop(self):
if self.max is not None:
self.update(self.max)
self.done = True
@property
def maximum(self):
return self.unknown if self.max is None else self.max
@property
def percentage(self):
if self.done:
result = '100 %'
elif self.max is None:
result = ' ?? %'
else:
v = 100.0 * (self.cur - self.min) / (self.max - self.min)
result = '%3d %%' % v
return result
def format_duration(self, duration):
if (duration <= 0) and self.max is None or self.cur == self.min:
result = '??:??:??'
#elif duration < 1:
# result = '--:--:--'
else:
result = time.strftime('%H:%M:%S', time.gmtime(duration))
return result
@property
def ETA(self):
if self.done:
prefix = 'Done'
t = self.elapsed
#import pdb; pdb.set_trace()
else:
prefix = 'ETA '
if self.max is None:
t = -1
elif self.elapsed == 0 or (self.cur == self.min):
t = 0
else:
#import pdb; pdb.set_trace()
t = float(self.max - self.min)
t /= self.cur - self.min
t = (t - 1) * self.elapsed
return '%s: %s' % (prefix, self.format_duration(t))
@property
def speed(self):
if self.elapsed == 0:
result = 0.0
else:
result = (self.cur - self.min) / self.elapsed
for unit in UNITS:
if result < 1000:
break
result /= 1000.0
return '%d %sB/s' % (result, unit)
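# Usage sketch (illustrative):
#
#   p = Progress(maxval=1000).start()
#   p.update(250)
#   p.percentage    # -> ' 25 %'
#   p.stop()
#   p.percentage    # -> '100 %'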
#
# Glob functionality
#
RICH_GLOB = re.compile(r'\{([^}]*)\}')
_CHECK_RECURSIVE_GLOB = re.compile(r'[^/\\,{]\*\*|\*\*[^/\\,}]')
_CHECK_MISMATCH_SET = re.compile(r'^[^{]*\}|\{[^}]*$')
def iglob(path_glob):
"""Extended globbing function that supports ** and {opt1,opt2,opt3}."""
if _CHECK_RECURSIVE_GLOB.search(path_glob):
msg = """invalid glob %r: recursive glob "**" must be used alone"""
raise ValueError(msg % path_glob)
if _CHECK_MISMATCH_SET.search(path_glob):
msg = """invalid glob %r: mismatching set marker '{' or '}'"""
raise ValueError(msg % path_glob)
return _iglob(path_glob)
def _iglob(path_glob):
rich_path_glob = RICH_GLOB.split(path_glob, 1)
if len(rich_path_glob) > 1:
assert len(rich_path_glob) == 3, rich_path_glob
prefix, set, suffix = rich_path_glob
for item in set.split(','):
for path in _iglob(''.join((prefix, item, suffix))):
yield path
else:
if '**' not in path_glob:
for item in std_iglob(path_glob):
yield item
else:
prefix, radical = path_glob.split('**', 1)
if prefix == '':
prefix = '.'
if radical == '':
radical = '*'
else:
# we support both
radical = radical.lstrip('/')
radical = radical.lstrip('\\')
for path, dir, files in os.walk(prefix):
path = os.path.normpath(path)
for fn in _iglob(os.path.join(path, radical)):
yield fn
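# Example (illustrative patterns): '**' recurses into subdirectories and
# '{a,b}' expands to alternatives, so both of these are accepted:
#
#   list(iglob('src/**/*.py'))
#   list(iglob('docs/{guide,api}/*.rst'))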
#
# HTTPSConnection which verifies certificates/matches domains
#
class HTTPSConnection(httplib.HTTPSConnection):
ca_certs = None # set this to the path to the certs file (.pem)
check_domain = True # only used if ca_certs is not None
# noinspection PyPropertyAccess
def connect(self):
sock = socket.create_connection((self.host, self.port), self.timeout)
if getattr(self, '_tunnel_host', False):
self.sock = sock
self._tunnel()
if not hasattr(ssl, 'SSLContext'):
# For 2.x
if self.ca_certs:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
cert_reqs=cert_reqs,
ssl_version=ssl.PROTOCOL_SSLv23,
ca_certs=self.ca_certs)
else:
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.options |= ssl.OP_NO_SSLv2
if self.cert_file:
context.load_cert_chain(self.cert_file, self.key_file)
kwargs = {}
if self.ca_certs:
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(cafile=self.ca_certs)
if getattr(ssl, 'HAS_SNI', False):
kwargs['server_hostname'] = self.host
self.sock = context.wrap_socket(sock, **kwargs)
if self.ca_certs and self.check_domain:
try:
match_hostname(self.sock.getpeercert(), self.host)
logger.debug('Host verified: %s', self.host)
except CertificateError:
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
raise
class HTTPSHandler(BaseHTTPSHandler):
def __init__(self, ca_certs, check_domain=True):
BaseHTTPSHandler.__init__(self)
self.ca_certs = ca_certs
self.check_domain = check_domain
def _conn_maker(self, *args, **kwargs):
"""
This is called to create a connection instance. Normally you'd
pass a connection class to do_open, but it doesn't actually check for
a class, and just expects a callable. As long as we behave just as a
constructor would have, we should be OK. If it ever changes so that
we *must* pass a class, we'll create an UnsafeHTTPSConnection class
which just sets check_domain to False in the class definition, and
choose which one to pass to do_open.
"""
result = HTTPSConnection(*args, **kwargs)
if self.ca_certs:
result.ca_certs = self.ca_certs
result.check_domain = self.check_domain
return result
def https_open(self, req):
try:
return self.do_open(self._conn_maker, req)
except URLError as e:
if 'certificate verify failed' in str(e.reason):
raise CertificateError('Unable to verify server certificate '
'for %s' % req.host)
else:
raise
#
# To prevent against mixing HTTP traffic with HTTPS (examples: A Man-In-The-
# Middle proxy using HTTP listens on port 443, or an index mistakenly serves
# HTML containing a http://xyz link when it should be https://xyz),
# you can use the following handler class, which does not allow HTTP traffic.
#
# It works by inheriting from HTTPHandler - so build_opener won't add a
# handler for HTTP itself.
#
class HTTPSOnlyHandler(HTTPSHandler, HTTPHandler):
def http_open(self, req):
raise URLError('Unexpected HTTP request on what should be a secure '
'connection: %s' % req)
#
# XML-RPC with timeouts
#
_ver_info = sys.version_info[:2]
if _ver_info == (2, 6):
class HTTP(httplib.HTTP):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class HTTPS(httplib.HTTPS):
def __init__(self, host='', port=None, **kwargs):
if port == 0: # 0 means use port 0, not the default port
port = None
self._setup(self._connection_class(host, port, **kwargs))
class Transport(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if _ver_info == (2, 6):
result = HTTP(h, timeout=self.timeout)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
result = self._connection[1]
return result
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if _ver_info == (2, 6):
result = HTTPS(host, None, **kwargs)
else:
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None,
**kwargs)
result = self._connection[1]
return result
class ServerProxy(xmlrpclib.ServerProxy):
def __init__(self, uri, **kwargs):
self.timeout = timeout = kwargs.pop('timeout', None)
# The above classes only come into play if a timeout
# is specified
if timeout is not None:
scheme, _ = splittype(uri)
use_datetime = kwargs.get('use_datetime', 0)
if scheme == 'https':
tcls = SafeTransport
else:
tcls = Transport
kwargs['transport'] = t = tcls(timeout, use_datetime=use_datetime)
self.transport = t
xmlrpclib.ServerProxy.__init__(self, uri, **kwargs)
#
# CSV functionality. This is provided because on 2.x, the csv module can't
# handle Unicode. However, we need to deal with Unicode in e.g. RECORD files.
#
def _csv_open(fn, mode, **kwargs):
if sys.version_info[0] < 3:
mode += 'b'
else:
kwargs['newline'] = ''
return open(fn, mode, **kwargs)
class CSVBase(object):
defaults = {
'delimiter': str(','), # The strs are used because we need native
'quotechar': str('"'), # str in the csv API (2.x won't take
'lineterminator': str('\n') # Unicode)
}
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.stream.close()
class CSVReader(CSVBase):
def __init__(self, **kwargs):
if 'stream' in kwargs:
stream = kwargs['stream']
if sys.version_info[0] >= 3:
# needs to be a text stream
stream = codecs.getreader('utf-8')(stream)
self.stream = stream
else:
self.stream = _csv_open(kwargs['path'], 'r')
self.reader = csv.reader(self.stream, **self.defaults)
def __iter__(self):
return self
def next(self):
result = next(self.reader)
if sys.version_info[0] < 3:
for i, item in enumerate(result):
if not isinstance(item, text_type):
result[i] = item.decode('utf-8')
return result
__next__ = next
class CSVWriter(CSVBase):
def __init__(self, fn, **kwargs):
self.stream = _csv_open(fn, 'w')
self.writer = csv.writer(self.stream, **self.defaults)
def writerow(self, row):
if sys.version_info[0] < 3:
r = []
for item in row:
if isinstance(item, text_type):
item = item.encode('utf-8')
r.append(item)
row = r
self.writer.writerow(row)
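# Usage sketch (illustrative): both classes are context managers, and on 2.x
# they transparently handle UTF-8 so RECORD files round-trip as text.
#
#   with CSVWriter('RECORD') as writer:
#       writer.writerow(['pkg/__init__.py', 'sha256=...', '1024'])
#   with CSVReader(path='RECORD') as reader:
#       rows = list(reader)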
#
# Configurator functionality
#
class Configurator(BaseConfigurator):
value_converters = dict(BaseConfigurator.value_converters)
value_converters['inc'] = 'inc_convert'
def __init__(self, config, base=None):
super(Configurator, self).__init__(config)
self.base = base or os.getcwd()
def configure_custom(self, config):
def convert(o):
if isinstance(o, (list, tuple)):
result = type(o)([convert(i) for i in o])
elif isinstance(o, dict):
if '()' in o:
result = self.configure_custom(o)
else:
result = {}
for k in o:
result[k] = convert(o[k])
else:
result = self.convert(o)
return result
c = config.pop('()')
if not callable(c):
c = self.resolve(c)
props = config.pop('.', None)
# Check for valid identifiers
args = config.pop('[]', ())
if args:
args = tuple([convert(o) for o in args])
items = [(k, convert(config[k])) for k in config if valid_ident(k)]
kwargs = dict(items)
result = c(*args, **kwargs)
if props:
for n, v in props.items():
setattr(result, n, convert(v))
return result
def __getitem__(self, key):
result = self.config[key]
if isinstance(result, dict) and '()' in result:
self.config[key] = result = self.configure_custom(result)
return result
def inc_convert(self, value):
"""Default converter for the inc:// protocol."""
if not os.path.isabs(value):
value = os.path.join(self.base, value)
with codecs.open(value, 'r', encoding='utf-8') as f:
result = json.load(f)
return result
#
# Mixin for running subprocesses and capturing their output
#
class SubprocessMixin(object):
def __init__(self, verbose=False, progress=None):
self.verbose = verbose
self.progress = progress
def reader(self, stream, context):
"""
Read lines from a subprocess' output stream and either pass to a progress
callable (if specified) or write progress information to sys.stderr.
"""
progress = self.progress
verbose = self.verbose
while True:
s = stream.readline()
if not s:
break
if progress is not None:
progress(s, context)
else:
if not verbose:
sys.stderr.write('.')
else:
sys.stderr.write(s.decode('utf-8'))
sys.stderr.flush()
stream.close()
def run_command(self, cmd, **kwargs):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, **kwargs)
t1 = threading.Thread(target=self.reader, args=(p.stdout, 'stdout'))
t1.start()
t2 = threading.Thread(target=self.reader, args=(p.stderr, 'stderr'))
t2.start()
p.wait()
t1.join()
t2.join()
if self.progress is not None:
self.progress('done.', 'main')
elif self.verbose:
sys.stderr.write('done.\n')
return p
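# Usage sketch (illustrative): mix SubprocessMixin into a class that needs to
# run external commands; stdout/stderr are drained on background threads.
#
#   class Builder(SubprocessMixin):
#       pass
#
#   Builder(verbose=True).run_command(['python', 'setup.py', 'build'])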
| bsd-3-clause |
Kiddinglife/geconet | thirdparty/googlemock/scripts/generator/cpp/gmock_class_test.py | 3 | 11804 | #!/usr/bin/env python
#
# Copyright 2009 Neal Norwitz All Rights Reserved.
# Portions Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for gmock.scripts.generator.cpp.gmock_class."""
__author__ = '[email protected] (Neal Norwitz)'
import os
import sys
import unittest
# Allow the cpp imports below to work when run as a standalone script.
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from cpp import ast
from cpp import gmock_class
class TestCase(unittest.TestCase):
"""Helper class that adds assert methods."""
def StripLeadingWhitespace(self, lines):
"""Strip leading whitespace in each line in 'lines'."""
return '\n'.join([s.lstrip() for s in lines.split('\n')])
def assertEqualIgnoreLeadingWhitespace(self, expected_lines, lines):
"""Specialized assert that ignores the indent level."""
self.assertEqual(expected_lines, self.StripLeadingWhitespace(lines))
class GenerateMethodsTest(TestCase):
def GenerateMethodSource(self, cpp_source):
"""Convert C++ source to Google Mock output source lines."""
method_source_lines = []
# <test> is a pseudo-filename, it is not read or written.
builder = ast.BuilderFromSource(cpp_source, '<test>')
ast_list = list(builder.Generate())
gmock_class._GenerateMethods(method_source_lines, cpp_source, ast_list[0])
return '\n'.join(method_source_lines)
def testSimpleMethod(self):
source = """
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo();
Foo(int x);
Foo(const Foo& f);
Foo(Foo&& f);
~Foo();
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testVirtualDestructor(self):
source = """
class Foo {
public:
virtual ~Foo();
virtual int Bar() = 0;
};
"""
# The destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testExplicitlyDefaultedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = default;
Foo(const Foo& f) = default;
Foo(Foo&& f) = default;
~Foo() = default;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testExplicitlyDeletedConstructorsAndDestructor(self):
source = """
class Foo {
public:
Foo() = delete;
Foo(const Foo& f) = delete;
Foo(Foo&& f) = delete;
~Foo() = delete;
virtual int Bar() = 0;
};
"""
# The constructors and destructor should be ignored.
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleOverrideMethod(self):
source = """
class Foo {
public:
int Bar() override;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint());',
self.GenerateMethodSource(source))
def testSimpleConstMethod(self):
source = """
class Foo {
public:
virtual void Bar(bool flag) const;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD1(Bar,\nvoid(bool flag));',
self.GenerateMethodSource(source))
def testExplicitVoid(self):
source = """
class Foo {
public:
virtual int Bar(void);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0(Bar,\nint(void));',
self.GenerateMethodSource(source))
def testStrangeNewlineInParameter(self):
source = """
class Foo {
public:
virtual void Bar(int
a) = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvoid(int a));',
self.GenerateMethodSource(source))
def testDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testMultipleDefaultParameters(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42, char c = 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testRemovesCommentsWhenDefaultsArePresent(self):
source = """
class Foo {
public:
virtual void Bar(int a = 42 /* a comment */,
char /* other comment */ c= 'x') = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nvoid(int, char));',
self.GenerateMethodSource(source))
def testDoubleSlashCommentsInParameterListAreRemoved(self):
source = """
class Foo {
public:
virtual void Bar(int a, // inline comments should be elided.
int b // inline comments should be elided.
) const = 0;
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_CONST_METHOD2(Bar,\nvoid(int a, int b));',
self.GenerateMethodSource(source))
def testCStyleCommentsInParameterListAreNotRemoved(self):
# NOTE(nnorwitz): I'm not sure if it's the best behavior to keep these
# comments. Also note that C style comments after the last parameter
# are still elided.
source = """
class Foo {
public:
virtual const string& Bar(int /* keeper */, int b);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\nconst string&(int /* keeper */, int b));',
self.GenerateMethodSource(source))
def testArgsOfTemplateTypes(self):
source = """
class Foo {
public:
virtual int Bar(const vector<int>& v, map<int, string>* output);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD2(Bar,\n'
'int(const vector<int>& v, map<int, string>* output));',
self.GenerateMethodSource(source))
def testReturnTypeWithOneTemplateArg(self):
source = """
class Foo {
public:
virtual vector<int>* Bar(int n);
};"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nvector<int>*(int n));',
self.GenerateMethodSource(source))
def testReturnTypeWithManyTemplateArgs(self):
source = """
class Foo {
public:
virtual map<int, string> Bar();
};"""
# Comparing the comment text is brittle - we'll think of something
# better in case this gets annoying, but for now let's keep it simple.
self.assertEqualIgnoreLeadingWhitespace(
'// The following line won\'t really compile, as the return\n'
'// type has multiple template arguments. To fix it, use a\n'
'// typedef for the return type.\n'
'MOCK_METHOD0(Bar,\nmap<int, string>());',
self.GenerateMethodSource(source))
def testSimpleMethodInTemplatedClass(self):
source = """
template<class T>
class Foo {
public:
virtual int Bar();
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD0_T(Bar,\nint());',
self.GenerateMethodSource(source))
def testPointerArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C*);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C*));',
self.GenerateMethodSource(source))
def testReferenceArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C&);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C&));',
self.GenerateMethodSource(source))
def testArrayArgWithoutNames(self):
source = """
class Foo {
virtual int Bar(C[]);
};
"""
self.assertEqualIgnoreLeadingWhitespace(
'MOCK_METHOD1(Bar,\nint(C[]));',
self.GenerateMethodSource(source))
class GenerateMocksTest(TestCase):
def GenerateMocks(self, cpp_source):
"""Convert C++ source to complete Google Mock output source."""
# <test> is a pseudo-filename, it is not read or written.
filename = '<test>'
builder = ast.BuilderFromSource(cpp_source, filename)
ast_list = list(builder.Generate())
lines = gmock_class._GenerateMocks(filename, cpp_source, ast_list, None)
return '\n'.join(lines)
def testNamespaces(self):
source = """
namespace Foo {
namespace Bar { class Forward; }
namespace Baz {
class Test {
public:
virtual void Foo();
};
} // namespace Baz
} // namespace Foo
"""
expected = """\
namespace Foo {
namespace Baz {
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
} // namespace Baz
} // namespace Foo
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testClassWithStorageSpecifierMacro(self):
source = """
class STORAGE_SPECIFIER Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplatedForwardDeclaration(self):
source = """
template <class T> class Forward; // Forward declaration should be ignored.
class Test {
public:
virtual void Foo();
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD0(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplatedClass(self):
source = """
template <typename S, typename T>
class Test {
public:
virtual void Foo();
};
"""
expected = """\
template <typename T0, typename T1>
class MockTest : public Test<T0, T1> {
public:
MOCK_METHOD0_T(Foo,
void());
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplateInATemplateTypedef(self):
source = """
class Test {
public:
typedef std::vector<std::list<int>> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
def testTemplateInATemplateTypedefWithComma(self):
source = """
class Test {
public:
typedef std::function<void(
const vector<std::list<int>>&, int> FooType;
virtual void Bar(const FooType& test_arg);
};
"""
expected = """\
class MockTest : public Test {
public:
MOCK_METHOD1(Bar,
void(const FooType& test_arg));
};
"""
self.assertEqualIgnoreLeadingWhitespace(
expected, self.GenerateMocks(source))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
bsipocz/ccdproc | ccdproc/tests/test_ccdproc.py | 1 | 19488 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This module implements the base CCDData class
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from astropy.io import fits
from astropy.modeling import models
from astropy.units.quantity import Quantity
import astropy.units as u
from astropy.nddata import StdDevUncertainty
from numpy.testing import assert_array_equal
from astropy.tests.helper import pytest
from ..ccddata import CCDData
from ..core import *
from ..core import _blkavg
# test creating deviation
# success expected if u_image * u_gain = u_readnoise
@pytest.mark.parametrize('u_image,u_gain,u_readnoise,expect_success', [
(u.electron, None, u.electron, True),
(u.electron, u.electron, u.electron, False),
(u.adu, u.electron / u.adu, u.electron, True),
(u.electron, None, u.dimensionless_unscaled, False),
(u.electron, u.dimensionless_unscaled, u.electron, True),
(u.adu, u.dimensionless_unscaled, u.electron, False),
(u.adu, u.photon / u.adu, u.electron, False),
])
@pytest.mark.data_size(10)
def test_create_deviation(ccd_data, u_image, u_gain, u_readnoise,
                          expect_success):
ccd_data.unit = u_image
if u_gain:
gain = 2.0 * u_gain
else:
gain = None
readnoise = 5 * u_readnoise
    if expect_success:
ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise)
assert ccd_var.uncertainty.array.shape == (10, 10)
assert ccd_var.uncertainty.array.size == 100
assert ccd_var.uncertainty.array.dtype == np.dtype(float)
if gain:
expected_var = np.sqrt(2 * ccd_data.data + 5 ** 2) / 2
else:
expected_var = np.sqrt(ccd_data.data + 5 ** 2)
np.testing.assert_array_equal(ccd_var.uncertainty.array,
expected_var)
assert ccd_var.unit == ccd_data.unit
# uncertainty should *not* have any units -- does it?
with pytest.raises(AttributeError):
ccd_var.uncertainty.array.unit
else:
with pytest.raises(u.UnitsError):
ccd_var = create_deviation(ccd_data, gain=gain, readnoise=readnoise)
def test_create_deviation_keywords_must_have_unit(ccd_data):
# gain must have units if provided
with pytest.raises(TypeError):
create_deviation(ccd_data, gain=3)
# readnoise must have units
with pytest.raises(TypeError):
create_deviation(ccd_data, readnoise=5)
# readnoise must be provided
with pytest.raises(ValueError):
create_deviation(ccd_data)
# tests for overscan
@pytest.mark.parametrize('median,transpose', [
(False, False),
(False, True),
(True, False), ])
def test_subtract_overscan(ccd_data, median, transpose):
# create the overscan region
oscan = 300.
oscan_region = (slice(None), slice(0, 10)) # indices 0 through 9
fits_section = '[1:10, :]'
science_region = (slice(None), slice(10, None))
overscan_axis = 1
if transpose:
# Put overscan in first axis, not second, a test for #70
oscan_region = oscan_region[::-1]
fits_section = '[:, 1:10]'
science_region = science_region[::-1]
overscan_axis = 0
ccd_data.data[oscan_region] = oscan
# Add a fake sky background so the "science" part of the image has a
# different average than the "overscan" part.
sky = 10.
original_mean = ccd_data.data[science_region].mean()
ccd_data.data[science_region] += oscan + sky
# Test once using the overscan argument to specify the overscan region
ccd_data_overscan = subtract_overscan(ccd_data,
overscan=ccd_data[oscan_region],
overscan_axis=overscan_axis,
median=median, model=None)
# Is the mean of the "science" region the sum of sky and the mean the
# "science" section had before backgrounds were added?
np.testing.assert_almost_equal(
ccd_data_overscan.data[science_region].mean(),
sky + original_mean)
# Is the overscan region zero?
assert (ccd_data_overscan.data[oscan_region] == 0).all()
# Now do what should be the same subtraction, with the overscan specified
# with the fits_section
ccd_data_fits_section = subtract_overscan(ccd_data,
overscan_axis=overscan_axis,
fits_section=fits_section,
median=median, model=None)
# Is the mean of the "science" region the sum of sky and the mean the
# "science" section had before backgrounds were added?
np.testing.assert_almost_equal(
ccd_data_fits_section.data[science_region].mean(),
sky + original_mean)
# Is the overscan region zero?
assert (ccd_data_fits_section.data[oscan_region] == 0).all()
# Do both ways of subtracting overscan give exactly the same result?
np.testing.assert_array_equal(ccd_data_overscan[science_region],
ccd_data_fits_section[science_region])
# A more substantial test of overscan modeling
@pytest.mark.parametrize('transpose', [
True,
False])
def test_subtract_overscan_model(ccd_data, transpose):
# create the overscan region
size = ccd_data.shape[0]
oscan_region = (slice(None), slice(0, 10))
science_region = (slice(None), slice(10, None))
yscan, xscan = np.mgrid[0:size, 0:size] / 10.0 + 300.0
if transpose:
oscan_region = oscan_region[::-1]
science_region = science_region[::-1]
scan = xscan
overscan_axis = 0
else:
overscan_axis = 1
scan = yscan
original_mean = ccd_data.data[science_region].mean()
ccd_data.data[oscan_region] = 0. # only want overscan in that region
ccd_data.data = ccd_data.data + scan
ccd_data = subtract_overscan(ccd_data, overscan=ccd_data[oscan_region],
overscan_axis=overscan_axis,
median=False, model=models.Polynomial1D(2))
np.testing.assert_almost_equal(ccd_data.data[science_region].mean(),
original_mean)
def test_subtract_overscan_fails(ccd_data):
# do we get an error if the *image* is neither CCDData nor an array?
with pytest.raises(TypeError):
subtract_overscan(3, np.zeros((5, 5)))
# do we get an error if the *overscan* is not an image or an array?
with pytest.raises(TypeError):
subtract_overscan(np.zeros((10, 10)), 3, median=False, model=None)
# Do we get an error if we specify both overscan and fits_section?
with pytest.raises(TypeError):
subtract_overscan(ccd_data, overscan=ccd_data[0:10],
fits_section='[1:10]')
# do we raise an error if we specify neither overscan nor fits_section?
with pytest.raises(TypeError):
subtract_overscan(ccd_data)
# Does a fits_section which is not a string raise an error?
with pytest.raises(TypeError):
subtract_overscan(ccd_data, fits_section=5)
def test_trim_image_fits_section_requires_string(ccd_data):
with pytest.raises(TypeError):
trim_image(ccd_data, fits_section=5)
@pytest.mark.parametrize('mask_data, uncertainty', [
(False, False),
(True, True)])
@pytest.mark.data_size(50)
def test_trim_image_fits_section(ccd_data, mask_data, uncertainty):
if mask_data:
ccd_data.mask = np.zeros_like(ccd_data)
if uncertainty:
err = np.random.normal(size=ccd_data.shape)
ccd_data.uncertainty = StdDevUncertainty(err)
trimmed = trim_image(ccd_data, fits_section='[20:40,:]')
# FITS reverse order, bounds are inclusive and starting index is 1-based
assert trimmed.shape == (50, 21)
np.testing.assert_array_equal(trimmed.data, ccd_data[:, 19:40])
if mask_data:
assert trimmed.shape == trimmed.mask.shape
if uncertainty:
assert trimmed.shape == trimmed.uncertainty.array.shape
@pytest.mark.data_size(50)
def test_trim_image_no_section(ccd_data):
trimmed = trim_image(ccd_data[:, 19:40])
assert trimmed.shape == (50, 21)
np.testing.assert_array_equal(trimmed.data, ccd_data[:, 19:40])
def test_subtract_bias(ccd_data):
data_avg = ccd_data.data.mean()
bias_level = 5.0
ccd_data.data = ccd_data.data + bias_level
ccd_data.header['key'] = 'value'
master_bias_array = np.zeros_like(ccd_data.data) + bias_level
master_bias = CCDData(master_bias_array, unit=ccd_data.unit)
no_bias = subtract_bias(ccd_data, master_bias, add_keyword=None)
# Does the data we are left with have the correct average?
np.testing.assert_almost_equal(no_bias.data.mean(), data_avg)
# With logging turned off, metadata should not change
assert no_bias.header == ccd_data.header
del no_bias.header['key']
assert 'key' in ccd_data.header
assert no_bias.header is not ccd_data.header
@pytest.mark.data_size(50)
def test_subtract_bias_fails(ccd_data):
# Should fail if shapes don't match
bias = CCDData(np.array([200, 200]), unit=u.adu)
with pytest.raises(ValueError):
subtract_bias(ccd_data, bias)
# should fail because units don't match
bias = CCDData(np.zeros_like(ccd_data), unit=u.meter)
with pytest.raises(ValueError):
subtract_bias(ccd_data, bias)
@pytest.mark.parametrize('exposure_keyword', [True, False])
@pytest.mark.parametrize('explicit_times', [True, False])
@pytest.mark.parametrize('scale', [True, False])
def test_subtract_dark(ccd_data, explicit_times, scale, exposure_keyword):
exptime = 30.0
exptime_key = 'exposure'
exposure_unit = u.second
dark_level = 1.7
master_dark_data = np.zeros_like(ccd_data.data) + dark_level
master_dark = CCDData(data=master_dark_data, unit=u.adu)
master_dark.header[exptime_key] = 2 * exptime
dark_exptime = master_dark.header[exptime_key]
ccd_data.header[exptime_key] = exptime
dark_exposure_unit = exposure_unit
if explicit_times:
# test case when units of dark and data exposures are different
dark_exposure_unit = u.minute
dark_sub = subtract_dark(ccd_data, master_dark,
dark_exposure=dark_exptime * dark_exposure_unit,
data_exposure=exptime * exposure_unit,
scale=scale, add_keyword=None)
elif exposure_keyword:
key = Keyword(exptime_key, unit=u.second)
dark_sub = subtract_dark(ccd_data, master_dark,
exposure_time=key,
scale=scale, add_keyword=None)
else:
dark_sub = subtract_dark(ccd_data, master_dark,
exposure_time=exptime_key,
exposure_unit=u.second,
scale=scale, add_keyword=None)
dark_scale = 1.0
if scale:
dark_scale = float((exptime / dark_exptime) *
(exposure_unit / dark_exposure_unit))
np.testing.assert_array_equal(ccd_data.data - dark_scale * dark_level,
dark_sub.data)
# Headers should have the same content...do they?
assert dark_sub.header == ccd_data.header
# But the headers should not be the same object -- a copy was made
assert dark_sub.header is not ccd_data.header
def test_subtract_dark_fails(ccd_data):
# None of these tests check a result so the content of the master
# can be anything.
ccd_data.header['exptime'] = 30.0
master = ccd_data.copy()
# Do we fail if we give one of dark_exposure, data_exposure but not both?
with pytest.raises(TypeError):
subtract_dark(ccd_data, master, dark_exposure=30 * u.second)
with pytest.raises(TypeError):
subtract_dark(ccd_data, master, data_exposure=30 * u.second)
# Do we fail if we supply dark_exposure and data_exposure and exposure_time
with pytest.raises(TypeError):
subtract_dark(ccd_data, master, dark_exposure=10 * u.second,
data_exposure=10 * u.second,
exposure_time='exptime')
# Fail if we supply none of the exposure-related arguments?
with pytest.raises(TypeError):
subtract_dark(ccd_data, master)
# Fail if we supply exposure time but not a unit?
with pytest.raises(TypeError):
subtract_dark(ccd_data, master, exposure_time='exptime')
# Fail if ccd_data or master are not CCDData objects?
with pytest.raises(TypeError):
subtract_dark(ccd_data.data, master, exposure_time='exptime')
with pytest.raises(TypeError):
subtract_dark(ccd_data, master.data, exposure_time='exptime')
# test for flat correction
@pytest.mark.data_scale(10)
def test_flat_correct(ccd_data):
# add metadata to header for a test below...
ccd_data.header['my_key'] = 42
size = ccd_data.shape[0]
# create the flat, with some scatter
data = 2 * np.random.normal(loc=1.0, scale=0.05, size=(size, size))
flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit)
flat_data = flat_correct(ccd_data, flat, add_keyword=None)
#check that the flat was normalized
# Should be the case that flat * flat_data = ccd_data * flat.data.mean
# if the normalization was done correctly.
np.testing.assert_almost_equal((flat_data.data * flat.data).mean(),
ccd_data.data.mean() * flat.data.mean())
np.testing.assert_allclose(ccd_data.data / flat_data.data,
flat.data / flat.data.mean())
# check that metadata is unchanged (since logging is turned off)
assert flat_data.header == ccd_data.header
# test for flat correction with min_value
@pytest.mark.data_scale(10)
def test_flat_correct_min_value(ccd_data):
size = ccd_data.shape[0]
# create the flat
data = 2 * np.random.normal(loc=1.0, scale=0.05, size=(size, size))
flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit)
flat_orig_data = flat.data.copy()
min_value = 2.1 # should replace some, but not all, values
flat_data = flat_correct(ccd_data, flat, min_value=min_value)
flat_with_min = flat.copy()
flat_with_min.data[flat_with_min.data < min_value] = min_value
#check that the flat was normalized
np.testing.assert_almost_equal((flat_data.data * flat_with_min.data).mean(),
ccd_data.data.mean() * flat_with_min.data.mean())
np.testing.assert_allclose(ccd_data.data / flat_data.data,
flat_with_min.data / flat_with_min.data.mean())
# Test that flat is not modified.
assert (flat_orig_data == flat.data).all()
# test for deviation and for flat correction
@pytest.mark.data_scale(10)
@pytest.mark.data_mean(300)
def test_flat_correct_deviation(ccd_data):
size = ccd_data.shape[0]
ccd_data.unit = u.electron
ccd_data = create_deviation(ccd_data, readnoise=5 * u.electron)
# create the flat
data = 2 * np.ones((size, size))
flat = CCDData(data, meta=fits.header.Header(), unit=ccd_data.unit)
flat = create_deviation(flat, readnoise=0.5 * u.electron)
ccd_data = flat_correct(ccd_data, flat)
# tests for gain correction
def test_gain_correct(ccd_data):
init_data = ccd_data.data
gain_data = gain_correct(ccd_data, gain=3, add_keyword=None)
assert_array_equal(gain_data.data, 3 * init_data)
assert ccd_data.meta == gain_data.meta
def test_gain_correct_quantity(ccd_data):
init_data = ccd_data.data
g = Quantity(3, u.electron / u.adu)
ccd_data = gain_correct(ccd_data, gain=g)
assert_array_equal(ccd_data.data, 3 * init_data)
assert ccd_data.unit == u.electron
#test transform is ccd
def test_transform_isccd(ccd_data):
with pytest.raises(TypeError):
transform_image(1, 1)
#test function is callable
def test_transform_isfunc(ccd_data):
with pytest.raises(TypeError):
transform_image(ccd_data, 1)
@pytest.mark.parametrize('mask_data, uncertainty', [
(False, False),
(True, True)])
@pytest.mark.data_size(50)
def test_transform_image(ccd_data, mask_data, uncertainty):
if mask_data:
ccd_data.mask = np.zeros_like(ccd_data)
ccd_data.mask[10, 10] = 1
if uncertainty:
err = np.random.normal(size=ccd_data.shape)
ccd_data.uncertainty = StdDevUncertainty(err)
def tran(arr):
return 10 * arr
tran = transform_image(ccd_data, tran)
assert_array_equal(10 * ccd_data.data, tran.data)
if mask_data:
assert tran.shape == tran.mask.shape
assert_array_equal(ccd_data.mask, tran.mask)
if uncertainty:
assert tran.shape == tran.uncertainty.array.shape
assert_array_equal(10 * ccd_data.uncertainty.array,
tran.uncertainty.array)
#test rebinning ndarray
def test_rebin_ndarray(ccd_data):
with pytest.raises(TypeError):
rebin(1, (5, 5))
#test rebinning dimensions
@pytest.mark.data_size(10)
def test_rebin_dimensions(ccd_data):
with pytest.raises(ValueError):
rebin(ccd_data.data, (5,))
#test rebinning dimensions
@pytest.mark.data_size(10)
def test_rebin_ccddata_dimensions(ccd_data):
with pytest.raises(ValueError):
rebin(ccd_data, (5,))
#test rebinning works
@pytest.mark.data_size(10)
def test_rebin_larger(ccd_data):
a = ccd_data.data
b = rebin(a, (20, 20))
assert b.shape == (20, 20)
np.testing.assert_almost_equal(b.sum(), 4 * a.sum())
#test rebinning is invariant
@pytest.mark.data_size(10)
def test_rebin_smaller(ccd_data):
a = ccd_data.data
b = rebin(a, (20, 20))
c = rebin(b, (10, 10))
assert c.shape == (10, 10)
assert (c-a).sum() == 0
#test rebinning with ccddata object
@pytest.mark.parametrize('mask_data, uncertainty', [
(False, False),
(True, True)])
@pytest.mark.data_size(10)
def test_rebin_ccddata(ccd_data, mask_data, uncertainty):
if mask_data:
ccd_data.mask = np.zeros_like(ccd_data)
if uncertainty:
err = np.random.normal(size=ccd_data.shape)
ccd_data.uncertainty = StdDevUncertainty(err)
b = rebin(ccd_data, (20, 20))
assert b.shape == (20, 20)
if mask_data:
assert b.mask.shape == (20, 20)
if uncertainty:
assert b.uncertainty.array.shape == (20, 20)
#test blockaveraging ndarray
def test__blkavg_ndarray(ccd_data):
with pytest.raises(TypeError):
_blkavg(1, (5, 5))
#test rebinning dimensions
@pytest.mark.data_size(10)
def test__blkavg_dimensions(ccd_data):
with pytest.raises(ValueError):
_blkavg(ccd_data.data, (5,))
#test blkavg works
@pytest.mark.data_size(20)
def test__blkavg_larger(ccd_data):
a = ccd_data.data
b = _blkavg(a, (10, 10))
assert b.shape == (10, 10)
np.testing.assert_almost_equal(b.sum(), 0.25 * a.sum())
| bsd-3-clause |
sugarsweetrobotics/wasanbon | wasanbon/core/plugins/admin/systeminstaller_plugin/__init__.py | 1 | 15530 | import os, sys, shutil
import wasanbon
from wasanbon.core.plugins import PluginFunction, manifest
class Plugin(PluginFunction):
""" This plugin provides APIs to install RTCs into System (automatically editting rtc.conf for rtcd to load RTCs when launched) """
def __init__(self):
#PluginFunction.__init__(self)
super(Plugin, self).__init__()
pass
def depends(self):
return ['admin.environment', 'admin.rtcconf', 'admin.rtc']
def get_installed_rtc_names(self, package, language='all', verbose=False):
rtcs = []
languages = ['C++', 'Java', 'Python']
if language != 'all':
languages = [language]
for lang in languages:
rtcconf = admin.rtcconf.RTCConf(package.rtcconf[lang])
key = 'manager.components.precreate'
rtcs = rtcs + [rtc.strip() for rtc in rtcconf[key].split(',') if len(rtc.strip()) != 0]
return rtcs
    def __get_rtc_name_from_standalone_command(self, package, cmd):
        """ Return the RTC name referenced by a standalone launch command, or an empty string if the command does not point at an RTC binary/script of this package. """
rtc_launch_cmd = cmd.split()[0]
post_fix = 'Comp'
if sys.platform == 'win32':
post_fix = 'Comp.exe'
if rtc_launch_cmd.startswith(package.get_binpath(fullpath=False)) and rtc_launch_cmd.endswith(post_fix):
return rtc_launch_cmd[len(package.get_binpath(fullpath=False))+1:-(len(post_fix))]
elif rtc_launch_cmd.startswith(package.get_rtcpath(fullpath=False)) and rtc_launch_cmd.endswith('.py'):
elems = rtc_launch_cmd.split('/')
cmd = elems[len(elems)-1]
return cmd[:-(3)]
else:
return ""
def get_rtcd_nameservers(self, package, verbose=False):
all_nss = {}
for lang in ['C++', 'Java', 'Python']:
rtcconf = admin.rtcconf.RTCConf(package.rtcconf[lang])
key = 'corba.nameservers'
nss = []
for ns in [ns.strip() for ns in rtcconf[key].split(',')]:
if not ':' in ns: ns = ns + ':2809'
if not ns in nss: nss.append(ns)
all_nss[lang] = nss
return all_nss
def get_rtcd_manager_addresses(self, package, verbose=False):
all_nss = self.get_rtcd_nameservers(package, verbose=verbose)
manager_addrs = {}
for lang in ['C++', 'Java', 'Python']:
rtcconf = admin.rtcconf.RTCConf(package.rtcconf[lang])
naming_rule_of_manager = rtcconf['manager.naming_formats'] # %n_cpp.mgr
name = naming_rule_of_manager.replace('%n', 'manager').strip()
if len(all_nss[lang]) == 0:
manager_addrs[lang] = ['localhost:2809/%s' % name]
else:
manager_addrs[lang] = [ns + '/'+ name for ns in all_nss[lang]]
return manager_addrs
def get_installed_standalone_rtc_names(self, package, verbose=False):
rtcs = []
setting = package.setting
cmds = setting.get('standalone', [])
for cmd in cmds:
rtc_name = self.__get_rtc_name_from_standalone_command(package, cmd)
if len(rtc_name) == 0: continue
try:
rtc = admin.rtc.get_rtc_from_package(package, rtc_name, verbose=verbose)
rtcs.append(rtc_name)
except wasanbon.RTCNotFoundException:
pass
return rtcs
def is_installed(self, package, rtc, verbose=False, standalone=False):
name = rtc.rtcprofile.basicInfo.name
if not standalone:
return name in self.get_installed_rtc_names(package, verbose=verbose)
else:
return name in self.get_installed_standalone_rtc_names(package, verbose=verbose)
def install_rtc_in_package(self, package, rtc, verbose=False,
preload=True, precreate=True, copy_conf=True,
rtcconf_filename="",
copy_bin=True, standalone=False, conffile=None, allow_duplication=False):
if verbose: sys.stdout.write('# Installing RTC in package %s\n' % package.name)
if not standalone and self.is_installed(package, rtc, standalone=True, verbose=verbose):
if verbose:
sys.stdout.write('## RTC (%s) is already installed as standalone.\n' % rtc.name)
sys.stdout.write('## Install standalone again.\n')
standalone = True
if standalone:
name = rtc.rtcprofile.basicInfo.name
targetconf = os.path.join(package.get_confpath(), 'rtc_' + name + '.conf')
conffilepath = package.rtcconf[rtc.rtcprofile.language.kind]
shutil.copy(conffilepath, targetconf)
rtcconf = admin.rtcconf.RTCConf(targetconf)
rtcconf['manager.modules.load_path'] = ''
rtcconf['manager.modules.preload'] = ''
rtcconf['manager.components.precreate'] = ''
rtcconf['manager.is_master'] = 'NO'
rtcconf['logger.file_name'] = './log/standalonertc_%s' % name
for key in rtcconf.keys():
if key.find('config_file') > 0:
rtcconf.pop(key)
targetconf = os.path.join(package.get_confpath(fullpath=False), 'rtc_' + name + '.conf')
targetconf = targetconf.replace('\\', '/')
else: # not standalone
if len(rtcconf_filename) == 0:
rtcconf = admin.rtcconf.RTCConf(package.rtcconf[rtc.rtcprofile.language.kind])
else:
rtcconf = admin.rtcconf.RTCConf(rtcconf_filename)
targetfile = copy_binary_from_rtc(package, rtc, verbose=verbose, standalone=standalone)
if len(targetfile) == 0:
targetfile = os.path.join(package.get_binpath(fullpath=False), rtc.get_rtc_file_path())
pass
rtc_count = 0
if standalone:
backup_dir = os.path.join(package.path, 'backup')
if not os.path.isdir(backup_dir):
os.mkdir(backup_dir)
setting_filename = os.path.join(package.path, 'setting.yaml')
backup_filename = os.path.join(backup_dir, 'setting.yaml'+wasanbon.timestampstr())
shutil.copy(setting_filename, backup_filename)
import yaml
dic = yaml.load(open(backup_filename, 'r'))
cmd_list = [cmd for cmd in dic['application'].get('standalone', []) if cmd.startswith(targetfile)]
if len(cmd_list) == 0:
dic['application']['standalone'] = dic['application'].get('standalone', []) + [targetfile + ' -f ' + targetconf]
open(setting_filename, 'w').write(yaml.dump(dic, default_flow_style=False))
pass
else: # If not standalone
if verbose: sys.stdout.write('### Setting manager.modules.load_path:\n')
rtcconf.append('manager.modules.load_path', os.path.dirname(targetfile))
if verbose: sys.stdout.write('### OK.\n')
if preload:
if verbose: sys.stdout.write('### Setting manager.modules.preload:\n')
rtcconf.append('manager.modules.preload', os.path.basename(targetfile))
if verbose: sys.stdout.write('### OK.\n')
if precreate:
if verbose: sys.stdout.write('### Setting manager.components.precreate:\n')
rtc_count = rtcconf.append('manager.components.precreate', rtc.rtcprofile.basicInfo.name, verbose=verbose, allow_duplicate=allow_duplication)
if rtc_count > 0:
if verbose: sys.stdout.write('### OK.\n')
else:
if verbose: sys.stdout.write('### Failed.\n')
return -1
if conffile == None:
confpath = copy_conf_from_rtc(package, rtc, verbose=verbose, force=copy_conf, rtc_count=rtc_count-1)
else:
confpath = conffile
if confpath:
key = rtc.rtcprofile.basicInfo.category + '.' + rtc.rtcprofile.basicInfo.name + '%s.config_file' % (rtc_count-1)
if verbose:
sys.stdout.write('## Configuring System. Set (%s) to %s\n' % (key, confpath))
rtcconf.append(key, confpath)
rtcconf.sync()
return 0
def uninstall_rtc_from_package(self, package, rtc, rtcconf_filename=None, verbose=False):
if self.is_installed(package, rtc, standalone=True):
return self.uninstall_standalone_rtc_from_package(package, rtc, verbose=verbose)
if verbose: sys.stdout.write('## Uninstall RTC (%s) from package\n' % rtc.rtcprofile.basicInfo.name)
if rtcconf_filename:
rtcconf = admin.rtcconf.RTCConf(rtcconf_filename)
else:
rtcconf = admin.rtcconf.RTCConf(package.rtcconf[rtc.rtcprofile.language.kind])
name = rtc.rtcprofile.basicInfo.name
targetfile = os.path.join(package.get_binpath(), os.path.basename(rtc.get_rtc_file_path()))
language = rtc.rtcprofile.language.kind
if language == 'C++':
filename = name + wasanbon.get_bin_file_ext()
elif language == 'Java':
filename = name + '.jar'
elif language == 'Python':
filename = name + '.py'
else:
raise wasanbon.UnsupportedSystemException()
rtcconf.remove('manager.components.precreate', name, verbose=verbose)
if len(rtcconf['manager.components.precreate'].strip()) == 0:
rtcconf.remove('manager.components.precreate')
rtcconf.remove('manager.modules.preload', filename, verbose=verbose)
if len(rtcconf['manager.modules.preload'].strip()) == 0:
rtcconf.remove('manager.modules.preload')
rtcconf.remove('manager.modules.load_path')
keys = [rtc.rtcprofile.basicInfo.category + '.' + rtc.rtcprofile.basicInfo.name + '.config_file']
for i in range(0, 16):
keys.append(rtc.rtcprofile.basicInfo.category + '.' + rtc.rtcprofile.basicInfo.name + str(i) + '.config_file')
for k in keys:
rtcconf.remove(k)
rtcconf.sync()
def uninstall_all_rtc_from_package(self, package, rtcconf_filename=None, verbose=False):
if verbose: sys.stdout.write('## Uninstall All RTC from conf in package\n')
if rtcconf_filename:
rtcconf = admin.rtcconf.RTCConf(rtcconf_filename)
else:
rtcconf = admin.rtcconf.RTCConf(package.rtcconf[rtc.rtcprofile.language.kind])
rtcconf.remove('manager.components.precreate')
rtcconf.remove('manager.modules.preload')
rtcconf.remove('manager.modules.load_path')
"""
keys = [rtc.rtcprofile.basicInfo.category + '.' + rtc.rtcprofile.basicInfo.name + '.config_file']
for i in range(0, 16):
keys.append(rtc.rtcprofile.basicInfo.category + '.' + rtc.rtcprofile.basicInfo.name + str(i) + '.config_file')
for k in keys:
rtcconf.remove(k)
"""
rtcconf.sync()
def uninstall_standalone_rtc_from_package(self, package, rtc, verbose=False):
rtcs = []
cmds = setting = package.setting.get('standalone', [])
uninstall_cmd = None
for cmd in cmds:
if self.__get_rtc_name_from_standalone_command(package, cmd) == rtc.rtcprofile.basicInfo.name:
if verbose: sys.stdout.write('## Uninstalling RTC (%s) from package (--standalone mode)\n' % rtc.rtcprofile.basicInfo.name)
uninstall_cmd = cmd
backup_dir = os.path.join(package.path, 'backup')
if not os.path.isdir(backup_dir):
os.mkdir(backup_dir)
pass
setting_filename = os.path.join(package.path, 'setting.yaml')
backup_filename = os.path.join(backup_dir, 'setting.yaml'+wasanbon.timestampstr())
shutil.copy(setting_filename, backup_filename)
import yaml
dic = yaml.load(open(backup_filename, 'r'))
cmd_list = [cmd for cmd in dic['application'].get('standalone', []) if cmd != uninstall_cmd]
if len(cmd_list) == 0 and 'standalone' in dic['application'].keys():
del dic['application']['standalone']
open(setting_filename, 'w').write(yaml.dump(dic, default_flow_style=False))
return 0
def copy_binary_from_rtc(package, rtc, verbose=False, standalone=False):
if standalone:
filepath = rtc.get_rtc_executable_file_path(verbose=verbose)
else:
filepath = rtc.get_rtc_file_path(verbose=verbose)
if verbose: sys.stdout.write('## Copying RTC Binary File from %s to %s\n' % (filepath, 'bin'))
if len(filepath) == 0:
sys.stdout.write(" - Can not find RTC file in RTC's directory\n")
return ""
if verbose: sys.stdout.write('## Detect RTC binary %s\n' % filepath)
if rtc.rtcprofile.language.kind == 'Python':
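        # Python RTCs are not copied; only the module path relative to the package root is recorded.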
norm_path = os.path.normcase(os.path.normpath(os.path.split(filepath)[0]))
prefix = os.path.commonprefix([package.path, norm_path])
bin_dir_rel = norm_path[len(package.path)+1:]
targetfile = os.path.join(bin_dir_rel, os.path.basename(filepath))
else:
bin_dir = package.get_binpath()
bin_dir_rel = package.get_binpath(fullpath=False)
if not os.path.isdir(bin_dir):
os.mkdir(bin_dir)
pass
if standalone:
target = os.path.join(bin_dir, os.path.basename(filepath))
shutil.copy(filepath, target)
pass
else:
if sys.platform == 'darwin':
ext = 'dylib'
elif sys.platform == 'win32':
ext = 'dll'
elif sys.platform == 'linux2':
ext = 'so'
pass
files = [filepath]
            # DLLs in the same directory must be copied along with the RTC's binary.
for file in os.listdir(os.path.dirname(filepath)):
if file.endswith(ext):
files.append(os.path.join(os.path.dirname(filepath), file))
pass
pass
for file in files:
target = os.path.join(bin_dir, os.path.basename(file))
                shutil.copy(file, target)
pass
targetfile = os.path.join(bin_dir_rel, os.path.basename(filepath))
targetfile = targetfile.replace('\\', '/')
return targetfile
def copy_conf_from_rtc(package, rtc, verbose=False, force=False, rtc_count=0):
conffile = rtc.get_rtc_conf_path(verbose=verbose)
if len(conffile) == 0:
        sys.stdout.write('## No configuration file for RTC (%s) was found.\n' % rtc.rtcprofile.basicInfo.name)
return []
targetconf = os.path.join(package.path, 'conf', os.path.basename(conffile))
targetconf = targetconf[:-5] + '%s' % rtc_count + '.conf'
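    # e.g. 'MyRTC.conf' with rtc_count=0 becomes 'conf/MyRTC0.conf' inside the package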
if os.path.isfile(targetconf):
if verbose: sys.stdout.write('## Found %s.\n' % targetconf)
if force:
if verbose: sys.stdout.write('## Force Copying Config (%s -> %s)\n' % (conffile, targetconf))
shutil.copy(conffile, targetconf)
else:
if verbose: sys.stdout.write('## Do not copy.\n')
pass
else:
        if verbose: sys.stdout.write('## Copying Config (%s -> %s)\n' % (conffile, targetconf))
shutil.copy(conffile, targetconf)
confpath = 'conf' + '/' + os.path.basename(targetconf)
if sys.platform == 'win32':
        confpath = confpath.replace('\\', '\\\\')
return confpath
| gpl-3.0 |
DanielJRaine/Portfolio | node_modules/node-gyp/gyp/pylib/gyp/MSVSNew.py | 1835 | 12124 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
    A GUID-like string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
  GUID, so that projects and solutions which refer to each other can
  explicitly determine the GUID to refer to. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
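# For example, MakeGuid('base_unittests') returns the same brace-wrapped
# 8-4-4-4-12 hex string on every call, while MakeGuid('base_unittests',
# seed='other_seed') yields a different but equally stable GUID.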
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
      # Use the last path component as the folder name.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
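      # Writes one entry header per project/folder, e.g.:
      #   Project("{<entry type GUID>}") = "foo", "foo\foo.vcproj", "{<entry GUID>}"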
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
| mit |
wkschwartz/django | tests/csrf_tests/views.py | 40 | 1236 | from django.http import HttpResponse
from django.middleware.csrf import get_token
from django.template import Context, RequestContext, Template
from django.template.context_processors import csrf
from django.views.decorators.csrf import ensure_csrf_cookie
def post_form_view(request):
"""Return a POST form (without a token)."""
return HttpResponse(content="""
<html><body><h1>\u00a1Unicode!<form method="post"><input type="text"></form></body></html>
""")
@ensure_csrf_cookie
def ensure_csrf_cookie_view(request):
# Doesn't insert a token or anything.
return HttpResponse()
def token_view(request):
context = RequestContext(request, processors=[csrf])
template = Template('{% csrf_token %}')
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""Use the csrf view processor instead of the token."""
context = RequestContext(request, processors=[csrf])
template = Template('')
return HttpResponse(template.render(context))
def csrf_token_error_handler(request, **kwargs):
"""This error handler accesses the CSRF token."""
template = Template(get_token(request))
return HttpResponse(template.render(Context()), status=599)
| bsd-3-clause |
apanju/odoo | openerp/addons/base/ir/ir_cron.py | 276 | 15096 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-TODAY OpenERP S.A. <http://www.openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import threading
import time
import psycopg2
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytz
import openerp
from openerp import SUPERUSER_ID, netsvc, api
from openerp.osv import fields, osv
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.modules import load_information_from_description_file
_logger = logging.getLogger(__name__)
BASE_VERSION = load_information_from_description_file('base')['version']
def str2tuple(s):
return eval('tuple(%s)' % (s or ''))
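# e.g. str2tuple("(4,)") -> (4,) and str2tuple(None) -> (); used to turn the
# 'args' field stored on a cron job into the tuple of call arguments.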
_intervalTypes = {
'work_days': lambda interval: relativedelta(days=interval),
'days': lambda interval: relativedelta(days=interval),
'hours': lambda interval: relativedelta(hours=interval),
'weeks': lambda interval: relativedelta(days=7*interval),
'months': lambda interval: relativedelta(months=interval),
'minutes': lambda interval: relativedelta(minutes=interval),
}
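# e.g. _intervalTypes['weeks'](2) -> relativedelta(days=14); note that
# 'work_days' is currently handled the same way as 'days'.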
class ir_cron(osv.osv):
""" Model describing cron jobs (also called actions or tasks).
"""
# TODO: perhaps in the future we could consider a flag on ir.cron jobs
# that would cause database wake-up even if the database has not been
# loaded yet or was already unloaded (e.g. 'force_db_wakeup' or something)
# See also openerp.cron
_name = "ir.cron"
_order = 'name'
_columns = {
'name': fields.char('Name', required=True),
'user_id': fields.many2one('res.users', 'User', required=True),
'active': fields.boolean('Active'),
'interval_number': fields.integer('Interval Number',help="Repeat every x."),
'interval_type': fields.selection( [('minutes', 'Minutes'),
('hours', 'Hours'), ('work_days','Work Days'), ('days', 'Days'),('weeks', 'Weeks'), ('months', 'Months')], 'Interval Unit'),
'numbercall': fields.integer('Number of Calls', help='How many times the method is called,\na negative number indicates no limit.'),
'doall' : fields.boolean('Repeat Missed', help="Specify if missed occurrences should be executed when the server restarts."),
'nextcall' : fields.datetime('Next Execution Date', required=True, help="Next planned execution date for this job."),
'model': fields.char('Object', help="Model name on which the method to be called is located, e.g. 'res.partner'."),
'function': fields.char('Method', help="Name of the method to be called when this job is processed."),
'args': fields.text('Arguments', help="Arguments to be passed to the method, e.g. (uid,)."),
'priority': fields.integer('Priority', help='The priority of the job, as an integer: 0 means higher priority, 10 means lower priority.')
}
_defaults = {
'nextcall' : lambda *a: time.strftime(DEFAULT_SERVER_DATETIME_FORMAT),
'priority' : 5,
'user_id' : lambda obj,cr,uid,context: uid,
'interval_number' : 1,
'interval_type' : 'months',
'numbercall' : 1,
'active' : 1,
}
def _check_args(self, cr, uid, ids, context=None):
try:
for this in self.browse(cr, uid, ids, context):
str2tuple(this.args)
except Exception:
return False
return True
_constraints = [
(_check_args, 'Invalid arguments', ['args']),
]
def _handle_callback_exception(self, cr, uid, model_name, method_name, args, job_id, job_exception):
""" Method called when an exception is raised by a job.
        Simply logs the exception and rolls back the transaction.
:param model_name: model name on which the job method is located.
:param method_name: name of the method to call when this job is processed.
:param args: arguments of the method (without the usual self, cr, uid).
:param job_id: job id.
:param job_exception: exception raised by the job.
"""
cr.rollback()
_logger.exception("Call of self.pool.get('%s').%s(cr, uid, *%r) failed in Job %s" % (model_name, method_name, args, job_id))
def _callback(self, cr, uid, model_name, method_name, args, job_id):
""" Run the method associated to a given job
It takes care of logging and exception handling.
:param model_name: model name on which the job method is located.
:param method_name: name of the method to call when this job is processed.
:param args: arguments of the method (without the usual self, cr, uid).
:param job_id: job id.
"""
try:
args = str2tuple(args)
openerp.modules.registry.RegistryManager.check_registry_signaling(cr.dbname)
registry = openerp.registry(cr.dbname)
if model_name in registry:
model = registry[model_name]
if hasattr(model, method_name):
log_depth = (None if _logger.isEnabledFor(logging.DEBUG) else 1)
netsvc.log(_logger, logging.DEBUG, 'cron.object.execute', (cr.dbname,uid,'*',model_name,method_name)+tuple(args), depth=log_depth)
if _logger.isEnabledFor(logging.DEBUG):
start_time = time.time()
getattr(model, method_name)(cr, uid, *args)
if _logger.isEnabledFor(logging.DEBUG):
end_time = time.time()
_logger.debug('%.3fs (%s, %s)' % (end_time - start_time, model_name, method_name))
openerp.modules.registry.RegistryManager.signal_caches_change(cr.dbname)
else:
msg = "Method `%s.%s` does not exist." % (model_name, method_name)
_logger.warning(msg)
else:
msg = "Model `%s` does not exist." % model_name
_logger.warning(msg)
except Exception, e:
self._handle_callback_exception(cr, uid, model_name, method_name, args, job_id, e)
def _process_job(self, job_cr, job, cron_cr):
""" Run a given job taking care of the repetition.
:param job_cr: cursor to use to execute the job, safe to commit/rollback
:param job: job to be run (as a dictionary).
:param cron_cr: cursor holding lock on the cron job row, to use to update the next exec date,
must not be committed/rolled back!
"""
try:
with api.Environment.manage():
now = fields.datetime.context_timestamp(job_cr, job['user_id'], datetime.now())
nextcall = fields.datetime.context_timestamp(job_cr, job['user_id'], datetime.strptime(job['nextcall'], DEFAULT_SERVER_DATETIME_FORMAT))
numbercall = job['numbercall']
ok = False
while nextcall < now and numbercall:
if numbercall > 0:
numbercall -= 1
if not ok or job['doall']:
self._callback(job_cr, job['user_id'], job['model'], job['function'], job['args'], job['id'])
if numbercall:
nextcall += _intervalTypes[job['interval_type']](job['interval_number'])
ok = True
addsql = ''
if not numbercall:
addsql = ', active=False'
cron_cr.execute("UPDATE ir_cron SET nextcall=%s, numbercall=%s"+addsql+" WHERE id=%s",
(nextcall.astimezone(pytz.UTC).strftime(DEFAULT_SERVER_DATETIME_FORMAT), numbercall, job['id']))
self.invalidate_cache(job_cr, SUPERUSER_ID)
finally:
job_cr.commit()
cron_cr.commit()
@classmethod
def _acquire_job(cls, db_name):
# TODO remove 'check' argument from addons/base_action_rule/base_action_rule.py
""" Try to process one cron job.
This selects in database all the jobs that should be processed. It then
tries to lock each of them and, if it succeeds, run the cron job (if it
doesn't succeed, it means the job was already locked to be taken care
of by another thread) and return.
If a job was processed, returns True, otherwise returns False.
"""
db = openerp.sql_db.db_connect(db_name)
threading.current_thread().dbname = db_name
cr = db.cursor()
jobs = []
try:
# Make sure the database we poll has the same version as the code of base
cr.execute("SELECT 1 FROM ir_module_module WHERE name=%s AND latest_version=%s", ('base', BASE_VERSION))
if cr.fetchone():
# Careful to compare timestamps with 'UTC' - everything is UTC as of v6.1.
cr.execute("""SELECT * FROM ir_cron
WHERE numbercall != 0
AND active AND nextcall <= (now() at time zone 'UTC')
ORDER BY priority""")
jobs = cr.dictfetchall()
else:
_logger.warning('Skipping database %s as its base version is not %s.', db_name, BASE_VERSION)
except psycopg2.ProgrammingError, e:
if e.pgcode == '42P01':
# Class 42 — Syntax Error or Access Rule Violation; 42P01: undefined_table
# The table ir_cron does not exist; this is probably not an OpenERP database.
_logger.warning('Tried to poll an undefined table on database %s.', db_name)
else:
raise
except Exception:
_logger.warning('Exception in cron:', exc_info=True)
finally:
cr.close()
for job in jobs:
lock_cr = db.cursor()
try:
# Try to grab an exclusive lock on the job row from within the task transaction
# Restrict to the same conditions as for the search since the job may have already
                # been run by another thread when cron is running in multi-threaded mode
lock_cr.execute("""SELECT *
FROM ir_cron
WHERE numbercall != 0
AND active
AND nextcall <= (now() at time zone 'UTC')
AND id=%s
FOR UPDATE NOWAIT""",
(job['id'],), log_exceptions=False)
locked_job = lock_cr.fetchone()
if not locked_job:
_logger.debug("Job `%s` already executed by another process/thread. skipping it", job['name'])
continue
# Got the lock on the job row, run its code
_logger.debug('Starting job `%s`.', job['name'])
job_cr = db.cursor()
try:
registry = openerp.registry(db_name)
registry[cls._name]._process_job(job_cr, job, lock_cr)
except Exception:
_logger.exception('Unexpected exception while processing cron job %r', job)
finally:
job_cr.close()
except psycopg2.OperationalError, e:
if e.pgcode == '55P03':
# Class 55: Object not in prerequisite state; 55P03: lock_not_available
_logger.debug('Another process/thread is already busy executing job `%s`, skipping it.', job['name'])
continue
else:
# Unexpected OperationalError
raise
finally:
                # Always release the cursor used to lock (or try to lock) the job row
lock_cr.close()
if hasattr(threading.current_thread(), 'dbname'): # cron job could have removed it as side-effect
del threading.current_thread().dbname
def _try_lock(self, cr, uid, ids, context=None):
"""Try to grab a dummy exclusive write-lock to the rows with the given ids,
to make sure a following write() or unlink() will not block due
to a process currently executing those cron tasks"""
try:
cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table,
(tuple(ids),), log_exceptions=False)
except psycopg2.OperationalError:
cr.rollback() # early rollback to allow translations to work for the user feedback
raise osv.except_osv(_("Record cannot be modified right now"),
_("This cron task is currently being executed and may not be modified, "
"please try again in a few minutes"))
def create(self, cr, uid, vals, context=None):
res = super(ir_cron, self).create(cr, uid, vals, context=context)
return res
def write(self, cr, uid, ids, vals, context=None):
self._try_lock(cr, uid, ids, context)
res = super(ir_cron, self).write(cr, uid, ids, vals, context=context)
return res
def unlink(self, cr, uid, ids, context=None):
self._try_lock(cr, uid, ids, context)
res = super(ir_cron, self).unlink(cr, uid, ids, context=context)
return res
def try_write(self, cr, uid, ids, values, context=None):
try:
with cr.savepoint():
cr.execute("""SELECT id FROM "%s" WHERE id IN %%s FOR UPDATE NOWAIT""" % self._table,
(tuple(ids),), log_exceptions=False)
except psycopg2.OperationalError:
pass
else:
return super(ir_cron, self).write(cr, uid, ids, values, context=context)
return False
def toggle(self, cr, uid, ids, model, domain, context=None):
active = bool(self.pool[model].search_count(cr, uid, domain, context=context))
return self.try_write(cr, uid, ids, {'active': active}, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
liyu1990/tensorflow | tensorflow/python/kernel_tests/dense_update_ops_test.py | 5 | 5333 | """Tests for tensorflow.ops.tf.Assign*."""
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
class AssignOpTest(tf.test.TestCase):
def _initAssignFetch(self, x, y, use_gpu=False):
"""Initialize a param to init and update it with y."""
super(AssignOpTest, self).setUp()
with self.test_session(use_gpu=use_gpu):
p = tf.Variable(x)
assign = tf.assign(p, y)
p.initializer.run()
new_value = assign.eval()
return p.eval(), new_value
def _initAssignAddFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param += y."""
with self.test_session(use_gpu=use_gpu):
p = tf.Variable(x)
add = tf.assign_add(p, y)
p.initializer.run()
new_value = add.eval()
return p.eval(), new_value
def _initAssignSubFetch(self, x, y, use_gpu=False):
"""Initialize a param to init, and compute param -= y."""
with self.test_session(use_gpu=use_gpu):
p = tf.Variable(x)
sub = tf.assign_sub(p, y)
p.initializer.run()
new_value = sub.eval()
return p.eval(), new_value
def _testTypes(self, vals):
for dtype in [np.float32, np.float64, np.int32, np.int64]:
x = np.zeros(vals.shape).astype(dtype)
y = vals.astype(dtype)
var_value, op_value = self._initAssignFetch(x, y, use_gpu=False)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=False)
self.assertAllEqual(x + y, var_value)
self.assertAllEqual(x + y, op_value)
var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=False)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
if tf.test.IsBuiltWithCuda() and dtype in [np.float32, np.float64]:
var_value, op_value = self._initAssignFetch(x, y, use_gpu=True)
self.assertAllEqual(y, var_value)
self.assertAllEqual(y, op_value)
var_value, op_value = self._initAssignAddFetch(x, y, use_gpu=True)
self.assertAllEqual(x + y, var_value)
self.assertAllEqual(x + y, op_value)
        var_value, op_value = self._initAssignSubFetch(x, y, use_gpu=True)
self.assertAllEqual(x - y, var_value)
self.assertAllEqual(x - y, op_value)
def testBasic(self):
self._testTypes(np.arange(0, 20).reshape([4, 5]))
def testAssignNonStrictShapeChecking(self):
with self.test_session():
data = tf.fill([1024, 1024], 0)
p = tf.Variable([1])
a = tf.assign(p, data, validate_shape=False)
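      # validate_shape=False lets the variable adopt the (different) shape of the assigned value.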
a.op.run()
self.assertAllEqual(p.eval(), data.eval())
# Assign to yet another shape
data2 = tf.fill([10, 10], 1)
a2 = tf.assign(p, data2, validate_shape=False)
a2.op.run()
self.assertAllEqual(p.eval(), data2.eval())
def testInitRequiredAssignAdd(self):
with self.test_session():
p = tf.Variable(tf.fill([1024, 1024], 1),
tf.int32)
a = tf.assign_add(p, tf.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
def testInitRequiredAssignSub(self):
with self.test_session():
p = tf.Variable(tf.fill([1024, 1024], 1),
tf.int32)
a = tf.assign_sub(p, tf.fill([1024, 1024], 0))
with self.assertRaisesOpError("use uninitialized"):
a.op.run()
# NOTE(mrry): See also
# dense_update_ops_no_tsan_test.AssignOpTest, which contains a benign
# data race and must run without TSAN.
def testParallelUpdateWithLocking(self):
with self.test_session() as sess:
zeros_t = tf.fill([1024, 1024], 0.0)
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(zeros_t)
adds = [tf.assign_add(p, ones_t, use_locking=True)
for _ in range(20)]
p.initializer.run()
def run_add(add_op):
sess.run(add_op)
threads = [
self.checkedThread(target=run_add, args=(add_op,)) for add_op in adds]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
ones = np.ones((1024, 1024)).astype(np.float32)
self.assertAllEqual(vals, ones * 20)
# NOTE(mrry): See also
# dense_update_ops_no_tsan_test.[...].testParallelAssignWithoutLocking,
# which contains a benign data race and must run without TSAN.
def testParallelAssignWithLocking(self):
with self.test_session() as sess:
zeros_t = tf.fill([1024, 1024], 0.0)
ones_t = tf.fill([1024, 1024], 1.0)
p = tf.Variable(zeros_t)
assigns = [tf.assign(p, tf.mul(ones_t, float(i)),
use_locking=True)
for i in range(1, 21)]
p.initializer.run()
def run_assign(assign_op):
sess.run(assign_op)
threads = [self.checkedThread(target=run_assign, args=(assign_op,))
for assign_op in assigns]
for t in threads:
t.start()
for t in threads:
t.join()
vals = p.eval()
# Assert every element is the same, and taken from one of the assignments.
self.assertTrue(vals[0, 0] > 0)
self.assertTrue(vals[0, 0] <= 20)
self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
rigdenlab/SIMBAD | simbad/mr/molrep_mr.py | 1 | 18677 | #!/usr/bin/env ccp4-python
"""Module to run molrep on a model"""
__author__ = "Adam Simpkin"
__date__ = "02 May 2017"
__version__ = "1.0"
import os
import operator
import shutil
from simbad.mr.options import SGAlternatives
from simbad.util import mtz_util
from pyjob import cexec
from pyjob.script import EXE_EXT
def check_contrast(logfile):
"""Check the logfile of the job for the contrast value
Parameters
----------
logfile : str
Path to the logfile
Returns
-------
float
Contrast score in log file
"""
contrasts = []
with open(logfile, "r") as f:
for line in f:
if "Contrast" in line:
fields = line.split()
                if len(fields) <= 3:
                    contrasts.append(float(fields[-1]))
if len(contrasts) < 1:
return 0.0
else:
return max(contrasts)
class Molrep(object):
"""Class to run Molrep
Attributes
----------
hklin : str
Path to input hkl file
logfile : str
Path to output log file
pdbin : str
Path to the input pdb file
pdbout : str
Path to the output pdb file
sgalternative : str
Specify whether to try alternative space groups (all | enant)
space_group : str
The space group of the input hkl file
work_dir : str
Path to the working directory were you want MOLREP to run
Example
-------
>>> from simbad.mr.molrep_mr import Molrep
>>> molrep = Molrep('<hklin>', '<hklout>', '<logfile>', '<nmol>', '<pdbin>', '<pdbout>', '<sgalternative>',
>>> '<space_group>', '<work_dir>')
>>> molrep.run()
Files relating to the MOLREP run will be contained within the work_dir however the location of the output pdb and
logfile can be specified.
"""
def __init__(self, hklin, hklout, logfile, nmol, pdbin, pdbout, sgalternative, space_group, work_dir):
self._hklin = None
self._logfile = None
self._nmol = None
self._pdbin = None
self._pdbout = None
self._work_dir = None
self._sgalternative = None
self._space_group = None
self.hklin = hklin
self.hklout = hklout
self.logfile = logfile
self.nmol = nmol
self.pdbin = pdbin
self.pdbout = pdbout
self.sgalternative = sgalternative
self.space_group = space_group
self.work_dir = work_dir
self.all_sg_codes = {
"P2": "3",
"P21": "4",
"C2": "5",
"P222": "16",
"P2221": "17",
"P21212": "18",
"P212121": "19",
"C2221": "20",
"C222": "21",
"F222": "22",
"I222": "23",
"I212121": "24",
"P4": "75",
"P41": "76",
"P42": "77",
"P43": "78",
"I4": "79",
"I41": "80",
"P422": "89",
"P4212": "90",
"P4122": "91",
"P41212": "92",
"P4222": "93",
"P42212": "94",
"P4322": "95",
"P43212": "96",
"I422": "97",
"I4122": "98",
"P3": "143",
"P31": "144",
"P32": "145",
"R3": "146",
"P312": "149",
"P321": "150",
"P3112": "151",
"P3121": "152",
"P3212": "153",
"P3221": "154",
"R32": "155",
"P6": "168",
"P61": "169",
"P65": "170",
"P62": "171",
"P64": "172",
"P63": "173",
"P622": "177",
"P6122": "178",
"P6522": "179",
"P6222": "180",
"P6422": "181",
"P6322": "182",
"P23": "195",
"F23": "196",
"I23": "197",
"P213": "198",
"I213": "199",
"P432": "207",
"P4232": "208",
"F432": "209",
"F4132": "210",
"I432": "211",
"P4332": "212",
"P4132": "213",
"I4132": "214",
}
self.enant_sg_codes = {
"P31": "144",
"P32": "145",
"P3112": "151",
"P3212": "153",
"P3121": "152",
"P3221": "154",
"P41": "76",
"P43": "78",
"P4122": "91",
"P4322": "95",
"P41212": "92",
"P43212": "96",
"P61": "169",
"P65": "170",
"P62": "171",
"P64": "172",
"P6122": "178",
"P6522": "179",
"P6222": "180",
"P6422": "181",
"P4332": "212",
"P4132": "213",
}
self.enant_sg = {
"144": "145",
"145": "144",
"151": "153",
"153": "151",
"152": "154",
"154": "152",
"76": "78",
"78": "76",
"91": "95",
"95": "91",
"92": "96",
"96": "92",
"169": "170",
"170": "169",
"171": "172",
"172": "171",
"178": "179",
"179": "178",
"180": "181",
"181": "180",
"212": "213",
"213": "212",
}
self.all_alt_sg = {
"3": ["4", "5"],
"4": ["3", "5"],
"5": ["3", "4"],
"16": ["17", "18", "19", "20", "21", "22", "23", "24"],
"17": ["16", "18", "19", "20", "21", "22", "23", "24"],
"18": ["16", "17", "19", "20", "21", "22", "23", "24"],
"19": ["16", "17", "18", "20", "21", "22", "23", "24"],
"20": ["16", "17", "18", "19", "21", "22", "23", "24"],
"21": ["16", "17", "18", "19", "20", "22", "23", "24"],
"22": ["16", "17", "18", "19", "20", "21", "23", "24"],
"23": ["16", "17", "18", "19", "20", "21", "22", "24"],
"24": ["16", "17", "18", "19", "20", "21", "22", "23"],
"75": ["76", "77", "78", "79", "80"],
"76": ["75", "77", "78", "79", "80"],
"77": ["75", "76", "78", "79", "80"],
"78": ["75", "76", "77", "79", "80"],
"79": ["75", "76", "77", "78", "80"],
"80": ["75", "76", "77", "78", "79"],
"89": ["90", "91", "92", "93", "94", "95", "96", "97", "98"],
"90": ["89", "91", "92", "93", "94", "95", "96", "97", "98"],
"91": ["89", "90", "92", "93", "94", "95", "96", "97", "98"],
"92": ["89", "90", "91", "93", "94", "95", "96", "97", "98"],
"93": ["89", "90", "91", "92", "94", "95", "96", "97", "98"],
"94": ["89", "90", "91", "92", "93", "95", "96", "97", "98"],
"95": ["89", "90", "91", "92", "93", "94", "96", "97", "98"],
"96": ["89", "90", "91", "92", "93", "94", "95", "97", "98"],
"97": ["89", "90", "91", "92", "93", "94", "95", "96", "98"],
"98": ["89", "90", "91", "92", "93", "94", "95", "96", "97"],
"143": ["144", "145", "146"],
"144": ["143", "145", "146"],
"145": ["143", "144", "146"],
"146": ["143", "144", "145"],
"149": ["150", "151", "152", "153", "154", "155"],
"150": ["149", "151", "152", "153", "154", "155"],
"151": ["149", "150", "152", "153", "154", "155"],
"152": ["149", "150", "151", "153", "154", "155"],
"153": ["149", "150", "151", "152", "154", "155"],
"154": ["149", "150", "151", "152", "153", "155"],
"155": ["149", "150", "151", "152", "153", "154"],
"168": ["169", "170", "171", "172", "173"],
"169": ["168", "170", "171", "172", "173"],
"170": ["168", "169", "171", "172", "173"],
"171": ["168", "169", "170", "172", "173"],
"172": ["168", "169", "170", "171", "173"],
"173": ["168", "169", "170", "171", "172"],
"177": ["178", "179", "180", "181", "182"],
"178": ["177", "179", "180", "181", "182"],
"179": ["177", "178", "180", "181", "182"],
"180": ["177", "178", "179", "181", "182"],
"181": ["177", "178", "179", "180", "182"],
"182": ["177", "178", "179", "180", "181"],
"195": ["196", "197", "198", "199"],
"196": ["195", "197", "198", "199"],
"197": ["195", "196", "198", "199"],
"198": ["195", "196", "197", "199"],
"199": ["195", "196", "197", "198"],
"207": ["208", "209", "210", "211", "212", "213", "214"],
"208": ["207", "209", "210", "211", "212", "213", "214"],
"209": ["207", "208", "210", "211", "212", "213", "214"],
"210": ["207", "208", "209", "211", "212", "213", "214"],
"211": ["207", "208", "209", "210", "212", "213", "214"],
"212": ["207", "208", "209", "210", "211", "213", "214"],
"213": ["207", "208", "209", "210", "211", "212", "214"],
"214": ["207", "208", "209", "210", "211", "212", "213"],
}
@property
def hklin(self):
"""The input hkl file"""
return self._hklin
@hklin.setter
def hklin(self, hklin):
"""Define the input hkl file"""
self._hklin = hklin
@property
def logfile(self):
"""The logfile output"""
return self._logfile
@logfile.setter
def logfile(self, logfile):
"""Define the output logfile"""
self._logfile = logfile
@property
def nmol(self):
"""The number of molecules to look for"""
return self._nmol
@nmol.setter
def nmol(self, nmol):
"""Define the number of molecules to look for"""
self._nmol = nmol
@property
def pdbin(self):
"""The input pdb file"""
return self._pdbin
@pdbin.setter
def pdbin(self, pdbin):
"""Define the input pdb file"""
self._pdbin = pdbin
@property
def pdbout(self):
"""The output pdb file"""
return self._pdbout
@pdbout.setter
def pdbout(self, pdbout):
"""Define the output pdb file"""
self._pdbout = pdbout
@property
def sgalternative(self):
"""Whether to check for alternative space groups"""
return self._sgalternative
@sgalternative.setter
def sgalternative(self, sgalternative):
"""Define whether to check for alternative space groups"""
if sgalternative:
self._sgalternative = sgalternative.lower()
else:
self._sgalternative = sgalternative
@property
def space_group(self):
"""The input space group"""
return self._space_group
@space_group.setter
def space_group(self, space_group):
"""Define the input space group"""
self._space_group = space_group
@property
def work_dir(self):
"""The path to the working directory"""
return self._work_dir
@work_dir.setter
def work_dir(self, work_dir):
"""Define the working directory"""
self._work_dir = work_dir
def run(self):
"""Function to run molecular replacement using MOLREP
Returns
-------
file
The output pdb from MOLREP
"""
# Make a note of the current working directory
current_work_dir = os.getcwd()
# Change to the MOLREP working directory
if os.path.exists(self.work_dir):
os.chdir(self.work_dir)
else:
os.makedirs(self.work_dir)
os.chdir(self.work_dir)
# Copy hklin and pdbin to working dir for efficient running of MOLREP
hklin = os.path.join(self.work_dir, os.path.basename(self.hklin))
shutil.copyfile(self.hklin, hklin)
pdbin = os.path.join(self.work_dir, os.path.basename(self.pdbin))
shutil.copyfile(self.pdbin, pdbin)
logfile = os.path.join(self.work_dir, "molrep_out_{0}.log".format(self.space_group))
template_key = """
FILE_F {0}
FILE_M {1}
NMON {2}
{3}
END"""
key = template_key.format(os.path.relpath(hklin), os.path.relpath(pdbin), self.nmol, "")
self.molrep(key, logfile)
# Move output pdb to specified name
if os.path.isfile(os.path.join(self.work_dir, "molrep.pdb")):
shutil.move(os.path.join(self.work_dir, "molrep.pdb"), os.path.join(self.work_dir, "molrep_out_{0}.pdb".format(self.space_group)))
if self.sgalternative == "enant" and self.space_group in self.enant_sg_codes:
hklin_sg_code = self.enant_sg_codes[self.space_group]
enant_sg_code = self.enant_sg[hklin_sg_code]
contrast = check_contrast(os.path.join(self.work_dir, "molrep_out_{0}.log".format(self.space_group)))
contrasts = {self.space_group: contrast}
key = template_key.format(os.path.relpath(hklin), os.path.relpath(pdbin), self.nmol, "NOSG {0}".format(enant_sg_code))
logfile = os.path.join(self.work_dir, "molrep_out_{0}.log".format(enant_sg_code))
self.molrep(key, logfile)
contrasts[enant_sg_code] = check_contrast(logfile)
if os.path.isfile(os.path.join(self.work_dir, "molrep.pdb")):
shutil.move(os.path.join(self.work_dir, "molrep.pdb"), os.path.join(self.work_dir, "molrep_out_{0}.pdb".format(enant_sg_code)))
self.evaluate_results(contrasts)
elif self.sgalternative == "all" and self.space_group in self.all_sg_codes:
hklin_sg_code = self.all_sg_codes[self.space_group]
all_alt_sg_codes = self.all_alt_sg[hklin_sg_code]
contrast = check_contrast(os.path.join(self.work_dir, "molrep_out_{0}.log".format(self.space_group)))
contrasts = {self.space_group: contrast}
for alt_sg_code in all_alt_sg_codes:
key = template_key.format(os.path.relpath(hklin), os.path.relpath(pdbin), self.nmol, "NOSG {0}".format(alt_sg_code))
logfile = os.path.join(self.work_dir, "molrep_out_{0}.log".format(alt_sg_code))
self.molrep(key, logfile)
contrasts[alt_sg_code] = check_contrast(logfile)
if os.path.isfile(os.path.join(self.work_dir, "molrep.pdb")):
shutil.move(os.path.join(self.work_dir, "molrep.pdb"), os.path.join(self.work_dir, "molrep_out_{0}.pdb".format(alt_sg_code)))
self.evaluate_results(contrasts)
else:
# Move output pdb to specified name
if os.path.isfile(os.path.join(self.work_dir, "molrep_out_{0}.pdb".format(self.space_group))):
shutil.move(os.path.join(self.work_dir, "molrep_out_{0}.pdb".format(self.space_group)), self.pdbout)
        # Copy the input hkl to the specified output name
if os.path.isfile(self.hklin):
shutil.copy(self.hklin, self.hklout)
# Move log file to specified name
if os.path.isfile(os.path.join(self.work_dir, "molrep_out_{0}.log".format(self.space_group))):
shutil.move(os.path.join(self.work_dir, "molrep_out_{0}.log".format(self.space_group)), self.logfile)
# Return to original working directory
os.chdir(current_work_dir)
# Delete any files copied across
if os.path.isfile(os.path.join(self.work_dir, os.path.basename(self.hklin))):
os.remove(os.path.join(self.work_dir, os.path.basename(self.hklin)))
if os.path.isfile(os.path.join(self.work_dir, os.path.basename(self.pdbin))):
os.remove(os.path.join(self.work_dir, os.path.basename(self.pdbin)))
return
def evaluate_results(self, results):
"""Function to evaluate molrep results and move the result with the best contrast score to the output pdb
Parameters
----------
results : dict
Dictionary containing space group code with the corresponding contrast score
Returns
-------
file
The output pdb for the best result
file
The output log for the best result
"""
top_sg_code = max(results.iteritems(), key=operator.itemgetter(1))[0]
if os.path.isfile(os.path.join(self.work_dir, "molrep_out_{0}.pdb".format(top_sg_code))):
shutil.move(os.path.join(self.work_dir, "molrep_out_{0}.pdb".format(top_sg_code)), self.pdbout)
if os.path.isfile(os.path.join(self.work_dir, "molrep_out_{0}.log".format(top_sg_code))):
shutil.move(os.path.join(self.work_dir, "molrep_out_{0}.log".format(top_sg_code)), self.logfile)
mtz_util.reindex(self.hklin, self.hklout, top_sg_code)
@staticmethod
def molrep(key, logfile):
"""Function to run molecular replacement using MOLREP
Parameters
----------
key : str
MOLREP keywords
logfile :
Path to output log file
Returns
-------
file
The output pdb from MOLREP
file
The output log file
"""
cmd = ["molrep" + EXE_EXT]
stdout = cexec(cmd, stdin=key)
with open(logfile, "w") as f_out:
f_out.write(stdout)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Runs MR using MOLREP", prefix_chars="-")
group = parser.add_argument_group()
group.add_argument("-hklin", type=str, help="Path the input hkl file")
group.add_argument("-hklout", type=str, help="Path the output hkl file")
group.add_argument("-logfile", type=str, help="Path to the ouput log file")
group.add_argument("-nmol", type=int, help="The predicted number of molecules to build")
group.add_argument("-pdbin", type=str, help="Path to the input pdb file")
group.add_argument("-pdbout", type=str, help="Path to the output pdb file")
group.add_argument("-sgalternative", choices=SGAlternatives.__members__.keys(), help="Try alternative space groups")
group.add_argument("-space_group", type=str, help="The input space group")
group.add_argument("-work_dir", type=str, help="Path to the working directory")
args = parser.parse_args()
molrep = Molrep(args.hklin, args.hklout, args.logfile, args.nmol, args.pdbin, args.pdbout, args.sgalternative, args.space_group, args.work_dir)
molrep.run()
| bsd-3-clause |
hfp/tensorflow-xsmm | tensorflow/python/debug/lib/session_debug_testlib.py | 11 | 64583 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for debugger functionalities in tf.Session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import glob
import os
import shutil
import tempfile
import threading
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_graphs
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import rnn
from tensorflow.python.ops import rnn_cell_impl
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.tensor_array_grad # pylint: disable=unused-import
from tensorflow.python.platform import googletest
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
def no_rewrite_session_config():
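  # Disable model pruning and the arithmetic/dependency optimizers so the test
  # graphs run without Grappler rewrites altering the watched node structure.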
rewriter_config = rewriter_config_pb2.RewriterConfig(
disable_model_pruning=True,
arithmetic_optimization=rewriter_config_pb2.RewriterConfig.OFF,
dependency_optimization=rewriter_config_pb2.RewriterConfig.OFF)
graph_options = config_pb2.GraphOptions(rewrite_options=rewriter_config)
return config_pb2.ConfigProto(graph_options=graph_options)
class _RNNCellForTest(rnn_cell_impl.RNNCell):
"""RNN cell for testing."""
def __init__(self, input_output_size, state_size):
self._input_output_size = input_output_size
self._state_size = state_size
self._w = variables.VariableV1(1.0, dtype=dtypes.float32, name="w")
@property
def output_size(self):
return self._input_output_size
@property
def state_size(self):
return self._state_size
def __call__(self, input_, state, scope=None):
return (math_ops.multiply(self._w, input_), state)
@test_util.run_v1_only("b/120545219")
class SessionDebugTestBase(test_util.TensorFlowTestCase):
"""Base class for unit tests of tfdbg running with tf.Session."""
@classmethod
def setUpClass(cls):
if test.is_gpu_available():
cls._expected_partition_graph_count = 2
cls._expected_num_devices = 2
gpu_name = test_util.gpu_device_name()
cls._main_device = "/job:localhost/replica:0/task:0" + gpu_name
else:
cls._expected_partition_graph_count = 1
cls._expected_num_devices = 1
cls._main_device = "/job:localhost/replica:0/task:0/device:CPU:0"
@classmethod
def tearDownClass(cls):
pass
def setUp(self):
self._dump_root = tempfile.mkdtemp()
def tearDown(self):
ops.reset_default_graph()
# Tear down temporary dump directory.
if os.path.isdir(self._dump_root):
shutil.rmtree(self._dump_root)
def _debug_urls(self, run_number=None):
raise NotImplementedError(
"_debug_urls() method is not implemented in the base test class.")
def _debug_dump_dir(self, run_number=None):
raise NotImplementedError(
"_debug_dump_dir() method is not implemented in the base test class.")
def _debug_run_and_get_dump(self,
sess,
fetches,
feed_dict=None,
debug_ops="DebugIdentity",
tolerate_debug_op_creation_failures=False,
global_step=-1,
validate=True,
expected_partition_graph_count=None):
"""Run fetches with debugging and obtain DebugDumpDir.
Args:
sess: the tf.Session to be used.
fetches: fetches of the Session.run().
feed_dict: feed dict for the Session.run().
debug_ops: name(s) of the debug ops to be used.
tolerate_debug_op_creation_failures: whether to tolerate debug op
creation failures.
global_step: Optional global step.
validate: whether to validate dumped tensors against graph.
expected_partition_graph_count: optional count of partition graphs to
assert on.
Returns:
1. Return values of the Session.run().
2. The DebugDumpDir object from the debugged run().
"""
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=debug_ops,
debug_urls=self._debug_urls(),
tolerate_debug_op_creation_failures=tolerate_debug_op_creation_failures,
global_step=global_step)
run_metadata = config_pb2.RunMetadata()
run_output = sess.run(fetches,
feed_dict=feed_dict,
options=run_options,
run_metadata=run_metadata)
if expected_partition_graph_count is not None:
self.assertEqual(expected_partition_graph_count,
len(run_metadata.partition_graphs))
return run_output, debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs,
validate=validate)
def _generate_dump_from_simple_addition_graph(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
v_init_val = np.array([[2.0], [-1.0]])
# Use node names with overlapping namespace (i.e., parent directory) to
# test concurrent, non-racing directory creation.
u_name = "u"
v_name = "v"
w_name = "w"
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant(v_init_val, shape=[2, 1])
v = variables.VariableV1(v_init, name=v_name)
w = math_ops.matmul(u, v, name=w_name)
u.initializer.run()
v.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = "file://%s" % self._dump_root
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Invoke Session.run().
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
simple_add_results = collections.namedtuple("SimpleAddResults", [
"u_init_val", "v_init_val", "u", "v", "w", "u_name", "v_name", "w_name",
"dump"
])
return simple_add_results(u_init_val, v_init_val, u, v, w, u_name, v_name,
w_name, dump)
def testCopyNodesHaveCorrectDebugOpsAndURLsAttributeValues(self):
with session.Session() as sess:
u = variables.VariableV1(2.1, name="u")
v = variables.VariableV1(20.0, name="v")
w = math_ops.multiply(u, v, name="w")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
debug_utils.add_debug_tensor_watch(
run_options,
"u",
0, ["DebugNumericSummary(gated_grpc=True)", "DebugIdentity"],
debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, "v", 0, ["DebugNumericSummary"], debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertAllClose(42.0, r)
u_copy_node_def = None
v_copy_node_def = None
for partition_graph in run_metadata.partition_graphs:
for node_def in partition_graph.node:
if debug_graphs.is_copy_node(node_def.name):
if node_def.name == "__copy_u_0":
u_copy_node_def = node_def
elif node_def.name == "__copy_v_0":
v_copy_node_def = node_def
self.assertIsNotNone(u_copy_node_def)
debug_ops_spec = u_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(2, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;1" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
self.assertEqual("DebugIdentity;%s;0" % debug_urls[0],
debug_ops_spec[1].decode("utf-8"))
self.assertIsNotNone(v_copy_node_def)
debug_ops_spec = v_copy_node_def.attr["debug_ops_spec"].list.s
self.assertEqual(1, len(debug_ops_spec))
self.assertEqual("DebugNumericSummary;%s;0" % debug_urls[0],
debug_ops_spec[0].decode("utf-8"))
def testConcurrentDumpingToPathsWithOverlappingParentDirsWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertTrue(results.dump.loaded_partition_graphs())
# Since global_step is not explicitly specified, it should take its default
# value: -1.
self.assertEqual(-1, results.dump.core_metadata.global_step)
self.assertGreaterEqual(results.dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(results.dump.core_metadata.executor_step_index, 0)
self.assertEqual([], results.dump.core_metadata.input_names)
self.assertEqual([results.w.name], results.dump.core_metadata.output_names)
self.assertEqual([], results.dump.core_metadata.target_nodes)
# Verify the dumped tensor values for u and v.
self.assertEqual(2, results.dump.size)
self.assertAllClose([results.u_init_val],
results.dump.get_tensors("%s/read" % results.u_name, 0,
"DebugIdentity"))
self.assertAllClose([results.v_init_val],
results.dump.get_tensors("%s/read" % results.v_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreaterEqual(
results.dump.get_rel_timestamps("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.u_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
results.dump.get_dump_sizes_bytes("%s/read" % results.v_name, 0,
"DebugIdentity")[0], 0)
def testGetOpTypeWorks(self):
results = self._generate_dump_from_simple_addition_graph()
self.assertEqual(results.u.op.type,
results.dump.node_op_type(results.u_name))
self.assertIn(results.v.op.type, results.dump.node_op_type(results.v_name))
self.assertIn(results.w.op.type, results.dump.node_op_type(results.w_name))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
results.dump.node_op_type("foo_bar")
def testDumpStringTensorsWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
str1_init_val = np.array(b"abc")
str2_init_val = np.array(b"def")
str1_init = constant_op.constant(str1_init_val)
str2_init = constant_op.constant(str2_init_val)
str1_name = "str1"
str2_name = "str2"
str1 = variables.VariableV1(str1_init, name=str1_name)
str2 = variables.VariableV1(str2_init, name=str2_name)
# Concatenate str1 and str2
str_concat = math_ops.add(str1, str2, name="str_concat")
str1.initializer.run()
str2.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str1_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % str2_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
sess.run(str_concat, options=run_options, run_metadata=run_metadata)
# String ops are located on CPU.
self.assertEqual(1, len(run_metadata.partition_graphs))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertIn(str1_name, dump.nodes())
self.assertIn(str2_name, dump.nodes())
self.assertEqual(2, dump.size)
self.assertEqual([str1_init_val],
dump.get_tensors("%s/read" % str1_name, 0,
"DebugIdentity"))
self.assertEqual([str2_init_val],
dump.get_tensors("%s/read" % str2_name, 0,
"DebugIdentity"))
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str1_name, 0, "DebugIdentity")[0],
0)
self.assertGreaterEqual(
dump.get_rel_timestamps("%s/read" % str2_name, 0, "DebugIdentity")[0],
0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str1_name, 0,
"DebugIdentity")[0], 0)
self.assertGreater(
dump.get_dump_sizes_bytes("%s/read" % str2_name, 0,
"DebugIdentity")[0], 0)
def testDumpUninitializedVariable(self):
op_namespace = "testDumpUninitializedVariable"
with session.Session() as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
s_init_val = b"str1"
u_name = "%s/u" % op_namespace
s_name = "%s/s" % op_namespace
u_init = constant_op.constant(u_init_val, shape=[2, 2])
u = variables.VariableV1(u_init, name=u_name)
s_init = constant_op.constant(s_init_val)
s = variables.VariableV1(s_init, name=s_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
debug_utils.add_debug_tensor_watch(
run_options, s_name, 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
# Initialize u and s.
sess.run(variables.global_variables_initializer(),
options=run_options,
run_metadata=run_metadata)
# Verify the dump file for the uninitialized value of u.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
self.assertEqual(2, dump.size)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# Verify that the dumped values reflect the uninitialized state of u and s.
u_vals = dump.get_tensors(u_name, 0, "DebugIdentity")
s_vals = dump.get_tensors(s_name, 0, "DebugIdentity")
self.assertEqual(1, len(u_vals))
self.assertIsInstance(u_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(u_vals[0].initialized)
self.assertEqual(1, len(s_vals))
self.assertIsInstance(s_vals[0], debug_data.InconvertibleTensorProto)
self.assertFalse(s_vals[0].initialized)
# Call run() again, to check that u is initialized properly.
self.assertAllClose(u_init_val, sess.run(u))
self.assertEqual(s_init_val, sess.run(s))
def testDebugWhileLoopGeneratesMultipleDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
num_iter = 10
# "u" is the Variable being updated in the loop.
u_name = "testDumpToFileWhileLoop/u"
u_namespace = u_name.split("/")[0]
u_init_val = np.array(11.0)
u_init = constant_op.constant(u_init_val)
u = variables.VariableV1(u_init, name=u_name)
# "v" is the increment.
v_name = "testDumpToFileWhileLoop/v"
v_namespace = v_name.split("/")[0]
v_init_val = np.array(2.0)
v_init = constant_op.constant(v_init_val)
v = variables.VariableV1(v_init, name=v_name)
u.initializer.run()
v.initializer.run()
i = constant_op.constant(0, name="testDumpToFileWhileLoop/i")
def cond(i):
return math_ops.less(i, num_iter)
def body(i):
new_u = state_ops.assign_add(u, v)
new_i = math_ops.add(i, 1)
op = control_flow_ops.group(new_u)
new_i = control_flow_ops.with_dependencies([op], new_i)
return [new_i]
loop = control_flow_ops.while_loop(
cond, body, [i], parallel_iterations=10)
# Create RunOptions for debug-watching tensors
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_urls = self._debug_urls()
# Add debug tensor watch for u.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for v.
debug_utils.add_debug_tensor_watch(
run_options, "%s/read" % v_name, 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Identity.
debug_utils.add_debug_tensor_watch(
run_options, "while/Identity", 0, debug_urls=debug_urls)
# Add debug tensor watch for while/Add/y.
debug_utils.add_debug_tensor_watch(
run_options, "while/Add/y", 0, debug_urls=debug_urls)
run_metadata = config_pb2.RunMetadata()
r = sess.run(loop, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
self.assertEqual(num_iter, r)
u_val_final = sess.run(u)
self.assertAllClose(u_init_val + num_iter * v_init_val, u_val_final)
# Verify dump files
self.assertTrue(os.path.isdir(self._dump_root))
u_glob_out = glob.glob(os.path.join(self._dump_root, "*", u_namespace))
v_glob_out = glob.glob(os.path.join(
self._dump_root, "*", v_namespace, "v"))
self.assertTrue(os.path.isdir(u_glob_out[0]))
self.assertTrue(os.path.isdir(v_glob_out[0]))
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Expected dumped tensors: u, v/read, 10 iterations of while/Identity,
# and 10 iterations of while/Add/y.
self.assertEqual(1 + 1 + num_iter + num_iter, dump.size)
# Verify tensor values.
self.assertAllClose([u_init_val],
dump.get_tensors(u_name, 0, "DebugIdentity"))
self.assertAllClose([v_init_val],
dump.get_tensors("%s/read" % v_name, 0,
"DebugIdentity"))
while_id_tensors = dump.get_tensors("while/Identity", 0, "DebugIdentity")
self.assertEqual(10, len(while_id_tensors))
for k in xrange(len(while_id_tensors)):
self.assertAllClose(np.array(k), while_id_tensors[k])
# Verify ascending timestamps from the while loops.
while_id_rel_timestamps = dump.get_rel_timestamps("while/Identity", 0,
"DebugIdentity")
while_id_dump_sizes_bytes = dump.get_dump_sizes_bytes("while/Identity", 0,
"DebugIdentity")
self.assertEqual(10, len(while_id_rel_timestamps))
prev_rel_time = 0
prev_dump_size_bytes = while_id_dump_sizes_bytes[0]
for rel_time, dump_size_bytes in zip(while_id_rel_timestamps,
while_id_dump_sizes_bytes):
self.assertGreaterEqual(rel_time, prev_rel_time)
self.assertEqual(dump_size_bytes, prev_dump_size_bytes)
prev_rel_time = rel_time
prev_dump_size_bytes = dump_size_bytes
# Test querying debug watch keys from node name.
watch_keys = dump.debug_watch_keys("while/Identity")
self.assertEqual(["while/Identity:0:DebugIdentity"], watch_keys)
# Test querying debug datum instances from debug watch key.
self.assertEqual(10, len(dump.watch_key_to_data(watch_keys[0])))
self.assertEqual([], dump.watch_key_to_data("foo"))
def testDebugWhileLoopWatchingWholeGraphWorks(self):
with session.Session() as sess:
loop_body = lambda i: math_ops.add(i, 2)
loop_cond = lambda i: math_ops.less(i, 16)
i = constant_op.constant(10, name="i")
loop = control_flow_ops.while_loop(loop_cond, loop_body, [i])
loop_result, dump = self._debug_run_and_get_dump(sess, loop)
self.assertEqual(16, loop_result)
self.assertEqual(
[[10]], dump.get_tensors("while/Enter", 0, "DebugIdentity"))
self.assertEqual(
[[12], [14], [16]],
dump.get_tensors("while/NextIteration", 0, "DebugIdentity"))
def testDebugTrainingDynamicRNNWorks(self):
with session.Session() as sess:
input_size = 3
state_size = 2
time_steps = 4
batch_size = 2
input_values = np.random.randn(time_steps, batch_size, input_size)
sequence_length = np.random.randint(0, time_steps, size=batch_size)
concat_inputs = array_ops.placeholder(
dtypes.float32, shape=(time_steps, batch_size, input_size))
outputs_dynamic, _ = rnn.dynamic_rnn(
_RNNCellForTest(input_size, state_size),
inputs=concat_inputs,
sequence_length=sequence_length,
time_major=True,
dtype=dtypes.float32)
toy_loss = math_ops.reduce_sum(outputs_dynamic * outputs_dynamic)
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(toy_loss, name="train_op")
sess.run(variables.global_variables_initializer())
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph_with_blacklists(
run_options,
sess.graph,
node_name_regex_blacklist="(.*rnn/while/.*|.*TensorArray.*)",
debug_urls=self._debug_urls())
# b/36870549: Nodes with these name patterns need to be excluded from
# tfdbg in order to prevent MSAN warnings of uninitialized Tensors
# under both file:// and grpc:// debug URL schemes.
run_metadata = config_pb2.RunMetadata()
sess.run(train_op, feed_dict={concat_inputs: input_values},
options=run_options, run_metadata=run_metadata)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testDebugCondWatchingWholeGraphWorks(self):
with session.Session() as sess:
x = variables.VariableV1(10.0, name="x")
y = variables.VariableV1(20.0, name="y")
cond = control_flow_ops.cond(
x > y, lambda: math_ops.add(x, 1), lambda: math_ops.add(y, 1))
sess.run(variables.global_variables_initializer())
cond_result, dump = self._debug_run_and_get_dump(sess, cond)
self.assertEqual(21, cond_result)
self.assertAllClose(
[21.0], dump.get_tensors("cond/Merge", 0, "DebugIdentity"))
def testFindNodesWithBadTensorValues(self):
with session.Session() as sess:
u_name = "testFindNodesWithBadTensorValues/u"
v_name = "testFindNodesWithBadTensorValues/v"
w_name = "testFindNodesWithBadTensorValues/w"
x_name = "testFindNodesWithBadTensorValues/x"
y_name = "testFindNodesWithBadTensorValues/y"
z_name = "testFindNodesWithBadTensorValues/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.VariableV1(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
def has_bad_value(_, tensor):
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
# Find all "offending tensors".
bad_data = dump.find(has_bad_value)
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(3, len(bad_data))
self.assertEqual(x_name, bad_data[0].node_name)
self.assertEqual(y_name, bad_data[1].node_name)
self.assertEqual(z_name, bad_data[2].node_name)
# Test first_n kwarg of find(): Find the first offending tensor.
first_bad_datum = dump.find(has_bad_value, first_n=1)
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(x_name, first_bad_datum[0].node_name)
def testFindInfOrNanWithOpNameExclusion(self):
with session.Session() as sess:
u_name = "testFindInfOrNanWithOpNameExclusion/u"
v_name = "testFindInfOrNanWithOpNameExclusion/v"
w_name = "testFindInfOrNanWithOpNameExclusion/w"
x_name = "testFindInfOrNanWithOpNameExclusion/x"
y_name = "testFindInfOrNanWithOpNameExclusion/y"
z_name = "testFindInfOrNanWithOpNameExclusion/z"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v_init = constant_op.constant([2.0, 1.0])
v = variables.VariableV1(v_init, name=v_name)
# Expected output: [0.0, 3.0]
w = math_ops.subtract(u, v, name=w_name)
# Expected output: [inf, 1.3333]
x = math_ops.div(u, w, name=x_name)
# Expected output: [nan, 4.0]
y = math_ops.multiply(w, x, name=y_name)
z = math_ops.multiply(y, y, name=z_name)
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, z,
expected_partition_graph_count=self._expected_partition_graph_count)
# Find all "offending tensors".
bad_data = dump.find(debug_data.has_inf_or_nan,
exclude_node_names=".*/x$")
# Verify that the nodes with bad values are caught through running find
# on the debug dump.
self.assertEqual(2, len(bad_data))
# Assert that the node `x` should have been excluded.
self.assertEqual(y_name, bad_data[0].node_name)
self.assertEqual(z_name, bad_data[1].node_name)
first_bad_datum = dump.find(
debug_data.has_inf_or_nan, first_n=1, exclude_node_names=".*/x$")
self.assertEqual(1, len(first_bad_datum))
self.assertEqual(y_name, first_bad_datum[0].node_name)
def _session_run_for_graph_structure_lookup(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpGraphStructureLookup/u"
v_name = "testDumpGraphStructureLookup/v"
w_name = "testDumpGraphStructureLookup/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
_, dump = self._debug_run_and_get_dump(
sess, w,
expected_partition_graph_count=self._expected_partition_graph_count)
return u_name, v_name, w_name, dump
def testGraphStructureLookupGivesDevicesAndNodesInfo(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
# Test num_devices().
self.assertEqual(self._expected_num_devices, len(dump.devices()))
# Test node_device().
self.assertEqual(self._main_device, dump.node_device(u_name))
with self.assertRaisesRegexp(ValueError,
"does not exist in partition graphs"):
dump.node_device(u_name + "foo")
# Test node_exists().
self.assertTrue(dump.node_exists(u_name))
self.assertTrue(dump.node_exists(u_name + "/read"))
self.assertFalse(dump.node_exists(u_name + "/read" + "/foo"))
def testGraphStructureLookupGivesNodesAndAttributes(self):
u_name, _, _, dump = self._session_run_for_graph_structure_lookup()
u_read_name = u_name + "/read"
# Test node name list lookup of the DebugDumpDir object.
if test_util.gpu_device_name():
node_names = dump.nodes(
device_name="/job:localhost/replica:0/task:0/device:GPU:0")
else:
node_names = dump.nodes()
self.assertTrue(u_name in node_names)
self.assertTrue(u_read_name in node_names)
# Test querying node attributes.
u_attr = dump.node_attributes(u_name)
self.assertEqual(dtypes.float32, u_attr["dtype"].type)
self.assertEqual(1, len(u_attr["shape"].shape.dim))
self.assertEqual(2, u_attr["shape"].shape.dim[0].size)
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_attributes("foo")
def testGraphStructureLookupGivesDebugWatchKeys(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
# Test querying the debug watch keys with node names.
self.assertEqual(["%s:0:DebugIdentity" % u_name],
dump.debug_watch_keys(u_name))
self.assertEqual(["%s:0:DebugIdentity" % v_name],
dump.debug_watch_keys(v_name))
self.assertEqual(["%s:0:DebugIdentity" % w_name],
dump.debug_watch_keys(w_name))
self.assertEqual([], dump.debug_watch_keys("foo"))
# Test querying debug datum instances from debug watch.
u_data = dump.watch_key_to_data(dump.debug_watch_keys(u_name)[0])
self.assertEqual(1, len(u_data))
self.assertEqual(u_name, u_data[0].node_name)
self.assertEqual(0, u_data[0].output_slot)
self.assertEqual("DebugIdentity", u_data[0].debug_op)
self.assertGreaterEqual(u_data[0].timestamp, 0)
self.assertEqual([], dump.watch_key_to_data("foo"))
def testGraphStructureLookupGivesNodeInputsAndRecipients(self):
u_name, v_name, w_name, dump = (
self._session_run_for_graph_structure_lookup())
u_read_name = u_name + "/read"
# Test the inputs lookup of the DebugDumpDir object.
self.assertEqual([], dump.node_inputs(u_name))
self.assertEqual([u_name], dump.node_inputs(u_read_name))
self.assertEqual([u_read_name] * 2, dump.node_inputs(v_name))
self.assertEqual([v_name] * 2, dump.node_inputs(w_name))
self.assertEqual([], dump.node_inputs(u_name, is_control=True))
self.assertEqual([], dump.node_inputs(u_read_name, is_control=True))
self.assertEqual([], dump.node_inputs(v_name, is_control=True))
self.assertEqual([], dump.node_inputs(w_name, is_control=True))
# Test the outputs recipient lookup of the DebugDumpDir object.
self.assertTrue(u_read_name in dump.node_recipients(u_name))
self.assertEqual(2, dump.node_recipients(u_read_name).count(v_name))
self.assertEqual(2, dump.node_recipients(v_name).count(w_name))
self.assertEqual([], dump.node_recipients(u_name, is_control=True))
self.assertEqual([], dump.node_recipients(u_read_name, is_control=True))
self.assertEqual([], dump.node_recipients(v_name, is_control=True))
self.assertEqual([], dump.node_recipients(w_name, is_control=True))
# Test errors raised on invalid node names.
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_inputs(u_name + "foo")
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.node_recipients(u_name + "foo")
# Test transitive_inputs().
self.assertEqual([], dump.transitive_inputs(u_name))
self.assertEqual([u_name], dump.transitive_inputs(u_read_name))
self.assertEqual(
set([u_name, u_read_name]), set(dump.transitive_inputs(v_name)))
self.assertEqual(
set([u_name, u_read_name, v_name]), set(dump.transitive_inputs(w_name)))
with self.assertRaisesRegexp(
ValueError, r"None of the .* device\(s\) has a node named "):
dump.transitive_inputs(u_name + "foo")
def testGraphStructureLookupWithoutPartitionGraphsDoesNotErrorOut(self):
_, _, _, dump = self._session_run_for_graph_structure_lookup()
# Now load the dump again, without the partition graphs, so we can check
# errors are not raised because the partition graphs are loaded from the
# dump directory.
dump = debug_data.DebugDumpDir(self._dump_root, validate=False)
self.assertTrue(dump.loaded_partition_graphs())
def testGraphPathFindingOnControlEdgesWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v1 = variables.VariableV1(1.0, name="v1")
v2 = variables.VariableV1(2.0, name="v2")
v3 = variables.VariableV1(3.0, name="v3")
a = math_ops.add(v1, v2, name="a")
with ops.control_dependencies([a]):
c = math_ops.subtract(v3, v3, name="c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, c)
self.assertEqual(["v1", "v1/read", "a", "c"],
dump.find_some_path("v1", "c"))
self.assertIsNone(dump.find_some_path("v1", "c", include_control=False))
def testGraphPathFindingReverseRefEdgeWorks(self):
with session.Session(config=no_rewrite_session_config()) as sess:
v = variables.VariableV1(10.0, name="v")
delta = variables.VariableV1(1.0, name="delta")
inc_v = state_ops.assign_add(v, delta, name="inc_v")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, inc_v)
self.assertEqual(
["delta", "delta/read", "inc_v", "v"],
dump.find_some_path("delta", "v", include_reversed_ref=True))
self.assertIsNone(dump.find_some_path("delta", "v"))
def testCausalityCheckOnDumpsDetectsWrongTemporalOrder(self):
with session.Session(config=no_rewrite_session_config()) as sess:
u_name = "testDumpCausalityCheck/u"
v_name = "testDumpCausalityCheck/v"
w_name = "testDumpCausalityCheck/w"
u_init = constant_op.constant([2.0, 4.0])
u = variables.VariableV1(u_init, name=u_name)
v = math_ops.add(u, u, name=v_name)
w = math_ops.add(v, v, name=w_name)
u.initializer.run()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run(w, options=run_options, run_metadata=run_metadata)
self.assertEqual(self._expected_partition_graph_count,
len(run_metadata.partition_graphs))
# First, loading the original dump without supplying the
# partition_graphs should not cause a LookupError; validation occurs
# only with partition_graphs loaded.
debug_data.DebugDumpDir(self._dump_root)
# Now, loading the original dump with partition graphs supplied should
# succeed. The validation should pass quietly.
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Get the dump file names and compute their timestamps.
self.assertEqual(
1, len(dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")))
v_file_path = dump.get_tensor_file_paths(v_name, 0, "DebugIdentity")[0]
self.assertEqual(
1, len(dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")))
w_file_path = dump.get_tensor_file_paths(w_name, 0, "DebugIdentity")[0]
v_timestamp = int(v_file_path[v_file_path.rindex("_") + 1:])
w_timestamp = int(w_file_path[w_file_path.rindex("_") + 1:])
# Swap and slightly shift the time stamps of the last two dumped tensors,
# to simulate "causality violation", which can happen if the dump
# directory contains incomplete data and/or mixes data from different
# Session.run() calls.
v_file_path_1 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_1 = w_file_path[:w_file_path.rindex("_")] + "_%d" % (
v_timestamp - 1)
os.rename(v_file_path, v_file_path_1)
os.rename(w_file_path, w_file_path_1)
# Load the dump directory again. Now a ValueError is expected to be
# raised due to the timestamp swap.
with self.assertRaisesRegexp(ValueError, "Causality violated"):
dump = debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
# Loading the dump directory with kwarg "validate" set explicitly to
# False should get rid of the error.
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=False)
# Next, set the two timestamps to be the same, which should be fine.
v_file_path_2 = v_file_path[:v_file_path.rindex(
"_")] + "_%d" % w_timestamp
w_file_path_2 = w_file_path[:w_file_path.rindex(
"_")] + "_%d" % w_timestamp
os.rename(v_file_path_1, v_file_path_2)
os.rename(w_file_path_1, w_file_path_2)
debug_data.DebugDumpDir(
self._dump_root, partition_graphs=run_metadata.partition_graphs)
def testWatchingOnlyOneOfTwoOutputSlotsDoesNotLeadToCausalityFailure(self):
with session.Session() as sess:
x_name = "oneOfTwoSlots/x"
u_name = "oneOfTwoSlots/u"
v_name = "oneOfTwoSlots/v"
w_name = "oneOfTwoSlots/w"
y_name = "oneOfTwoSlots/y"
x = variables.VariableV1([1, 3, 3, 7], dtype=dtypes.int32, name=x_name)
sess.run(x.initializer)
unique_x, indices, _ = array_ops.unique_with_counts(x, name=u_name)
v = math_ops.add(unique_x, unique_x, name=v_name)
w = math_ops.add(indices, indices, name=w_name)
y = math_ops.add(w, w, name=y_name)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
# Watch only the first output slot of u, even though it has two output
# slots.
debug_utils.add_debug_tensor_watch(
run_options, u_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, w_name, 0, debug_urls=self._debug_urls())
debug_utils.add_debug_tensor_watch(
run_options, y_name, 0, debug_urls=self._debug_urls())
run_metadata = config_pb2.RunMetadata()
sess.run([v, y], options=run_options, run_metadata=run_metadata)
dump = debug_data.DebugDumpDir(
self._dump_root,
partition_graphs=run_metadata.partition_graphs,
validate=True)
self.assertAllClose([1, 3, 7],
dump.get_tensors(u_name, 0, "DebugIdentity")[0])
def testOutputSlotWithoutOutgoingEdgeCanBeWatched(self):
"""Test watching output slots not attached to any outgoing edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init_val = np.array([[5.0, 3.0], [-1.0, 0.0]])
u = constant_op.constant(u_init_val, shape=[2, 2], name="u")
# Create a control edge from a node with an output: From u to z.
# Node u will get executed only because of the control edge. The output
# tensor u:0 is not attached to any outgoing edge in the graph. This test
# checks that the debugger can watch such a tensor.
with ops.control_dependencies([u]):
z = control_flow_ops.no_op(name="z")
_, dump = self._debug_run_and_get_dump(sess, z)
# Assert that the DebugIdentity watch on u works properly.
self.assertEqual(1, len(dump.dumped_tensor_data))
datum = dump.dumped_tensor_data[0]
self.assertEqual("u", datum.node_name)
self.assertEqual(0, datum.output_slot)
self.assertEqual("DebugIdentity", datum.debug_op)
self.assertAllClose([[5.0, 3.0], [-1.0, 0.0]], datum.get_tensor())
def testWatchingVariableUpdateOpsSeesUpdatedValues(self):
"""Watch output slots on Variable-updating ops, with no emitted edges."""
with session.Session(config=no_rewrite_session_config()) as sess:
u_init = constant_op.constant(10.0)
u = variables.VariableV1(u_init, name="gdo/u")
v_init = constant_op.constant(20.0)
v = variables.VariableV1(v_init, name="gdo/v")
w = math_ops.multiply(u, v, name="gdo/w")
# gdo stands for GradientDescentOptimizer.
train_op = gradient_descent.GradientDescentOptimizer(
learning_rate=0.1).minimize(
w, name="gdo/train")
u.initializer.run()
v.initializer.run()
_, dump = self._debug_run_and_get_dump(sess, train_op)
update_u_data = dump.watch_key_to_data(
"gdo/train/update_gdo/u/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_u_data))
# Gradient descent on u: w = u * v, so dw / du = v.
# Updated value of u should be:
# 10.0 - learning_rate * v = 10.0 - 0.1 * 20.0 = 8.0
self.assertAllClose(8.0, update_u_data[0].get_tensor())
update_v_data = dump.watch_key_to_data(
"gdo/train/update_gdo/v/ApplyGradientDescent:0:DebugIdentity")
self.assertEqual(1, len(update_v_data))
# Gradient descent on v: w = u * v, so dw / dv = u.
# Updated value of v should be:
# 20.0 - learning_rate * u = 20.0 - 0.1 * 10.0 = 19.0
self.assertAllClose(19.0, update_v_data[0].get_tensor())
# Verify that the Variables u and v are updated properly.
self.assertAllClose(8.0, sess.run(u))
self.assertAllClose(19.0, sess.run(v))
def testAllowsWatchingUnconnectedOutputTensor(self):
"""Watch an output slot not emitting any edges.
(Not even control edges from the node.)
"""
with session.Session() as sess:
x_init = constant_op.constant([2, 2, 3, 5, 5])
x = variables.VariableV1(x_init, name="unconnected/x")
# The UniqueOp (tf.unique) has two output slots. Use only slot 0 in the
# graph. Let the debugger watch the unused slot 1.
unique_x, _ = array_ops.unique(x, name="unconnected/unique_x")
y = math_ops.add(unique_x, [0, 1, 2], name="unconnected/y")
x.initializer.run()
# Verify that only slot 0 of unique_x has recipients, while slot 1 of the
# same node does not have recipients.
unique_x_slot_0_recipients = []
unique_x_slot_1_recipients = []
for op in sess.graph.get_operations():
for inp in op.inputs:
if inp.name == "unconnected/unique_x:0":
unique_x_slot_0_recipients.append(op.name)
elif inp.name == "unconnected/unique_x:1":
unique_x_slot_1_recipients.append(op.name)
self.assertEqual(["unconnected/y"], unique_x_slot_0_recipients)
self.assertEqual([], unique_x_slot_1_recipients)
y_result, dump = self._debug_run_and_get_dump(sess, y)
self.assertAllClose([2, 4, 7], y_result)
# Assert that the connected slot (slot 0) is dumped properly.
unique_x_slot_0_dumps = dump.watch_key_to_data(
"unconnected/unique_x:0:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_0_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_0_dumps[0].node_name)
self.assertEqual(0, unique_x_slot_0_dumps[0].output_slot)
self.assertAllClose([2, 3, 5], unique_x_slot_0_dumps[0].get_tensor())
# Assert that the unconnected slot (slot 1) is dumped properly.
unique_x_slot_1_dumps = dump.watch_key_to_data(
"unconnected/unique_x:1:DebugIdentity")
self.assertEqual(1, len(unique_x_slot_1_dumps))
self.assertEqual("unconnected/unique_x",
unique_x_slot_1_dumps[0].node_name)
self.assertEqual(1, unique_x_slot_1_dumps[0].output_slot)
self.assertAllClose([0, 0, 1, 2, 2],
unique_x_slot_1_dumps[0].get_tensor())
def testSuccessiveDebuggingRunsIncreasesCounters(self):
"""Test repeated Session.run() calls with debugger increments counters."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="successive/ph")
x = array_ops.transpose(ph, name="mismatch/x")
y = array_ops.squeeze(ph, name="mismatch/y")
_, dump1 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=1)
self.assertEqual(1, dump1.core_metadata.global_step)
self.assertGreaterEqual(dump1.core_metadata.session_run_index, 0)
self.assertEqual(0, dump1.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump1.core_metadata.input_names)
self.assertEqual([x.name], dump1.core_metadata.output_names)
self.assertEqual([], dump1.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
# Calling run() with the same feed, same output and same debug watch
# options should increment both session_run_index and
# executor_step_index.
_, dump2 = self._debug_run_and_get_dump(
sess, x, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=2)
self.assertEqual(2, dump2.core_metadata.global_step)
self.assertEqual(dump1.core_metadata.session_run_index + 1,
dump2.core_metadata.session_run_index)
self.assertEqual(dump1.core_metadata.executor_step_index + 1,
dump2.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump2.core_metadata.input_names)
self.assertEqual([x.name], dump2.core_metadata.output_names)
self.assertEqual([], dump2.core_metadata.target_nodes)
shutil.rmtree(self._dump_root)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=self._debug_urls(), global_step=3)
# Calling run() with a different output should increment
# session_run_index, but not executor_step_index.
_, dump3 = self._debug_run_and_get_dump(
sess, y, feed_dict={ph: np.array([[7.0, 8.0]])}, global_step=3)
self.assertEqual(3, dump3.core_metadata.global_step)
self.assertEqual(dump2.core_metadata.session_run_index + 1,
dump3.core_metadata.session_run_index)
self.assertEqual(0, dump3.core_metadata.executor_step_index)
self.assertEqual([ph.name], dump3.core_metadata.input_names)
self.assertEqual([y.name], dump3.core_metadata.output_names)
self.assertEqual([], dump3.core_metadata.target_nodes)
def testDebuggingDuringOpError(self):
"""Test the debug tensor dumping when error occurs in graph runtime."""
with session.Session() as sess:
ph = array_ops.placeholder(dtypes.float32, name="mismatch/ph")
x = array_ops.transpose(ph, name="mismatch/x")
m = constant_op.constant(
np.array(
[[1.0, 2.0]], dtype=np.float32), name="mismatch/m")
y = math_ops.matmul(m, x, name="mismatch/y")
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugIdentity"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.OpError):
sess.run(y,
options=run_options,
feed_dict={ph: np.array([[-3.0], [0.0]])})
dump = debug_data.DebugDumpDir(self._dump_root)
self.assertGreaterEqual(dump.core_metadata.session_run_index, 0)
self.assertGreaterEqual(dump.core_metadata.executor_step_index, 0)
self.assertEqual([ph.name], dump.core_metadata.input_names)
self.assertEqual([y.name], dump.core_metadata.output_names)
self.assertEqual([], dump.core_metadata.target_nodes)
# Despite the fact that the run() call errored out and partition_graphs
# are not available via run_metadata, the partition graphs should still
# have been loaded from the dump directory.
self.assertTrue(dump.loaded_partition_graphs())
m_dumps = dump.watch_key_to_data("mismatch/m:0:DebugIdentity")
self.assertEqual(1, len(m_dumps))
self.assertAllClose(np.array([[1.0, 2.0]]), m_dumps[0].get_tensor())
x_dumps = dump.watch_key_to_data("mismatch/x:0:DebugIdentity")
self.assertEqual(1, len(x_dumps))
self.assertAllClose(np.array([[-3.0, 0.0]]), x_dumps[0].get_tensor())
def testDebugNumericSummaryOnInitializedTensorGivesCorrectResult(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(
[
np.nan, np.nan, 0.0, 0.0, 0.0, -1.0, -3.0, 3.0, 7.0, -np.inf,
-np.inf, np.inf, np.inf, np.inf, np.inf, np.inf, np.nan, np.nan
],
dtype=np.float32,
name="numeric_summary/a")
b = variables.VariableV1(
[0.0] * 18, dtype=np.float32, name="numeric_summary/b")
c = math_ops.add(a, b, name="numeric_summary/c")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(
sess, c, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
self.assertAllClose([[
1.0, 18.0, 4.0, 2.0, 2.0, 3.0, 2.0, 5.0, -3.0, 7.0, 0.85714286,
8.97959184, 1.0, 1.0, 18.0
]], dump.get_tensors("numeric_summary/a/read", 0, "DebugNumericSummary"))
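# For reference, the 15-element DebugNumericSummary vector checked above is
# laid out as: [is_initialized, element count, NaN count, -inf count,
# negative count, zero count, positive count, +inf count, min, max, mean,
# variance, dtype enum, ndims, dim sizes...] (layout inferred from the
# expected values in this test; see the op's documentation for the canonical
# definition).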
def testDebugNumericSummaryOnUninitializedTensorGivesCorrectResult(self):
with session.Session() as sess:
a = variables.VariableV1(
[42], dtype=np.float32, name="numeric_summary_uninit/a")
_, dump = self._debug_run_and_get_dump(
sess, a.initializer, debug_ops=["DebugNumericSummary"])
self.assertTrue(dump.loaded_partition_graphs())
# DebugNumericSummary output should reflect the uninitialized state of
# the watched tensor.
numeric_summary = dump.get_tensors("numeric_summary_uninit/a", 0,
"DebugNumericSummary")[0]
self.assertAllClose([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
numeric_summary[0:8])
# Check dtype (index 12), ndims (index 13) and dimension sizes (index
# 14+).
self.assertAllClose([1.0, 1.0, 1.0], numeric_summary[12:])
self.assertTrue(np.isinf(numeric_summary[8]))
self.assertGreater(numeric_summary[8], 0.0)
self.assertTrue(np.isinf(numeric_summary[9]))
self.assertLess(numeric_summary[9], 0.0)
self.assertTrue(np.isnan(numeric_summary[10]))
self.assertTrue(np.isnan(numeric_summary[11]))
def testDebugNumericSummaryFailureIsToleratedWhenOrdered(self):
with session.Session() as sess:
a = variables.VariableV1("1", name="a")
b = variables.VariableV1("3", name="b")
c = variables.VariableV1("2", name="c")
d = math_ops.add(a, b, name="d")
e = math_ops.add(d, c, name="e")
n = parsing_ops.string_to_number(e, name="n")
m = math_ops.add(n, n, name="m")
sess.run(variables.global_variables_initializer())
# Using DebugNumericSummary on sess.run(m) with the default
# tolerate_debug_op_creation_failures=False should error out due to the
# presence of string-dtype Tensors in the graph.
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary"],
debug_urls=self._debug_urls())
with self.assertRaises(errors.FailedPreconditionError):
sess.run(m, options=run_options, run_metadata=run_metadata)
# Using tolerate_debug_op_creation_failures=True should get rid of the
# error.
m_result, dump = self._debug_run_and_get_dump(
sess, m, debug_ops=["DebugNumericSummary"],
tolerate_debug_op_creation_failures=True)
self.assertEqual(264, m_result)
# The numeric (non-string) Tensors in the graph should have been dumped
# properly.
self.assertIn("n:0:DebugNumericSummary", dump.debug_watch_keys("n"))
self.assertIn("m:0:DebugNumericSummary", dump.debug_watch_keys("m"))
def testDebugNumericSummaryInvalidAttributesStringAreCaught(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(10.0, name="a")
b = variables.VariableV1(0.0, name="b")
c = variables.VariableV1(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
run_metadata = config_pb2.RunMetadata()
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; bar=false)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"2 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary:"):
sess.run(y, options=run_options, run_metadata=run_metadata)
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options,
sess.graph,
debug_ops=["DebugNumericSummary(foo=1.0; mute_if_healthy=true)"],
debug_urls=self._debug_urls())
with self.assertRaisesRegexp(
errors.FailedPreconditionError,
r"1 attribute key\(s\) were not valid for debug node "
r"__dbg_.:0_0_DebugNumericSummary: foo"):
sess.run(y, options=run_options, run_metadata=run_metadata)
def testDebugNumericSummaryMuteOnHealthyMutesOnlyHealthyTensorDumps(self):
with session.Session(config=no_rewrite_session_config()) as sess:
a = variables.VariableV1(10.0, name="a")
b = variables.VariableV1(0.0, name="b")
c = variables.VariableV1(0.0, name="c")
x = math_ops.divide(a, b, name="x")
y = math_ops.multiply(x, c, name="y")
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary(mute_if_healthy=true)"],
validate=False)
self.assertEqual(2, dump.size)
self.assertAllClose([[
1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("x", 0, "DebugNumericSummary"))
self.assertAllClose([[
1.0, 1.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, np.inf, -np.inf, np.nan,
np.nan, 1.0, 0.0
]], dump.get_tensors("y", 0, "DebugNumericSummary"))
# Another run with the default mute_if_healthy (false) value should
# dump all the tensors.
shutil.rmtree(self._dump_root)
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=["DebugNumericSummary()"])
self.assertEqual(8, dump.size)
def testDebugNumericSummaryMuteOnHealthyAndCustomBoundsWork(self):
with session.Session() as sess:
a = variables.VariableV1([10.0, 10.0], name="a")
b = variables.VariableV1([10.0, 2.0], name="b")
x = math_ops.add(a, b, name="x") # [20.0, 12.0]
y = math_ops.divide(x, b, name="y") # [2.0, 6.0]
sess.run(variables.global_variables_initializer())
# Here, validate=False is necessary to avoid causality check error.
# TODO(cais): Maybe let DebugDumpDir constructor automatically ignore
# debug ops with mute_if_healthy=false attribute during validation.
_, dump = self._debug_run_and_get_dump(
sess, y, debug_ops=[
"DebugNumericSummary(mute_if_healthy=true; upper_bound=11.0)"],
validate=False)
self.assertEqual(1, dump.size)
self.assertAllClose([[
1.0, 2.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.0, 12.0, 20.0, 16.0, 16.0, 1.0,
1.0, 2.0]], dump.get_tensors("x", 0, "DebugNumericSummary"))
def testDebugQueueOpsDoesNotErrorOut(self):
with session.Session() as sess:
q = data_flow_ops.FIFOQueue(3, "float", name="fifo_queue")
q_init = q.enqueue_many(([101.0, 202.0, 303.0],), name="enqueue_many")
_, dump = self._debug_run_and_get_dump(sess, q_init)
self.assertTrue(dump.loaded_partition_graphs())
fifo_queue_tensor = dump.get_tensors("fifo_queue", 0, "DebugIdentity")[0]
self.assertIsInstance(fifo_queue_tensor,
debug_data.InconvertibleTensorProto)
self.assertTrue(fifo_queue_tensor.initialized)
self.assertAllClose(
[101.0, 202.0, 303.0],
dump.get_tensors("enqueue_many/component_0", 0, "DebugIdentity")[0])
def testLookUpNodePythonTracebackWorks(self):
with session.Session() as sess:
u_init = constant_op.constant(10.0)
u = variables.VariableV1(u_init, name="traceback/u")
v_init = constant_op.constant(20.0)
v = variables.VariableV1(v_init, name="traceback/v")
w = math_ops.multiply(u, v, name="traceback/w")
sess.run(variables.global_variables_initializer())
_, dump = self._debug_run_and_get_dump(sess, w)
# Prior to setting the Python graph, attempts to do traceback lookup
# should lead to exceptions.
with self.assertRaisesRegexp(
LookupError, "Python graph is not available for traceback lookup"):
dump.node_traceback("traceback/w")
dump.set_python_graph(sess.graph)
# After setting the Python graph, attempts to look up nonexistent nodes
# should lead to exceptions.
with self.assertRaisesRegexp(KeyError,
r"Cannot find node \"foo\" in Python graph"):
dump.node_traceback("foo")
# Lookup should work with node name input.
traceback = dump.node_traceback("traceback/w")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
# Lookup should also work with tensor name input.
traceback = dump.node_traceback("traceback/w:0")
self.assertIsInstance(traceback, list)
self.assertGreater(len(traceback), 0)
for trace in traceback:
self.assertIsInstance(trace, tuple)
class DebugConcurrentRunCallsTest(test_util.TensorFlowTestCase):
"""Test for debugging concurrent Session.run() calls."""
def _get_concurrent_debug_urls(self):
"""Abstract method to generate debug URLs for concurrent debugged runs."""
raise NotImplementedError(
"_get_concurrent_debug_urls is not implemented in the base test class")
def testDebugConcurrentVariableUpdates(self):
if test.is_gpu_available():
self.skipTest("No testing concurrent runs on a single GPU.")
with session.Session() as sess:
v = variables.VariableV1(30.0, name="v")
constants = []
for i in xrange(self._num_concurrent_runs):
constants.append(constant_op.constant(1.0, name="c%d" % i))
incs = [
state_ops.assign_add(
v, c, use_locking=True, name=("inc%d" % i))
for (i, c) in enumerate(constants)
]
sess.run(v.initializer)
concurrent_debug_urls = self._get_concurrent_debug_urls()
def inc_job(index):
run_options = config_pb2.RunOptions(output_partition_graphs=True)
debug_utils.watch_graph(
run_options, sess.graph, debug_urls=concurrent_debug_urls[index])
for _ in xrange(100):
sess.run(incs[index], options=run_options)
inc_threads = []
for index in xrange(self._num_concurrent_runs):
inc_thread = threading.Thread(target=functools.partial(inc_job, index))
inc_thread.start()
inc_threads.append(inc_thread)
for inc_thread in inc_threads:
inc_thread.join()
self.assertAllClose(30.0 + 1.0 * self._num_concurrent_runs * 100,
sess.run(v))
all_session_run_indices = []
for index in xrange(self._num_concurrent_runs):
dump = debug_data.DebugDumpDir(self._dump_roots[index])
self.assertTrue(dump.loaded_partition_graphs())
v_data = dump.get_tensors("v", 0, "DebugIdentity")
self.assertEqual(100, len(v_data))
# Examine all the core metadata files
core_metadata_files = glob.glob(
os.path.join(self._dump_roots[index], "_tfdbg_core*"))
timestamps = []
session_run_indices = []
executor_step_indices = []
for core_metadata_file in core_metadata_files:
with open(core_metadata_file, "rb") as f:
event = event_pb2.Event()
event.ParseFromString(f.read())
core_metadata = (
debug_data.extract_core_metadata_from_event_proto(event))
timestamps.append(event.wall_time)
session_run_indices.append(core_metadata.session_run_index)
executor_step_indices.append(core_metadata.executor_step_index)
all_session_run_indices.extend(session_run_indices)
# Assert that executor_step_index increases by one at a time.
executor_step_indices = zip(timestamps, executor_step_indices)
executor_step_indices = sorted(
executor_step_indices, key=lambda x: x[0])
for i in xrange(len(executor_step_indices) - 1):
self.assertEquals(executor_step_indices[i][1] + 1,
executor_step_indices[i + 1][1])
# Assert that session_run_index increase monotonically.
session_run_indices = zip(timestamps, session_run_indices)
session_run_indices = sorted(session_run_indices, key=lambda x: x[0])
for i in xrange(len(session_run_indices) - 1):
self.assertGreater(session_run_indices[i + 1][1],
session_run_indices[i][1])
# Assert that the session_run_indices from the concurrent run() calls are
# all unique.
self.assertEqual(len(all_session_run_indices),
len(set(all_session_run_indices)))
if __name__ == "__main__":
googletest.main()
| apache-2.0 |
neteler/QGIS | python/plugins/processing/algs/lidar/fusion/GridSurfaceCreate.py | 7 | 5821 | # -*- coding: utf-8 -*-
"""
***************************************************************************
GridSurfaceCreate.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
---------------------
Date : June 2014
Copyright : (C) 2014 by Agresta S. Coop.
Email : iescamochero at agresta dot org
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from processing.core.parameters import ParameterFile
from processing.core.parameters import ParameterNumber
from processing.core.parameters import ParameterBoolean
from processing.core.parameters import ParameterSelection
from processing.core.outputs import OutputFile
from FusionAlgorithm import FusionAlgorithm
from FusionUtils import FusionUtils
from processing.core.parameters import ParameterString
class GridSurfaceCreate(FusionAlgorithm):
INPUT = 'INPUT'
OUTPUT_DTM = 'OUTPUT_DTM'
CELLSIZE = 'CELLSIZE'
XYUNITS = 'XYUNITS'
ZUNITS = 'ZUNITS'
UNITS = ['Meter', 'Feet']
SPIKE = 'SPIKE'
MEDIAN = 'MEDIAN'
SMOOTH = 'SMOOTH'
SLOPE = 'SLOPE'
MINIMUM = 'MINIMUM'
CLASS = 'CLASS'
ADVANCED_MODIFIERS = 'ADVANCED_MODIFIERS'
def defineCharacteristics(self):
self.name = 'Grid Surface Create'
self.group = 'Surface'
self.addParameter(ParameterFile(
self.INPUT, self.tr('Input las layer')))
self.addParameter(ParameterNumber(
self.CELLSIZE, self.tr('Cellsize'), 0, None, 10.0))
self.addParameter(ParameterSelection(
self.XYUNITS, self.tr('XY Units'), self.UNITS))
self.addParameter(ParameterSelection(
self.ZUNITS, self.tr('Z Units'), self.UNITS))
self.addOutput(OutputFile(
self.OUTPUT_DTM, self.tr('DTM Output Surface'), 'dtm'))
spike = ParameterString(
self.SPIKE, self.tr('Spike (set blank if not used)'), '', False, True)
spike.isAdvanced = True
self.addParameter(spike)
median = ParameterString(
self.MEDIAN, self.tr('Median'), '', False, True)
median.isAdvanced = True
self.addParameter(median)
smooth = ParameterString(
self.SMOOTH, self.tr('Smooth'), '', False, True)
smooth.isAdvanced = True
self.addParameter(smooth)
slope = ParameterString(
self.SLOPE, self.tr('Slope'), '', False, True)
slope.isAdvanced = True
self.addParameter(slope)
minimum = ParameterBoolean(
self.MINIMUM, self.tr('Minimum (set blank if not used)'), False)
minimum.isAdvanced = True
self.addParameter(minimum)
class_var = ParameterString(
self.CLASS, self.tr('Class(es)'), 2, False, True)
class_var.isAdvanced = True
self.addParameter(class_var)
advance_modifiers = ParameterString(
self.ADVANCED_MODIFIERS, self.tr('Additional modifiers'), '', False, True)
advance_modifiers.isAdvanced = True
self.addParameter(advance_modifiers)
def processAlgorithm(self, progress):
commands = [os.path.join(FusionUtils.FusionPath(), 'GridSurfaceCreate.exe')]
commands.append('/verbose')
spike = self.getParameterValue(self.SPIKE)
if str(spike).strip():
commands.append('/spike:' + str(spike))
median = self.getParameterValue(self.MEDIAN)
if str(median).strip():
commands.append('/median:' + str(median))
smooth = self.getParameterValue(self.SMOOTH)
if str(smooth).strip():
commands.append('/smooth:' + str(smooth))
slope = self.getParameterValue(self.SLOPE)
if str(slope).strip():
commands.append('/slope:' + str(slope))
minimum = self.getParameterValue(self.MINIMUM)
if str(minimum).strip():
commands.append('/minimum:' + str(minimum))
class_var = self.getParameterValue(self.CLASS)
if str(class_var).strip():
commands.append('/class:' + str(class_var))
advance_modifiers = str(self.getParameterValue(self.ADVANCED_MODIFIERS)).strip()
if advance_modifiers:
commands.append(advance_modifiers)
commands.append(self.getOutputValue(self.OUTPUT_DTM))
commands.append(str(self.getParameterValue(self.CELLSIZE)))
commands.append(self.UNITS[self.getParameterValue(self.XYUNITS)][0])
commands.append(self.UNITS[self.getParameterValue(self.ZUNITS)][0])
commands.append('0')
commands.append('0')
commands.append('0')
commands.append('0')
files = self.getParameterValue(self.INPUT).split(';')
if len(files) == 1:
commands.append(self.getParameterValue(self.INPUT))
else:
FusionUtils.createFileList(files)
commands.append(FusionUtils.tempFileListFilepath())
FusionUtils.runFusion(commands, progress)
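# Representative command line assembled by processAlgorithm (paths and values
# are illustrative only, not taken from a real run):
#   GridSurfaceCreate.exe /verbose /median:3 /class:2 out.dtm 10.0 M M 0 0 0 0 points.las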
| gpl-2.0 |
DanielSBrown/osf.io | framework/mongo/__init__.py | 43 | 1181 | # -*- coding: utf-8 -*-
from flask import request
from modularodm.storedobject import StoredObject as GenericStoredObject
from modularodm.ext.concurrency import with_proxies, proxied_members
from bson import ObjectId
from .handlers import client, database, set_up_storage
from api.base.api_globals import api_globals
class DummyRequest(object):
pass
dummy_request = DummyRequest()
def get_cache_key():
"""
Fetch a request key from either a Django or Flask request. Fall back on a process-global dummy object
if we are not in either type of request.
"""
# TODO: This is ugly use of exceptions; is there a better way to track whether in a given type of request?
try:
return request._get_current_object()
except RuntimeError: # Not in a flask request context
if getattr(api_globals, 'request', None) is not None:
return api_globals.request
else: # Not in a Django request
return dummy_request
@with_proxies(proxied_members, get_cache_key)
class StoredObject(GenericStoredObject):
pass
__all__ = [
'StoredObject',
'ObjectId',
'client',
'database',
'set_up_storage',
]
| apache-2.0 |
tgreenyc/heat-templates | hot/software-config/elements/heat-config-docker-compose/install.d/hook-docker-compose.py | 8 | 3705 | #!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ast
import dpath
import json
import logging
import os
import subprocess
import sys
import yaml
WORKING_DIR = os.environ.get('HEAT_DOCKER_COMPOSE_WORKING',
'/var/lib/heat-config/heat-config-docker-compose')
DOCKER_COMPOSE_CMD = os.environ.get('HEAT_DOCKER_COMPOSE_CMD',
'docker-compose')
def prepare_dir(path):
if not os.path.isdir(path):
os.makedirs(path, 0o700)
def write_input_file(file_path, content):
prepare_dir(os.path.dirname(file_path))
with os.fdopen(os.open(
file_path, os.O_CREAT | os.O_WRONLY, 0o600), 'w') as f:
f.write(content.encode('utf-8'))
def build_response(deploy_stdout, deploy_stderr, deploy_status_code):
return {
'deploy_stdout': deploy_stdout,
'deploy_stderr': deploy_stderr,
'deploy_status_code': deploy_status_code,
}
def main(argv=sys.argv):
log = logging.getLogger('heat-config')
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(
logging.Formatter(
'[%(asctime)s] (%(name)s) [%(levelname)s] %(message)s'))
log.addHandler(handler)
log.setLevel('DEBUG')
c = json.load(sys.stdin)
input_values = dict((i['name'], i['value']) for i in c['inputs'])
proj = os.path.join(WORKING_DIR, c.get('name'))
prepare_dir(proj)
stdout, stderr = {}, {}
if input_values.get('deploy_action') == 'DELETE':
json.dump(build_response(stdout, stderr, 0), sys.stdout)
return
config = c.get('config', '')
if not config:
log.debug("No 'config' input found, nothing to do.")
json.dump(build_response(stdout, stderr, 0), sys.stdout)
return
# convert config to dict
if not isinstance(config, dict):
config = ast.literal_eval(json.dumps(yaml.load(config)))
os.chdir(proj)
compose_env_files = []
for value in dpath.util.values(config, '*/env_file'):
if isinstance(value, list):
compose_env_files.extend(value)
elif isinstance(value, basestring):
compose_env_files.extend([value])
input_env_files = {}
if input_values.get('env_files'):
input_env_files = dict(
(i['file_name'], i['content'])
for i in ast.literal_eval(input_values.get('env_files')))
for file in compose_env_files:
if file in input_env_files.keys():
write_input_file(file, input_env_files.get(file))
cmd = [
DOCKER_COMPOSE_CMD,
'up',
'-d',
'--no-build',
]
log.debug('Running %s' % cmd)
subproc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = subproc.communicate()
log.debug(stdout)
log.debug(stderr)
if subproc.returncode:
log.error("Error running %s. [%s]\n" % (cmd, subproc.returncode))
else:
log.debug('Completed %s' % cmd)
json.dump(build_response(stdout, stderr, subproc.returncode), sys.stdout)
if __name__ == '__main__':
sys.exit(main(sys.argv))
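# Example of the deployment document this hook reads from stdin (field values
# are illustrative only, not taken from a real deployment):
#
#   {
#     "name": "web_app",
#     "config": {"web": {"image": "nginx", "env_file": ["web.env"]}},
#     "inputs": [
#       {"name": "deploy_action", "value": "CREATE"},
#       {"name": "env_files",
#        "value": "[{'file_name': 'web.env', 'content': 'FOO=bar'}]"}
#     ]
#   }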
| apache-2.0 |
ronekko/chainer | chainer/datasets/fashion_mnist.py | 12 | 3771 | import os
import numpy
import chainer
from chainer.dataset import download
from chainer.datasets._mnist_helper import make_npz
from chainer.datasets._mnist_helper import preprocess_mnist
_fashion_mnist_labels = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
def get_fashion_mnist_labels():
"""Provide a list of the string value names of the labels.
Returns:
List of string values of the image labels.
"""
return list(_fashion_mnist_labels)
def get_fashion_mnist(withlabel=True, ndim=1, scale=1., dtype=None,
label_dtype=numpy.int32, rgb_format=False):
"""Gets the Fashion-MNIST dataset.
`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist/>`_ is a
set of fashion articles represented by grey-scale 28x28 images. In the
original images, each pixel is represented by a one-byte unsigned integer.
This function scales the pixels to floating point values in the interval
``[0, scale]``.
This function returns the training set and the test set of the official
Fashion-MNIST dataset. If ``withlabel`` is ``True``, each dataset consists
of tuples of images and labels, otherwise it only consists of images.
Args:
withlabel (bool): If ``True``, it returns datasets with labels. In this
case, each example is a tuple of an image and a label. Otherwise,
the datasets only contain images.
ndim (int): Number of dimensions of each image. The shape of each image
is determined depending on ``ndim`` as follows:
- ``ndim == 1``: the shape is ``(784,)``
- ``ndim == 2``: the shape is ``(28, 28)``
- ``ndim == 3``: the shape is ``(1, 28, 28)``
scale (float): Pixel value scale. If it is 1 (default), pixels are
scaled to the interval ``[0, 1]``.
dtype: Data type of resulting image arrays. ``chainer.config.dtype`` is
used by default (see :ref:`configuration`).
label_dtype: Data type of the labels.
rgb_format (bool): if ``ndim == 3`` and ``rgb_format`` is ``True``, the
image will be converted to rgb format by duplicating the channels
so the image shape is (3, 28, 28). Default is ``False``.
Returns:
A tuple of two datasets. If ``withlabel`` is ``True``, both datasets
are :class:`~chainer.datasets.TupleDataset` instances. Otherwise, both
datasets are arrays of images.
"""
train_raw = _retrieve_fashion_mnist_training()
dtype = chainer.get_dtype(dtype)
train = preprocess_mnist(train_raw, withlabel, ndim, scale, dtype,
label_dtype, rgb_format)
test_raw = _retrieve_fashion_mnist_test()
test = preprocess_mnist(test_raw, withlabel, ndim, scale, dtype,
label_dtype, rgb_format)
return train, test
def _retrieve_fashion_mnist_training():
base_url = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
urls = [base_url + 'train-images-idx3-ubyte.gz',
base_url + 'train-labels-idx1-ubyte.gz']
return _retrieve_fashion_mnist('train.npz', urls)
def _retrieve_fashion_mnist_test():
base_url = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
urls = [base_url + 't10k-images-idx3-ubyte.gz',
base_url + 't10k-labels-idx1-ubyte.gz']
return _retrieve_fashion_mnist('test.npz', urls)
def _retrieve_fashion_mnist(name, urls):
root = download.get_dataset_directory('pfnet/chainer/fashion-mnist')
path = os.path.join(root, name)
return download.cache_or_load_file(
path, lambda path: make_npz(path, urls), numpy.load)
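# Minimal usage sketch (illustrative only, not part of the original module;
# the first call downloads the dataset, so network access is assumed).
if __name__ == '__main__':
    train, test = get_fashion_mnist(withlabel=True, ndim=3)
    image, label = train[0]
    print(image.shape)  # (1, 28, 28); float32 pixels scaled to [0, 1]
    print(get_fashion_mnist_labels()[int(label)])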
| mit |
mcfreis/pydtls | dtls/tlock.py | 2 | 1903 | # TLock: OpenSSL lock support on thread-enabled systems.
# Copyright 2012 Ray Brown
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# The License is also distributed with this work in the file named "LICENSE."
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TLock
This module provides the callbacks required by the OpenSSL library in situations
where it is being entered concurrently by multiple threads. This module is
engaged automatically by the PyDTLS package on systems that have Python
threading support. It does not have client-visible components.
"""
from logging import getLogger
from openssl import *
try:
import threading
except ImportError:
pass
_logger = getLogger(__name__)
DO_DEBUG_LOG = False
def tlock_init():
if not globals().has_key("threading"):
return # nothing to configure
# The standard library ssl module's lock implementation is more efficient;
# do not override it if it has been established
if CRYPTO_get_id_callback():
return
global _locks
num_locks = CRYPTO_num_locks()
_locks = tuple(threading.Lock() for _ in range(num_locks))
CRYPTO_set_locking_callback(_locking_function)
def _locking_function(mode, n, file, line):
if DO_DEBUG_LOG:
_logger.debug("Thread lock: mode: %d, n: %d, file: %s, line: %d",
mode, n, file, line)
if mode & CRYPTO_LOCK:
_locks[n].acquire()
else:
_locks[n].release()
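# A hedged smoke check (an illustrative sketch, not part of the PyDTLS API):
# the module docstring notes that tlock_init is engaged automatically by the
# package, but it can also be exercised directly.  The logging setup below is
# an assumption added purely so the DO_DEBUG_LOG output would be visible.
if __name__ == "__main__":
    import logging
    logging.basicConfig(level=logging.DEBUG)
    tlock_init()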
| apache-2.0 |
imply/chuu | third_party/tlslite/tlslite/utils/ASN1Parser.py | 80 | 1038 | """Class for parsing ASN.1"""
from compat import *
from codec import *
#Takes a byte array which has a DER TLV field at its head
class ASN1Parser:
def __init__(self, bytes):
p = Parser(bytes)
p.get(1) #skip Type
#Get Length
self.length = self._getASN1Length(p)
#Get Value
self.value = p.getFixBytes(self.length)
#Assuming this is a sequence...
def getChild(self, which):
return ASN1Parser(self.getChildBytes(which))
def getChildBytes(self, which):
p = Parser(self.value)
for x in range(which+1):
markIndex = p.index
p.get(1) #skip Type
length = self._getASN1Length(p)
p.getFixBytes(length)
return p.bytes[markIndex : p.index]
#Decode the ASN.1 DER length field
def _getASN1Length(self, p):
firstLength = p.get(1)
if firstLength<=127:
return firstLength
else:
lengthLength = firstLength & 0x7F
return p.get(lengthLength)
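# A hedged usage sketch (illustrative only, not part of the original module):
# the class expects a byte sequence whose head is a DER TLV, so a minimal
# SEQUENCE of two INTEGERs shows getChild()/value in action.  The literal
# bytes below are an assumption constructed for demonstration.
def _exampleParse():
    # SEQUENCE { INTEGER 1, INTEGER 2 } in DER encoding.
    der = bytearray([0x30, 0x06, 0x02, 0x01, 0x01, 0x02, 0x01, 0x02])
    parser = ASN1Parser(der)
    first = parser.getChild(0)   # INTEGER 1
    second = parser.getChild(1)  # INTEGER 2
    return first.value, second.value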
| bsd-3-clause |
hlin117/scikit-learn | sklearn/tests/test_pipeline.py | 2 | 31779 | """
Test the pipeline module.
"""
from tempfile import mkdtemp
import shutil
import time
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_dict_equal
from sklearn.base import clone, BaseEstimator
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.externals.joblib import Memory
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class NoFit(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class NoTrans(NoFit):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class NoInvTransf(NoTrans):
def transform(self, X, y=None):
return X
class Transf(NoInvTransf):
def transform(self, X, y=None):
return X
def inverse_transform(self, X):
return X
class TransfFitParams(Transf):
def fit(self, X, y, **fit_params):
self.fit_params = fit_params
return self
class Mult(BaseEstimator):
def __init__(self, mult=1):
self.mult = mult
def fit(self, X, y):
return self
def transform(self, X):
return np.asarray(X) * self.mult
def inverse_transform(self, X):
return np.asarray(X) / self.mult
def predict(self, X):
return (np.asarray(X) * self.mult).sum(axis=1)
predict_proba = predict_log_proba = decision_function = predict
def score(self, X, y=None):
return np.sum(X)
class FitParamT(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
self.successful = False
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def fit_predict(self, X, y, should_succeed=False):
self.fit(X, y, should_succeed=should_succeed)
return self.predict(X)
def score(self, X, y=None, sample_weight=None):
if sample_weight is not None:
X = X * sample_weight
return np.sum(X)
class DummyTransf(Transf):
"""Transformer which store the column means"""
def fit(self, X, y):
self.means_ = np.mean(X, axis=0)
# store timestamp to figure out whether the result of 'fit' has been
# cached or not
self.timestamp_ = time.time()
return self
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
assert_raises_regex(TypeError,
'Last step of Pipeline should implement fit. '
'.*NoFit.*',
Pipeline, [('clf', NoFit())])
# Smoke test with only an estimator
clf = NoTrans()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't instantiate with non-transformers on the way
# Note that NoTrans implements fit, but not transform
assert_raises_regex(TypeError,
'All intermediate steps should be transformers'
'.*\\bNoTrans\\b.*',
Pipeline, [('t', NoTrans()), ('svc', clf)])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
    # Remove estimators that were copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
# invalid parameters should raise an error message
assert_raise_message(
TypeError,
"fit() got an unexpected keyword argument 'bad'",
pipe.fit, None, None, clf__bad=True
)
def test_pipeline_sample_weight_supported():
# Pipeline should pass sample_weight
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', FitParamT())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, y=None), 3)
assert_equal(pipe.score(X, y=None, sample_weight=None), 3)
assert_equal(pipe.score(X, sample_weight=np.array([2, 3])), 8)
def test_pipeline_sample_weight_unsupported():
# When sample_weight is None it shouldn't be passed
X = np.array([[1, 2]])
pipe = Pipeline([('transf', Transf()), ('clf', Mult())])
pipe.fit(X, y=None)
assert_equal(pipe.score(X), 3)
assert_equal(pipe.score(X, sample_weight=None), 3)
assert_raise_message(
TypeError,
"score() got an unexpected keyword argument 'sample_weight'",
pipe.score, X, sample_weight=np.array([2, 3])
)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(svd_solver='full', n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = PCA(n_components=2, svd_solver='randomized', whiten=True)
clf = SVC(probability=True, random_state=0, decision_function_shape='ovr')
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# As pipeline doesn't clone estimators on construction,
# it must have its own estimators
scaler_for_pipeline = StandardScaler()
km_for_pipeline = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([
('scaler', scaler_for_pipeline),
('Kmeans', km_for_pipeline)
])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA(svd_solver='full')
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_fit_predict_with_intermediate_fit_params():
# tests that Pipeline passes fit_params to intermediate steps
# when fit_predict is invoked
pipe = Pipeline([('transf', TransfFitParams()), ('clf', FitParamT())])
pipe.fit_predict(X=None,
y=None,
transf__should_get_this=True,
clf__should_succeed=True)
assert_true(pipe.named_steps['transf'].fit_params['should_get_this'])
assert_true(pipe.named_steps['clf'].successful)
assert_false('should_succeed' in pipe.named_steps['transf'].fit_params)
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
# test error if some elements do not support transform
assert_raises_regex(TypeError,
'All estimators should implement fit and '
'transform.*\\bNoTrans\\b',
FeatureUnion,
[("transform", Transf()), ("no_transform", NoTrans())])
def test_make_union():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transf"))
assert_equal(transformers, (pca, mock))
def test_make_union_kwargs():
pca = PCA(svd_solver='full')
mock = Transf()
fu = make_union(pca, mock, n_jobs=3)
assert_equal(fu.transformer_list, make_union(pca, mock).transformer_list)
assert_equal(3, fu.n_jobs)
# invalid keyword parameters should raise an error message
assert_raise_message(
TypeError,
'Unknown keyword arguments: "transformer_weights"',
make_union, pca, mock, transformer_weights={'pca': 10, 'Transf': 1}
)
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2, svd_solver='full')
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transf = Transf()
pipeline = Pipeline([('mock', transf)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transf.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_set_pipeline_steps():
transf1 = Transf()
transf2 = Transf()
pipeline = Pipeline([('mock', transf1)])
assert_true(pipeline.named_steps['mock'] is transf1)
# Directly setting attr
pipeline.steps = [('mock2', transf2)]
assert_true('mock' not in pipeline.named_steps)
assert_true(pipeline.named_steps['mock2'] is transf2)
assert_equal([('mock2', transf2)], pipeline.steps)
# Using set_params
pipeline.set_params(steps=[('mock', transf1)])
assert_equal([('mock', transf1)], pipeline.steps)
# Using set_params to replace single step
pipeline.set_params(mock=transf2)
assert_equal([('mock', transf2)], pipeline.steps)
# With invalid data
pipeline.set_params(steps=[('junk', ())])
assert_raises(TypeError, pipeline.fit, [[1]], [1])
assert_raises(TypeError, pipeline.fit_transform, [[1]], [1])
def test_pipeline_named_steps():
transf = Transf()
mult2 = Mult(mult=2)
pipeline = Pipeline([('mock', transf), ("mult", mult2)])
# Test access via named_steps bunch object
assert_true('mock' in pipeline.named_steps)
assert_true('mock2' not in pipeline.named_steps)
assert_true(pipeline.named_steps.mock is transf)
assert_true(pipeline.named_steps.mult is mult2)
# Test bunch with conflict attribute of dict
pipeline = Pipeline([('values', transf), ("mult", mult2)])
assert_true(pipeline.named_steps.values is not transf)
assert_true(pipeline.named_steps.mult is mult2)
def test_set_pipeline_step_none():
# Test setting Pipeline steps to None
X = np.array([[1]])
y = np.array([1])
mult2 = Mult(mult=2)
mult3 = Mult(mult=3)
mult5 = Mult(mult=5)
def make():
return Pipeline([('m2', mult2), ('m3', mult3), ('last', mult5)])
pipeline = make()
exp = 2 * 3 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline.set_params(m3=None)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_dict_equal(pipeline.get_params(deep=True),
{'steps': pipeline.steps,
'm2': mult2,
'm3': None,
'last': mult5,
'memory': None,
'm2__mult': 2,
'last__mult': 5,
})
pipeline.set_params(m2=None)
exp = 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
# for other methods, ensure no AttributeErrors on None:
other_methods = ['predict_proba', 'predict_log_proba',
'decision_function', 'transform', 'score']
for method in other_methods:
getattr(pipeline, method)(X)
pipeline.set_params(m2=mult2)
exp = 2 * 5
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
pipeline = make()
pipeline.set_params(last=None)
# mult2 and mult3 are active
exp = 6
assert_array_equal([[exp]], pipeline.fit(X, y).transform(X))
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
assert_raise_message(AttributeError,
"'NoneType' object has no attribute 'predict'",
getattr, pipeline, 'predict')
# Check None step at construction time
exp = 2 * 5
pipeline = Pipeline([('m2', mult2), ('m3', None), ('last', mult5)])
assert_array_equal([[exp]], pipeline.fit_transform(X, y))
assert_array_equal([exp], pipeline.fit(X).predict(X))
assert_array_equal(X, pipeline.inverse_transform([[exp]]))
def test_pipeline_ducktyping():
pipeline = make_pipeline(Mult(5))
pipeline.predict
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(None)
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
pipeline.inverse_transform
pipeline = make_pipeline(Transf(), NoInvTransf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
pipeline = make_pipeline(NoInvTransf(), Transf())
assert_false(hasattr(pipeline, 'predict'))
pipeline.transform
assert_false(hasattr(pipeline, 'inverse_transform'))
def test_make_pipeline():
t1 = Transf()
t2 = Transf()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transf-1")
assert_equal(pipe.steps[1][0], "transf-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = PCA(n_components=2, svd_solver='randomized', random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", Transf()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
ft = FeatureUnion([("tr1", Transf())]).fit([[1]])
assert_raise_message(AttributeError,
'Transformer tr1 (type Transf) does not provide '
'get_feature_names', ft.get_feature_names)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
def test_set_feature_union_steps():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
mult5 = Mult(5)
mult5.get_feature_names = lambda: ['x5']
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.transform(np.asarray([[1]])))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
# Directly setting attr
ft.transformer_list = [('m5', mult5)]
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['m5__x5'], ft.get_feature_names())
# Using set_params
ft.set_params(transformer_list=[('mock', mult3)])
assert_array_equal([[3]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x3'], ft.get_feature_names())
# Using set_params to replace single step
ft.set_params(mock=mult5)
assert_array_equal([[5]], ft.transform(np.asarray([[1]])))
assert_equal(['mock__x5'], ft.get_feature_names())
def test_set_feature_union_step_none():
mult2 = Mult(2)
mult2.get_feature_names = lambda: ['x2']
mult3 = Mult(3)
mult3.get_feature_names = lambda: ['x3']
X = np.asarray([[1]])
ft = FeatureUnion([('m2', mult2), ('m3', mult3)])
assert_array_equal([[2, 3]], ft.fit(X).transform(X))
assert_array_equal([[2, 3]], ft.fit_transform(X))
assert_equal(['m2__x2', 'm3__x3'], ft.get_feature_names())
ft.set_params(m2=None)
assert_array_equal([[3]], ft.fit(X).transform(X))
assert_array_equal([[3]], ft.fit_transform(X))
assert_equal(['m3__x3'], ft.get_feature_names())
ft.set_params(m3=None)
assert_array_equal([[]], ft.fit(X).transform(X))
assert_array_equal([[]], ft.fit_transform(X))
assert_equal([], ft.get_feature_names())
# check we can change back
ft.set_params(m3=mult3)
assert_array_equal([[3]], ft.fit(X).transform(X))
def test_step_name_validation():
bad_steps1 = [('a__q', Mult(2)), ('b', Mult(3))]
bad_steps2 = [('a', Mult(2)), ('a', Mult(3))]
for cls, param in [(Pipeline, 'steps'),
(FeatureUnion, 'transformer_list')]:
# we validate in construction (despite scikit-learn convention)
bad_steps3 = [('a', Mult(2)), (param, Mult(3))]
for bad_steps, message in [
(bad_steps1, "Estimator names must not contain __: got ['a__q']"),
(bad_steps2, "Names provided are not unique: ['a', 'a']"),
(bad_steps3, "Estimator names conflict with constructor "
"arguments: ['%s']" % param),
]:
# three ways to make invalid:
# - construction
assert_raise_message(ValueError, message, cls,
**{param: bad_steps})
# - setattr
est = cls(**{param: [('a', Mult(1))]})
setattr(est, param, bad_steps)
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
# - set_params
est = cls(**{param: [('a', Mult(1))]})
est.set_params(**{param: bad_steps})
assert_raise_message(ValueError, message, est.fit, [[1]], [1])
assert_raise_message(ValueError, message, est.fit_transform,
[[1]], [1])
def test_pipeline_wrong_memory():
# Test that an error is raised when memory is not a string or a Memory
# instance
iris = load_iris()
X = iris.data
y = iris.target
# Define memory as an integer
memory = 1
cached_pipe = Pipeline([('transf', DummyTransf()), ('svc', SVC())],
memory=memory)
assert_raises_regex(ValueError, "'memory' should either be a string or a"
" joblib.Memory instance, got 'memory=1' instead.",
cached_pipe.fit, X, y)
def test_pipeline_memory():
iris = load_iris()
X = iris.data
y = iris.target
cachedir = mkdtemp()
try:
memory = Memory(cachedir=cachedir, verbose=10)
# Test with Transformer + SVC
clf = SVC(probability=True, random_state=0)
transf = DummyTransf()
pipe = Pipeline([('transf', clone(transf)), ('svc', clf)])
cached_pipe = Pipeline([('transf', transf), ('svc', clf)],
memory=memory)
# Memoize the transformer at the first fit
cached_pipe.fit(X, y)
pipe.fit(X, y)
        # Get the time stamp of the transformer in the cached pipeline
ts = cached_pipe.named_steps['transf'].timestamp_
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_false(hasattr(transf, 'means_'))
# Check that we are reading the cache while fitting
# a second time
cached_pipe.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe.predict(X))
assert_array_equal(pipe.predict_proba(X), cached_pipe.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe.named_steps['transf'].means_)
assert_equal(ts, cached_pipe.named_steps['transf'].timestamp_)
# Create a new pipeline with cloned estimators
        # Check that even changing the step name does not affect the cache hit
clf_2 = SVC(probability=True, random_state=0)
transf_2 = DummyTransf()
cached_pipe_2 = Pipeline([('transf_2', transf_2), ('svc', clf_2)],
memory=memory)
cached_pipe_2.fit(X, y)
# Check that cached_pipe and pipe yield identical results
assert_array_equal(pipe.predict(X), cached_pipe_2.predict(X))
assert_array_equal(pipe.predict_proba(X),
cached_pipe_2.predict_proba(X))
assert_array_equal(pipe.predict_log_proba(X),
cached_pipe_2.predict_log_proba(X))
assert_array_equal(pipe.score(X, y), cached_pipe_2.score(X, y))
assert_array_equal(pipe.named_steps['transf'].means_,
cached_pipe_2.named_steps['transf_2'].means_)
assert_equal(ts, cached_pipe_2.named_steps['transf_2'].timestamp_)
finally:
shutil.rmtree(cachedir)
| bsd-3-clause |
hzy/raven-python | tests/utils/wsgi/tests.py | 23 | 2841 | from raven.utils.testutils import TestCase
from raven.utils.wsgi import get_headers, get_host, get_environ
class GetHeadersTest(TestCase):
def test_tuple_as_key(self):
result = dict(get_headers({
('a', 'tuple'): 'foo',
}))
self.assertEquals(result, {})
def test_coerces_http_name(self):
result = dict(get_headers({
'HTTP_ACCEPT': 'text/plain',
}))
self.assertIn('Accept', result)
self.assertEquals(result['Accept'], 'text/plain')
def test_coerces_content_type(self):
result = dict(get_headers({
'CONTENT_TYPE': 'text/plain',
}))
self.assertIn('Content-Type', result)
self.assertEquals(result['Content-Type'], 'text/plain')
def test_coerces_content_length(self):
result = dict(get_headers({
'CONTENT_LENGTH': '134',
}))
self.assertIn('Content-Length', result)
self.assertEquals(result['Content-Length'], '134')
class GetEnvironTest(TestCase):
def test_has_remote_addr(self):
result = dict(get_environ({'REMOTE_ADDR': '127.0.0.1'}))
self.assertIn('REMOTE_ADDR', result)
self.assertEquals(result['REMOTE_ADDR'], '127.0.0.1')
def test_has_server_name(self):
result = dict(get_environ({'SERVER_NAME': '127.0.0.1'}))
self.assertIn('SERVER_NAME', result)
self.assertEquals(result['SERVER_NAME'], '127.0.0.1')
def test_has_server_port(self):
result = dict(get_environ({'SERVER_PORT': 80}))
self.assertIn('SERVER_PORT', result)
self.assertEquals(result['SERVER_PORT'], 80)
def test_hides_wsgi_input(self):
result = list(get_environ({'wsgi.input': 'foo'}))
self.assertNotIn('wsgi.input', result)
class GetHostTest(TestCase):
def test_http_x_forwarded_host(self):
result = get_host({'HTTP_X_FORWARDED_HOST': 'example.com'})
self.assertEquals(result, 'example.com')
def test_http_host(self):
result = get_host({'HTTP_HOST': 'example.com'})
self.assertEquals(result, 'example.com')
def test_http_strips_port(self):
result = get_host({
'wsgi.url_scheme': 'http',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '80',
})
self.assertEquals(result, 'example.com')
def test_https_strips_port(self):
result = get_host({
'wsgi.url_scheme': 'https',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '443',
})
self.assertEquals(result, 'example.com')
def test_http_nonstandard_port(self):
result = get_host({
'wsgi.url_scheme': 'http',
'SERVER_NAME': 'example.com',
'SERVER_PORT': '81',
})
self.assertEquals(result, 'example.com:81')
| bsd-3-clause |
lihui7115/ChromiumGStreamerBackend | chrome/tools/build/win/resedit.py | 152 | 11259 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script that can extract and edit resources in a Windows binary.
For detailed help, see the script's usage by invoking it with --help."""
import ctypes
import ctypes.wintypes
import logging
import optparse
import os
import shutil
import sys
import tempfile
import win32api
import win32con
_LOGGER = logging.getLogger(__name__)
# The win32api-supplied UpdateResource wrapper unfortunately does not allow
# one to remove resources due to overzealous parameter verification.
# For that case we're forced to go straight to the native API implementation.
UpdateResource = ctypes.windll.kernel32.UpdateResourceW
UpdateResource.argtypes = [
ctypes.wintypes.HANDLE, # HANDLE hUpdate
ctypes.c_wchar_p, # LPCTSTR lpType
ctypes.c_wchar_p, # LPCTSTR lpName
ctypes.c_short, # WORD wLanguage
ctypes.c_void_p, # LPVOID lpData
ctypes.c_ulong, # DWORD cbData
]
UpdateResource.restype = ctypes.c_short
def _ResIdToString(res_id):
# Convert integral res types/ids to a string.
if isinstance(res_id, int):
return "#%d" % res_id
return res_id
class _ResourceEditor(object):
"""A utility class to make it easy to extract and manipulate resources in a
Windows binary."""
def __init__(self, input_file, output_file):
"""Create a new editor.
Args:
input_file: path to the input file.
output_file: (optional) path to the output file.
"""
self._input_file = input_file
self._output_file = output_file
self._modified = False
self._module = None
self._temp_dir = None
self._temp_file = None
self._update_handle = None
def __del__(self):
if self._module:
win32api.FreeLibrary(self._module)
self._module = None
if self._update_handle:
_LOGGER.info('Canceling edits to "%s".', self.input_file)
win32api.EndUpdateResource(self._update_handle, False)
self._update_handle = None
if self._temp_dir:
_LOGGER.info('Removing temporary directory "%s".', self._temp_dir)
shutil.rmtree(self._temp_dir)
self._temp_dir = None
def _GetModule(self):
if not self._module:
# Specify a full path to LoadLibraryEx to prevent
# it from searching the path.
input_file = os.path.abspath(self.input_file)
_LOGGER.info('Loading input_file from "%s"', input_file)
self._module = win32api.LoadLibraryEx(
input_file, None, win32con.LOAD_LIBRARY_AS_DATAFILE)
return self._module
def _GetTempDir(self):
if not self._temp_dir:
self._temp_dir = tempfile.mkdtemp()
_LOGGER.info('Created temporary directory "%s".', self._temp_dir)
return self._temp_dir
def _GetUpdateHandle(self):
if not self._update_handle:
# Make a copy of the input file in the temp dir.
self._temp_file = os.path.join(self.temp_dir,
os.path.basename(self._input_file))
shutil.copyfile(self._input_file, self._temp_file)
# Open a resource update handle on the copy.
_LOGGER.info('Opening temp file "%s".', self._temp_file)
self._update_handle = win32api.BeginUpdateResource(self._temp_file, False)
return self._update_handle
modified = property(lambda self: self._modified)
input_file = property(lambda self: self._input_file)
module = property(_GetModule)
temp_dir = property(_GetTempDir)
update_handle = property(_GetUpdateHandle)
def ExtractAllToDir(self, extract_to):
"""Extracts all resources from our input file to a directory hierarchy
in the directory named extract_to.
The generated directory hierarchy is three-level, and looks like:
      resource-type/
        lang-id/
          resource-name.
Args:
extract_to: path to the folder to output to. This folder will be erased
and recreated if it already exists.
"""
_LOGGER.info('Extracting all resources from "%s" to directory "%s".',
self.input_file, extract_to)
if os.path.exists(extract_to):
_LOGGER.info('Destination directory "%s" exists, deleting', extract_to)
shutil.rmtree(extract_to)
# Make sure the destination dir exists.
os.makedirs(extract_to)
# Now enumerate the resource types.
for res_type in win32api.EnumResourceTypes(self.module):
res_type_str = _ResIdToString(res_type)
# And the resource names.
for res_name in win32api.EnumResourceNames(self.module, res_type):
res_name_str = _ResIdToString(res_name)
# Then the languages.
for res_lang in win32api.EnumResourceLanguages(self.module,
res_type, res_name):
res_lang_str = _ResIdToString(res_lang)
dest_dir = os.path.join(extract_to, res_type_str, res_lang_str)
dest_file = os.path.join(dest_dir, res_name_str)
_LOGGER.info('Extracting resource "%s", lang "%d" name "%s" '
'to file "%s".',
res_type_str, res_lang, res_name_str, dest_file)
# Extract each resource to a file in the output dir.
os.makedirs(dest_dir)
self.ExtractResource(res_type, res_lang, res_name, dest_file)
def ExtractResource(self, res_type, res_lang, res_name, dest_file):
"""Extracts a given resource, specified by type, language id and name,
to a given file.
Args:
res_type: the type of the resource, e.g. "B7".
      res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
dest_file: path to the file where the resource data will be written.
"""
_LOGGER.info('Extracting resource "%s", lang "%d" name "%s" '
'to file "%s".', res_type, res_lang, res_name, dest_file)
data = win32api.LoadResource(self.module, res_type, res_name, res_lang)
with open(dest_file, 'wb') as f:
f.write(data)
def RemoveResource(self, res_type, res_lang, res_name):
"""Removes a given resource, specified by type, language id and name.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
"""
_LOGGER.info('Removing resource "%s:%s".', res_type, res_name)
# We have to go native to perform a removal.
ret = UpdateResource(self.update_handle,
res_type,
res_name,
res_lang,
None,
0)
# Raise an error on failure.
if ret == 0:
error = win32api.GetLastError()
print "error", error
raise RuntimeError(error)
self._modified = True
def UpdateResource(self, res_type, res_lang, res_name, file_path):
"""Inserts or updates a given resource with the contents of a file.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
file_path: path to the file containing the new resource data.
"""
    _LOGGER.info('Writing resource "%s:%s" from file "%s".',
                 res_type, res_name, file_path)
with open(file_path, 'rb') as f:
win32api.UpdateResource(self.update_handle,
res_type,
res_name,
f.read(),
                              res_lang)
self._modified = True
def Commit(self):
"""Commit any successful resource edits this editor has performed.
This has the effect of writing the output file.
"""
if self._update_handle:
update_handle = self._update_handle
self._update_handle = None
win32api.EndUpdateResource(update_handle, False)
_LOGGER.info('Writing edited file to "%s".', self._output_file)
shutil.copyfile(self._temp_file, self._output_file)
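# A hedged programmatic sketch (illustrative only): _ResourceEditor can also
# be driven directly from Python, mirroring the command-line flow shown in
# _USAGE below.  The file names and resource identifiers here are assumptions
# taken from that usage text, not values required by the class.
def _ExamplePackedInstallerEdit():
  editor = _ResourceEditor('mini_installer.exe', 'mini_installer_packed.exe')
  editor.RemoveResource('BL', 1033, 'SETUP.EXE')
  editor.UpdateResource('B7', 1033, 'SETUP.EXE.packed.7z', 'setup.packed.7z')
  if editor.modified:
    editor.Commit()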
_USAGE = """\
usage: %prog [options] input_file
A utility script to extract and edit the resources in a Windows executable.
EXAMPLE USAGE:
# Extract from mini_installer.exe, the resource type "B7", langid 1033 and
# name "CHROME.PACKED.7Z" to a file named chrome.7z.
# Note that 1033 corresponds to English (United States).
%prog mini_installer.exe --extract B7 1033 CHROME.PACKED.7Z chrome.7z
# Update mini_installer.exe by removing the resource type "BL", langid 1033 and
# name "SETUP.EXE". Add the resource type "B7", langid 1033 and name
# "SETUP.EXE.packed.7z" from the file setup.packed.7z.
# Write the edited file to mini_installer_packed.exe.
%prog mini_installer.exe \\
--remove BL 1033 SETUP.EXE \\
--update B7 1033 SETUP.EXE.packed.7z setup.packed.7z \\
--output-file mini_installer_packed.exe
"""
def _ParseArgs():
parser = optparse.OptionParser(_USAGE)
parser.add_option('', '--verbose', action='store_true',
help='Enable verbose logging.')
parser.add_option('', '--extract_all',
help='Path to a folder which will be created, in which all resources '
'from the input_file will be stored, each in a file named '
'"res_type/lang_id/res_name".')
parser.add_option('', '--extract', action='append', default=[], nargs=4,
help='Extract the resource with the given type, language id and name '
'to the given file.',
metavar='type langid name file_path')
parser.add_option('', '--remove', action='append', default=[], nargs=3,
help='Remove the resource with the given type, langid and name.',
metavar='type langid name')
parser.add_option('', '--update', action='append', default=[], nargs=4,
help='Insert or update the resource with the given type, langid and '
'name with the contents of the file given.',
metavar='type langid name file_path')
parser.add_option('', '--output_file',
help='On success, OUTPUT_FILE will be written with a copy of the '
'input file with the edits specified by any remove or update '
'options.')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('You have to specify an input file to work on.')
modify = options.remove or options.update
if modify and not options.output_file:
parser.error('You have to specify an output file with edit options.')
return options, args
def main(options, args):
"""Main program for the script."""
if options.verbose:
logging.basicConfig(level=logging.INFO)
# Create the editor for our input file.
editor = _ResourceEditor(args[0], options.output_file)
if options.extract_all:
editor.ExtractAllToDir(options.extract_all)
for res_type, res_lang, res_name, dest_file in options.extract:
editor.ExtractResource(res_type, int(res_lang), res_name, dest_file)
for res_type, res_lang, res_name in options.remove:
editor.RemoveResource(res_type, int(res_lang), res_name)
for res_type, res_lang, res_name, src_file in options.update:
editor.UpdateResource(res_type, int(res_lang), res_name, src_file)
if editor.modified:
editor.Commit()
if __name__ == '__main__':
sys.exit(main(*_ParseArgs()))
| bsd-3-clause |
mnizol/ormpy | test/TestJoinPath.py | 1 | 5180 | ##############################################################################
# Package: ormpy
# File: TestJoinPath.py
# Author: Matthew Nizol
##############################################################################
""" This file contains unit tests for :class:`lib.JoinPath.JoinPath` """
from unittest import TestCase
from lib.JoinPath import JoinPath, JoinPathException
from lib.FactType import FactType
from lib.ObjectType import ObjectType
from lib.Constraint import SubtypeConstraint
class TestJoinPath(TestCase):
""" Unit tests for the JoinPath class. """
def setUp(self):
self.obj1 = ObjectType(name="A")
self.obj2 = ObjectType(name="B")
self.obj3 = ObjectType(name="C")
self.fact1 = FactType(name="AHasB")
self.fact1.add_role(self.obj1)
self.fact1.add_role(self.obj2)
self.fact2 = FactType(name="BHasC")
self.fact2.add_role(self.obj2)
self.fact2.add_role(self.obj3)
self.fact3 = FactType(name="ALikesA")
self.fact3.add_role(self.obj1)
self.fact3.add_role(self.obj1)
self.fact4 = FactType(name="ALikesB")
self.fact4.add_role(self.obj1)
self.fact4.add_role(self.obj2)
def test_incompatible_roles(self):
""" Test an attempt to join fact types on incompatible roles."""
join_path = JoinPath()
with self.assertRaises(JoinPathException) as ex:
join_path.add_join(self.fact1.roles[0], self.fact2.roles[0])
msg = "join roles must be played by compatible object types"
self.assertEquals(ex.exception.message, msg)
def test_compatible_roles_via_subtype(self):
""" Test case where join is OK because one role player is a subtype of
the other role player. """
obj1 = ObjectType(name="A")
obj2 = ObjectType(name="B")
obj3 = ObjectType(name="C")
fact1 = FactType("AIsB")
fact1.add_role(obj1)
fact1.add_role(obj2)
fact2 = FactType("BIsC")
fact2.add_role(obj2)
fact2.add_role(obj3)
join1 = (fact1.roles[0], fact2.roles[0])
join2 = (fact2.roles[0], fact1.roles[0])
join_path = JoinPath()
# At this point there is no subtype relation from obj1 to obj2, so the
# join fails
with self.assertRaises(JoinPathException) as ex:
join_path.add_join(*join1)
msg = "join roles must be played by compatible object types"
self.assertEquals(ex.exception.message, msg)
with self.assertRaises(JoinPathException) as ex:
join_path.add_join(*join2)
msg = "join roles must be played by compatible object types"
self.assertEquals(ex.exception.message, msg)
# Create a subtype relation so the join succeeds.
cons = SubtypeConstraint(obj1, obj2)
cons.commit()
join_path.add_join(*join1)
self.assertEquals(join_path.joins, [join1])
# Reset the join path and try the join in the opposite direction
join_path = JoinPath()
join_path.add_join(*join2)
self.assertEquals(join_path.joins, [join2])
def test_disconnected(self):
""" Test that the first join role must be on a fact type already on
the path. """
join_path = JoinPath()
join_path.add_join(self.fact1.roles[1], self.fact2.roles[0])
with self.assertRaises(JoinPathException) as ex:
join_path.add_join(self.fact3.roles[1], self.fact4.roles[0])
msg = "first join role must already be on the join path"
self.assertEquals(ex.exception.message, msg)
def test_cycle_1(self):
""" Test that a self-join is rejected. """
join_path = JoinPath()
with self.assertRaises(JoinPathException) as ex:
join_path.add_join(self.fact3.roles[0], self.fact3.roles[1])
msg = "join would create a cycle in the join path"
self.assertEquals(ex.exception.message, msg)
def test_cycle_2(self):
""" Test that a join to a fact type already in the path is rejected. """
join_path = JoinPath()
join_path.add_join(self.fact1.roles[1], self.fact2.roles[0])
with self.assertRaises(JoinPathException) as ex:
join_path.add_join(self.fact2.roles[0], self.fact1.roles[1])
msg = "join would create a cycle in the join path"
self.assertEquals(ex.exception.message, msg)
def test_valid_join(self):
""" Test that a valid join path is stored as expected. """
join_path = JoinPath()
join_path.add_join(self.fact1.roles[1], self.fact2.roles[0])
join_path.add_join(self.fact1.roles[0], self.fact4.roles[0])
join_path.add_join(self.fact4.roles[0], self.fact3.roles[1])
self.assertEquals(join_path.fact_types, [self.fact1, self.fact2, self.fact4, self.fact3])
self.assertEquals(join_path.joins, [(self.fact1.roles[1], self.fact2.roles[0]),
(self.fact1.roles[0], self.fact4.roles[0]),
(self.fact4.roles[0], self.fact3.roles[1])])
| gpl-2.0 |
gnowxilef/youtube-dl | youtube_dl/extractor/cartoonnetwork.py | 42 | 1776 | # coding: utf-8
from __future__ import unicode_literals
import re
from .turner import TurnerBaseIE
class CartoonNetworkIE(TurnerBaseIE):
_VALID_URL = r'https?://(?:www\.)?cartoonnetwork\.com/video/(?:[^/]+/)+(?P<id>[^/?#]+)-(?:clip|episode)\.html'
_TEST = {
'url': 'http://www.cartoonnetwork.com/video/teen-titans-go/starfire-the-cat-lady-clip.html',
'info_dict': {
'id': '8a250ab04ed07e6c014ef3f1e2f9016c',
'ext': 'mp4',
'title': 'Starfire the Cat Lady',
'description': 'Robin decides to become a cat so that Starfire will finally love him.',
},
'params': {
# m3u8 download
'skip_download': True,
},
}
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
id_type, video_id = re.search(r"_cnglobal\.cvp(Video|Title)Id\s*=\s*'([^']+)';", webpage).groups()
query = ('id' if id_type == 'Video' else 'titleId') + '=' + video_id
return self._extract_cvp_info(
'http://www.cartoonnetwork.com/video-seo-svc/episodeservices/getCvpPlaylist?networkName=CN2&' + query, video_id, {
'secure': {
'media_src': 'http://androidhls-secure.cdn.turner.com/toon/big',
'tokenizer_src': 'http://www.cartoonnetwork.com/cntv/mvpd/processors/services/token_ipadAdobe.do',
},
}, {
'url': url,
'site_name': 'CartoonNetwork',
'auth_required': self._search_regex(
r'_cnglobal\.cvpFullOrPreviewAuth\s*=\s*(true|false);',
webpage, 'auth required', default='false') == 'true',
})
| unlicense |
umitproject/network-admin | netadmin/events/utils.py | 1 | 4469 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 Adriano Monteiro Marques
#
# Authors: Amit Pal <[email protected]>
# Piotrek Wasilewski <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
try:
import simplejson as json
except ImportError:
import json
import datetime
from django.utils.translation import ugettext as _
from netadmin.permissions.utils import filter_user_objects
from netadmin.networks.models import Host
from netadmin.events.models import Event, EventType
class EventParseError(Exception):
pass
def filter_user_events(user):
"""Returns events accessible to specified user.
"""
hosts = filter_user_objects(user, Host)
pks = [host.pk for host in hosts]
return Event.objects.filter(source_host__pk__in=pks)
def get_event_data(request, event_dict):
"""
    Creates a dictionary with parameters for Event's __init__ method. If
    needed, the function also creates the host and event type and saves them.
    If any obligatory field is missing from event_dict, an EventParseError
    is raised.
"""
required_fields = ['timestamp', 'protocol', 'fields_class', 'event_type',
'description', 'short_description']
base_fields = required_fields + ['is_report', 'hostname',
'source_host_ipv6', 'source_host_ipv4']
# make sure that event_dict contains all fields we need
# (also make sure that these fields aren't empty)
for field_name in required_fields:
if field_name not in event_dict:
raise EventParseError("Following field is not specified: %s" \
% field_name)
if not event_dict[field_name]:
raise EventParseError("Following field must not be empty: %s" \
% field_name)
message = event_dict['description']
short_message = event_dict['short_description']
timestamp = event_dict['timestamp']
protocol = event_dict['protocol']
event_type_name = event_dict['event_type']
fields_class = event_dict['fields_class']
ipv4 = event_dict.get('source_host_ipv4')
ipv6 = event_dict.get('source_host_ipv6')
hostname = event_dict.get('hostname')
try:
if hostname:
source_host = Host.objects.get(name=hostname, user=request.user)
else:
if ipv4 and ipv6:
source_host = Host.objects.get(ipv4=ipv4, ipv6=ipv6,
user=request.user)
elif ipv4:
source_host = Host.objects.get(ipv4=ipv4, user=request.user)
elif ipv6:
source_host = Host.objects.get(ipv6=ipv6, user=request.user)
else:
source_host = None
except Host.DoesNotExist:
source_host = Host(name=hostname, ipv4=ipv4, ipv6=ipv6,
user=request.user)
source_host.save()
try:
event_type = EventType.objects.get(name=event_type_name,
user=request.user)
except EventType.DoesNotExist:
event_type = EventType(name=event_type_name, user=request.user)
event_type.save()
fields_data_dict = {}
for field in event_dict:
if field not in base_fields:
fields_data_dict[field] = event_dict[field]
fields_data = json.dumps(fields_data_dict)
event_data = {
'message': message,
'short_message': short_message,
'timestamp': datetime.datetime.fromtimestamp(float(timestamp)),
'protocol': protocol,
'fields_class': fields_class,
'fields_data': fields_data,
'source_host': source_host,
'event_type': event_type
}
return event_data
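# A hedged sketch of the dictionary shape get_event_data() expects
# (illustrative only): the keys follow the required/base field lists above,
# while the concrete host name, addresses and extra field are assumptions
# made up for demonstration.
def _example_event_dict():
    return {
        'timestamp': '1311774000.0',
        'protocol': 'SNMP',
        'fields_class': 'Notification',
        'event_type': 'INFO',
        'description': 'Host is up and responding to requests',
        'short_description': 'Host is up',
        'hostname': 'webserver-1',
        'source_host_ipv4': '192.168.1.10',
        'source_host_ipv6': '',
        # Anything outside the base fields ends up serialized in fields_data.
        'uptime': '42',
    }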
| agpl-3.0 |
leeclemens/dnspython | dns/rdtypes/ANY/NSEC3PARAM.py | 8 | 2995 | # Copyright (C) 2004-2007, 2009-2011 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.rdata
class NSEC3PARAM(dns.rdata.Rdata):
"""NSEC3PARAM record
@ivar algorithm: the hash algorithm number
@type algorithm: int
@ivar flags: the flags
@type flags: int
@ivar iterations: the number of iterations
@type iterations: int
@ivar salt: the salt
@type salt: string"""
__slots__ = ['algorithm', 'flags', 'iterations', 'salt']
def __init__(self, rdclass, rdtype, algorithm, flags, iterations, salt):
super(NSEC3PARAM, self).__init__(rdclass, rdtype)
self.algorithm = algorithm
self.flags = flags
self.iterations = iterations
self.salt = salt
def to_text(self, origin=None, relativize=True, **kw):
if self.salt == '':
salt = '-'
else:
salt = self.salt.encode('hex-codec')
return '%u %u %u %s' % (self.algorithm, self.flags, self.iterations, salt)
def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
algorithm = tok.get_uint8()
flags = tok.get_uint8()
iterations = tok.get_uint16()
salt = tok.get_string()
if salt == '-':
salt = ''
else:
salt = salt.decode('hex-codec')
tok.get_eol()
return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
from_text = classmethod(from_text)
def to_wire(self, file, compress = None, origin = None):
l = len(self.salt)
file.write(struct.pack("!BBHB", self.algorithm, self.flags,
self.iterations, l))
file.write(self.salt)
def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
(algorithm, flags, iterations, slen) = struct.unpack('!BBHB',
wire[current : current + 5])
current += 5
rdlen -= 5
salt = wire[current : current + slen].unwrap()
current += slen
rdlen -= slen
if rdlen != 0:
raise dns.exception.FormError
return cls(rdclass, rdtype, algorithm, flags, iterations, salt)
from_wire = classmethod(from_wire)
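# A hedged usage sketch (illustrative only, not part of dnspython itself):
# the rdata above round-trips the textual form
# "<algorithm> <flags> <iterations> <salt>".  The parameter values below are
# assumptions chosen for demonstration.
def _example_nsec3param():
    import dns.rdataclass
    import dns.rdatatype
    rd = dns.rdata.from_text(dns.rdataclass.IN, dns.rdatatype.NSEC3PARAM,
                             '1 0 12 aabbccdd')
    return rd.algorithm, rd.flags, rd.iterations, rd.to_text()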
| isc |
ohmini/thaifoodapi | lib/django/db/migrations/operations/special.py | 374 | 7425 | from __future__ import unicode_literals
from django.db import router
from .base import Operation
class SeparateDatabaseAndState(Operation):
"""
Takes two lists of operations - ones that will be used for the database,
and ones that will be used for the state change. This allows operations
that don't support state change to have it applied, or have operations
that affect the state or not the database, or so on.
"""
serialization_expand_args = ['database_operations', 'state_operations']
def __init__(self, database_operations=None, state_operations=None):
self.database_operations = database_operations or []
self.state_operations = state_operations or []
def deconstruct(self):
kwargs = {}
if self.database_operations:
kwargs['database_operations'] = self.database_operations
if self.state_operations:
kwargs['state_operations'] = self.state_operations
return (
self.__class__.__name__,
[],
kwargs
)
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
for database_operation in self.database_operations:
to_state = from_state.clone()
database_operation.state_forwards(app_label, to_state)
database_operation.database_forwards(app_label, schema_editor, from_state, to_state)
from_state = to_state
def database_backwards(self, app_label, schema_editor, from_state, to_state):
# We calculate state separately in here since our state functions aren't useful
base_state = to_state
for pos, database_operation in enumerate(reversed(self.database_operations)):
to_state = base_state.clone()
for dbop in self.database_operations[:-(pos + 1)]:
dbop.state_forwards(app_label, to_state)
from_state = base_state.clone()
database_operation.state_forwards(app_label, from_state)
database_operation.database_backwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Custom state/database change combination"
class RunSQL(Operation):
"""
Runs some raw SQL. A reverse SQL statement may be provided.
Also accepts a list of operations that represent the state change effected
by this SQL change, in case it's custom column/table creation/deletion.
"""
noop = ''
def __init__(self, sql, reverse_sql=None, state_operations=None, hints=None):
self.sql = sql
self.reverse_sql = reverse_sql
self.state_operations = state_operations or []
self.hints = hints or {}
def deconstruct(self):
kwargs = {
'sql': self.sql,
}
if self.reverse_sql is not None:
kwargs['reverse_sql'] = self.reverse_sql
if self.state_operations:
kwargs['state_operations'] = self.state_operations
if self.hints:
kwargs['hints'] = self.hints
return (
self.__class__.__name__,
[],
kwargs
)
@property
def reversible(self):
return self.reverse_sql is not None
def state_forwards(self, app_label, state):
for state_operation in self.state_operations:
state_operation.state_forwards(app_label, state)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self._run_sql(schema_editor, self.sql)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_sql is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self._run_sql(schema_editor, self.reverse_sql)
def describe(self):
return "Raw SQL operation"
def _run_sql(self, schema_editor, sqls):
if isinstance(sqls, (list, tuple)):
for sql in sqls:
params = None
if isinstance(sql, (list, tuple)):
elements = len(sql)
if elements == 2:
sql, params = sql
else:
raise ValueError("Expected a 2-tuple but got %d" % elements)
schema_editor.execute(sql, params=params)
elif sqls != RunSQL.noop:
statements = schema_editor.connection.ops.prepare_sql_script(sqls)
for statement in statements:
schema_editor.execute(statement, params=None)
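# A minimal usage sketch for RunSQL (hypothetical table and column names).
# A 2-tuple provides parametrized SQL, and passing RunSQL.noop as reverse_sql
# keeps the operation reversible while doing nothing on the way back:
#
#   RunSQL(
#       sql=[("UPDATE myapp_thing SET flag = %s", [True])],
#       reverse_sql=RunSQL.noop,
#   )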
class RunPython(Operation):
"""
Runs Python code in a context suitable for doing versioned ORM operations.
"""
reduces_to_sql = False
def __init__(self, code, reverse_code=None, atomic=True, hints=None):
self.atomic = atomic
# Forwards code
if not callable(code):
raise ValueError("RunPython must be supplied with a callable")
self.code = code
# Reverse code
if reverse_code is None:
self.reverse_code = None
else:
if not callable(reverse_code):
raise ValueError("RunPython must be supplied with callable arguments")
self.reverse_code = reverse_code
self.hints = hints or {}
def deconstruct(self):
kwargs = {
'code': self.code,
}
if self.reverse_code is not None:
kwargs['reverse_code'] = self.reverse_code
if self.atomic is not True:
kwargs['atomic'] = self.atomic
if self.hints:
kwargs['hints'] = self.hints
return (
self.__class__.__name__,
[],
kwargs
)
@property
def reversible(self):
return self.reverse_code is not None
def state_forwards(self, app_label, state):
# RunPython objects have no state effect. To add some, combine this
# with SeparateDatabaseAndState.
pass
def database_forwards(self, app_label, schema_editor, from_state, to_state):
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
# We now execute the Python code in a context that contains a 'models'
# object, representing the versioned models as an app registry.
# We could try to override the global cache, but then people will still
# use direct imports, so we go with a documentation approach instead.
self.code(from_state.apps, schema_editor)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
if self.reverse_code is None:
raise NotImplementedError("You cannot reverse this operation")
if router.allow_migrate(schema_editor.connection.alias, app_label, **self.hints):
self.reverse_code(from_state.apps, schema_editor)
def describe(self):
return "Raw Python operation"
@staticmethod
def noop(apps, schema_editor):
return None
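# A minimal usage sketch for RunPython (hypothetical app and model names).
# The callable receives the historical app registry and the schema editor;
# apps.get_model keeps the code working against the versioned model state:
#
#   def forwards(apps, schema_editor):
#       Thing = apps.get_model('myapp', 'Thing')
#       Thing.objects.filter(name='').update(name='unnamed')
#
#   operations = [
#       RunPython(forwards, reverse_code=RunPython.noop),
#   ]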
| bsd-3-clause |
noba3/KoTos | addons/script.module.youtube.dl/lib/youtube_dl/extractor/macgamestore.py | 142 | 1275 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import ExtractorError
class MacGameStoreIE(InfoExtractor):
IE_NAME = 'macgamestore'
IE_DESC = 'MacGameStore trailers'
_VALID_URL = r'https?://www\.macgamestore\.com/mediaviewer\.php\?trailer=(?P<id>\d+)'
_TEST = {
'url': 'http://www.macgamestore.com/mediaviewer.php?trailer=2450',
'md5': '8649b8ea684b6666b4c5be736ecddc61',
'info_dict': {
'id': '2450',
'ext': 'm4v',
'title': 'Crow',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(
url, video_id, 'Downloading trailer page')
if '>Missing Media<' in webpage:
raise ExtractorError(
'Trailer %s does not exist' % video_id, expected=True)
video_title = self._html_search_regex(
r'<title>MacGameStore: (.*?) Trailer</title>', webpage, 'title')
video_url = self._html_search_regex(
r'(?s)<div\s+id="video-player".*?href="([^"]+)"\s*>',
webpage, 'video URL')
return {
'id': video_id,
'url': video_url,
'title': video_title
}
| gpl-2.0 |
yubbie/googleapps-message-recall | message_recall/lib/wtforms/ext/sqlalchemy/fields.py | 54 | 6679 | """
Useful form fields for use with SQLAlchemy ORM.
"""
from __future__ import unicode_literals
import operator
from wtforms import widgets
from wtforms.compat import text_type, string_types
from wtforms.fields import SelectFieldBase
from wtforms.validators import ValidationError
try:
from sqlalchemy.orm.util import identity_key
has_identity_key = True
except ImportError:
has_identity_key = False
__all__ = (
'QuerySelectField', 'QuerySelectMultipleField',
)
class QuerySelectField(SelectFieldBase):
"""
Will display a select drop-down field to choose between ORM results in a
    sqlalchemy `Query`. The `data` property will hold the ORM model instance
    itself, not its ID. Submitting a choice which is not in the query
will result in a validation error.
This field only works for queries on models whose primary key column(s)
have a consistent string representation. This means it mostly only works
for those composed of string, unicode, and integer types. For the most
part, the primary keys will be auto-detected from the model, alternately
pass a one-argument callable to `get_pk` which can return a unique
comparable key.
The `query` property on the field can be set from within a view to assign
a query per-instance to the field. If the property is not set, the
`query_factory` callable passed to the field constructor will be called to
obtain a query.
Specify `get_label` to customize the label associated with each option. If
a string, this is the name of an attribute on the model object to use as
the label text. If a one-argument callable, this callable will be passed
model instance and expected to return the label text. Otherwise, the model
object's `__str__` or `__unicode__` will be used.
If `allow_blank` is set to `True`, then a blank choice will be added to the
top of the list. Selecting this choice will result in the `data` property
being `None`. The label for this blank choice can be set by specifying the
`blank_text` parameter.
"""
widget = widgets.Select()
def __init__(self, label=None, validators=None, query_factory=None,
get_pk=None, get_label=None, allow_blank=False,
blank_text='', **kwargs):
super(QuerySelectField, self).__init__(label, validators, **kwargs)
self.query_factory = query_factory
if get_pk is None:
if not has_identity_key:
raise Exception('The sqlalchemy identity_key function could not be imported.')
self.get_pk = get_pk_from_identity
else:
self.get_pk = get_pk
if get_label is None:
self.get_label = lambda x: x
elif isinstance(get_label, string_types):
self.get_label = operator.attrgetter(get_label)
else:
self.get_label = get_label
self.allow_blank = allow_blank
self.blank_text = blank_text
self.query = None
self._object_list = None
def _get_data(self):
if self._formdata is not None:
for pk, obj in self._get_object_list():
if pk == self._formdata:
self._set_data(obj)
break
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def _get_object_list(self):
if self._object_list is None:
query = self.query or self.query_factory()
get_pk = self.get_pk
self._object_list = list((text_type(get_pk(obj)), obj) for obj in query)
return self._object_list
def iter_choices(self):
if self.allow_blank:
yield ('__None', self.blank_text, self.data is None)
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj == self.data)
def process_formdata(self, valuelist):
if valuelist:
if self.allow_blank and valuelist[0] == '__None':
self.data = None
else:
self._data = None
self._formdata = valuelist[0]
def pre_validate(self, form):
data = self.data
if data is not None:
for pk, obj in self._get_object_list():
if data == obj:
break
else:
raise ValidationError(self.gettext('Not a valid choice'))
elif self._formdata or not self.allow_blank:
raise ValidationError(self.gettext('Not a valid choice'))
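# A minimal usage sketch (hypothetical model, session and form names),
# illustrating the query_factory / get_label / allow_blank options described
# in the docstring above:
#
#   class PostForm(Form):
#       category = QuerySelectField(
#           'Category',
#           query_factory=lambda: session.query(Category).order_by(Category.name),
#           get_label='name',
#           allow_blank=True,
#           blank_text='-- none --',
#       )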
class QuerySelectMultipleField(QuerySelectField):
"""
Very similar to QuerySelectField with the difference that this will
display a multiple select. The data property will hold a list with ORM
model instances and will be an empty list when no value is selected.
If any of the items in the data list or submitted form data cannot be
found in the query, this will result in a validation error.
"""
widget = widgets.Select(multiple=True)
def __init__(self, label=None, validators=None, default=None, **kwargs):
if default is None:
default = []
super(QuerySelectMultipleField, self).__init__(label, validators, default=default, **kwargs)
self._invalid_formdata = False
def _get_data(self):
formdata = self._formdata
if formdata is not None:
data = []
for pk, obj in self._get_object_list():
if not formdata:
break
elif pk in formdata:
formdata.remove(pk)
data.append(obj)
if formdata:
self._invalid_formdata = True
self._set_data(data)
return self._data
def _set_data(self, data):
self._data = data
self._formdata = None
data = property(_get_data, _set_data)
def iter_choices(self):
for pk, obj in self._get_object_list():
yield (pk, self.get_label(obj), obj in self.data)
def process_formdata(self, valuelist):
self._formdata = set(valuelist)
def pre_validate(self, form):
if self._invalid_formdata:
raise ValidationError(self.gettext('Not a valid choice'))
elif self.data:
obj_list = list(x[1] for x in self._get_object_list())
for v in self.data:
if v not in obj_list:
raise ValidationError(self.gettext('Not a valid choice'))
def get_pk_from_identity(obj):
cls, key = identity_key(instance=obj)
return ':'.join(text_type(x) for x in key)
| apache-2.0 |
ecoal95/servo | tests/wpt/web-platform-tests/tools/third_party/hyper/hyper/h2/events.py | 37 | 20478 | # -*- coding: utf-8 -*-
"""
h2/events
~~~~~~~~~
Defines Event types for HTTP/2.
Events are returned by the H2 state machine to allow implementations to keep
track of events triggered by receiving data. Each time data is provided to the
H2 state machine it processes the data and returns a list of Event objects.
"""
import binascii
from .settings import ChangedSetting, _setting_code_from_int
class Event(object):
"""
Base class for h2 events.
"""
pass
class RequestReceived(Event):
"""
The RequestReceived event is fired whenever request headers are received.
This event carries the HTTP headers for the given request and the stream ID
of the new stream.
.. versionchanged:: 2.3.0
Changed the type of ``headers`` to :class:`HeaderTuple
<hpack:hpack.HeaderTuple>`. This has no effect on current users.
.. versionchanged:: 2.4.0
Added ``stream_ended`` and ``priority_updated`` properties.
"""
def __init__(self):
#: The Stream ID for the stream this request was made on.
self.stream_id = None
#: The request headers.
self.headers = None
#: If this request also ended the stream, the associated
#: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
#: here.
#:
#: .. versionadded:: 2.4.0
self.stream_ended = None
#: If this request also had associated priority information, the
#: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
#: event will be available here.
#:
#: .. versionadded:: 2.4.0
self.priority_updated = None
def __repr__(self):
return "<RequestReceived stream_id:%s, headers:%s>" % (
self.stream_id, self.headers
)
class ResponseReceived(Event):
"""
The ResponseReceived event is fired whenever response headers are received.
This event carries the HTTP headers for the given response and the stream
ID of the new stream.
.. versionchanged:: 2.3.0
Changed the type of ``headers`` to :class:`HeaderTuple
<hpack:hpack.HeaderTuple>`. This has no effect on current users.
.. versionchanged:: 2.4.0
Added ``stream_ended`` and ``priority_updated`` properties.
"""
def __init__(self):
#: The Stream ID for the stream this response was made on.
self.stream_id = None
#: The response headers.
self.headers = None
#: If this response also ended the stream, the associated
#: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
#: here.
#:
#: .. versionadded:: 2.4.0
self.stream_ended = None
#: If this response also had associated priority information, the
#: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
#: event will be available here.
#:
#: .. versionadded:: 2.4.0
self.priority_updated = None
def __repr__(self):
return "<ResponseReceived stream_id:%s, headers:%s>" % (
self.stream_id, self.headers
)
class TrailersReceived(Event):
"""
The TrailersReceived event is fired whenever trailers are received on a
stream. Trailers are a set of headers sent after the body of the
request/response, and are used to provide information that wasn't known
ahead of time (e.g. content-length). This event carries the HTTP header
fields that form the trailers and the stream ID of the stream on which they
were received.
.. versionchanged:: 2.3.0
Changed the type of ``headers`` to :class:`HeaderTuple
<hpack:hpack.HeaderTuple>`. This has no effect on current users.
.. versionchanged:: 2.4.0
Added ``stream_ended`` and ``priority_updated`` properties.
"""
def __init__(self):
#: The Stream ID for the stream on which these trailers were received.
self.stream_id = None
#: The trailers themselves.
self.headers = None
#: Trailers always end streams. This property has the associated
#: :class:`StreamEnded <h2.events.StreamEnded>` in it.
#:
#: .. versionadded:: 2.4.0
self.stream_ended = None
#: If the trailers also set associated priority information, the
#: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
#: event will be available here.
#:
#: .. versionadded:: 2.4.0
self.priority_updated = None
def __repr__(self):
return "<TrailersReceived stream_id:%s, headers:%s>" % (
self.stream_id, self.headers
)
class _HeadersSent(Event):
"""
The _HeadersSent event is fired whenever headers are sent.
This is an internal event, used to determine validation steps on
outgoing header blocks.
"""
pass
class _ResponseSent(_HeadersSent):
"""
The _ResponseSent event is fired whenever response headers are sent
on a stream.
This is an internal event, used to determine validation steps on
outgoing header blocks.
"""
pass
class _RequestSent(_HeadersSent):
"""
The _RequestSent event is fired whenever request headers are sent
on a stream.
This is an internal event, used to determine validation steps on
outgoing header blocks.
"""
pass
class _TrailersSent(_HeadersSent):
"""
The _TrailersSent event is fired whenever trailers are sent on a
stream. Trailers are a set of headers sent after the body of the
request/response, and are used to provide information that wasn't known
ahead of time (e.g. content-length).
This is an internal event, used to determine validation steps on
outgoing header blocks.
"""
pass
class _PushedRequestSent(_HeadersSent):
"""
The _PushedRequestSent event is fired whenever pushed request headers are
sent.
This is an internal event, used to determine validation steps on outgoing
header blocks.
"""
pass
class InformationalResponseReceived(Event):
"""
The InformationalResponseReceived event is fired when an informational
response (that is, one whose status code is a 1XX code) is received from
the remote peer.
The remote peer may send any number of these, from zero upwards. These
responses are most commonly sent in response to requests that have the
``expect: 100-continue`` header field present. Most users can safely
ignore this event unless you are intending to use the
``expect: 100-continue`` flow, or are for any reason expecting a different
1XX status code.
.. versionadded:: 2.2.0
.. versionchanged:: 2.3.0
Changed the type of ``headers`` to :class:`HeaderTuple
<hpack:hpack.HeaderTuple>`. This has no effect on current users.
.. versionchanged:: 2.4.0
Added ``priority_updated`` property.
"""
def __init__(self):
#: The Stream ID for the stream this informational response was made
#: on.
self.stream_id = None
#: The headers for this informational response.
self.headers = None
#: If this response also had associated priority information, the
#: associated :class:`PriorityUpdated <h2.events.PriorityUpdated>`
#: event will be available here.
#:
#: .. versionadded:: 2.4.0
self.priority_updated = None
def __repr__(self):
return "<InformationalResponseReceived stream_id:%s, headers:%s>" % (
self.stream_id, self.headers
)
class DataReceived(Event):
"""
The DataReceived event is fired whenever data is received on a stream from
the remote peer. The event carries the data itself, and the stream ID on
which the data was received.
.. versionchanged:: 2.4.0
Added ``stream_ended`` property.
"""
def __init__(self):
#: The Stream ID for the stream this data was received on.
self.stream_id = None
#: The data itself.
self.data = None
#: The amount of data received that counts against the flow control
#: window. Note that padding counts against the flow control window, so
#: when adjusting flow control you should always use this field rather
#: than ``len(data)``.
self.flow_controlled_length = None
#: If this data chunk also completed the stream, the associated
#: :class:`StreamEnded <h2.events.StreamEnded>` event will be available
#: here.
#:
#: .. versionadded:: 2.4.0
self.stream_ended = None
def __repr__(self):
return (
"<DataReceived stream_id:%s, "
"flow_controlled_length:%s, "
"data:%s>" % (
self.stream_id,
self.flow_controlled_length,
_bytes_representation(self.data[:20]),
)
)
class WindowUpdated(Event):
"""
The WindowUpdated event is fired whenever a flow control window changes
size. HTTP/2 defines flow control windows for connections and streams: this
event fires for both connections and streams. The event carries the ID of
the stream to which it applies (set to zero if the window update applies to
the connection), and the delta in the window size.
"""
def __init__(self):
#: The Stream ID of the stream whose flow control window was changed.
#: May be ``0`` if the connection window was changed.
self.stream_id = None
#: The window delta.
self.delta = None
def __repr__(self):
return "<WindowUpdated stream_id:%s, delta:%s>" % (
self.stream_id, self.delta
)
class RemoteSettingsChanged(Event):
"""
The RemoteSettingsChanged event is fired whenever the remote peer changes
its settings. It contains a complete inventory of changed settings,
including their previous values.
In HTTP/2, settings changes need to be acknowledged. hyper-h2 automatically
acknowledges settings changes for efficiency. However, it is possible that
the caller may not be happy with the changed setting.
When this event is received, the caller should confirm that the new
settings are acceptable. If they are not acceptable, the user should close
the connection with the error code :data:`PROTOCOL_ERROR
<h2.errors.ErrorCodes.PROTOCOL_ERROR>`.
.. versionchanged:: 2.0.0
Prior to this version the user needed to acknowledge settings changes.
This is no longer the case: hyper-h2 now automatically acknowledges
them.
"""
def __init__(self):
#: A dictionary of setting byte to
#: :class:`ChangedSetting <h2.settings.ChangedSetting>`, representing
#: the changed settings.
self.changed_settings = {}
@classmethod
def from_settings(cls, old_settings, new_settings):
"""
Build a RemoteSettingsChanged event from a set of changed settings.
:param old_settings: A complete collection of old settings, in the form
of a dictionary of ``{setting: value}``.
:param new_settings: All the changed settings and their new values, in
the form of a dictionary of ``{setting: value}``.
"""
e = cls()
for setting, new_value in new_settings.items():
setting = _setting_code_from_int(setting)
original_value = old_settings.get(setting)
change = ChangedSetting(setting, original_value, new_value)
e.changed_settings[setting] = change
return e
def __repr__(self):
return "<RemoteSettingsChanged changed_settings:{%s}>" % (
", ".join(repr(cs) for cs in self.changed_settings.values()),
)
class PingAcknowledged(Event):
"""
The PingAcknowledged event is fired whenever a user-emitted PING is
acknowledged. This contains the data in the ACK'ed PING, allowing the
user to correlate PINGs and calculate RTT.
"""
def __init__(self):
#: The data included on the ping.
self.ping_data = None
def __repr__(self):
return "<PingAcknowledged ping_data:%s>" % (
_bytes_representation(self.ping_data),
)
class StreamEnded(Event):
"""
The StreamEnded event is fired whenever a stream is ended by a remote
party. The stream may not be fully closed if it has not been closed
locally, but no further data or headers should be expected on that stream.
"""
def __init__(self):
#: The Stream ID of the stream that was closed.
self.stream_id = None
def __repr__(self):
return "<StreamEnded stream_id:%s>" % self.stream_id
class StreamReset(Event):
"""
The StreamReset event is fired in two situations. The first is when the
remote party forcefully resets the stream. The second is when the remote
party has made a protocol error which only affects a single stream. In this
case, Hyper-h2 will terminate the stream early and return this event.
.. versionchanged:: 2.0.0
This event is now fired when Hyper-h2 automatically resets a stream.
"""
def __init__(self):
#: The Stream ID of the stream that was reset.
self.stream_id = None
#: The error code given. Either one of :class:`ErrorCodes
#: <h2.errors.ErrorCodes>` or ``int``
self.error_code = None
#: Whether the remote peer sent a RST_STREAM or we did.
self.remote_reset = True
def __repr__(self):
return "<StreamReset stream_id:%s, error_code:%s, remote_reset:%s>" % (
self.stream_id, self.error_code, self.remote_reset
)
class PushedStreamReceived(Event):
"""
The PushedStreamReceived event is fired whenever a pushed stream has been
received from a remote peer. The event carries on it the new stream ID, the
ID of the parent stream, and the request headers pushed by the remote peer.
"""
def __init__(self):
#: The Stream ID of the stream created by the push.
self.pushed_stream_id = None
#: The Stream ID of the stream that the push is related to.
self.parent_stream_id = None
#: The request headers, sent by the remote party in the push.
self.headers = None
def __repr__(self):
return (
"<PushedStreamReceived pushed_stream_id:%s, parent_stream_id:%s, "
"headers:%s>" % (
self.pushed_stream_id,
self.parent_stream_id,
self.headers,
)
)
class SettingsAcknowledged(Event):
"""
The SettingsAcknowledged event is fired whenever a settings ACK is received
from the remote peer. The event carries on it the settings that were
    acknowledged, in the same format as
:class:`h2.events.RemoteSettingsChanged`.
"""
def __init__(self):
#: A dictionary of setting byte to
#: :class:`ChangedSetting <h2.settings.ChangedSetting>`, representing
#: the changed settings.
self.changed_settings = {}
def __repr__(self):
return "<SettingsAcknowledged changed_settings:{%s}>" % (
", ".join(repr(cs) for cs in self.changed_settings.values()),
)
class PriorityUpdated(Event):
"""
The PriorityUpdated event is fired whenever a stream sends updated priority
information. This can occur when the stream is opened, or at any time
during the stream lifetime.
This event is purely advisory, and does not need to be acted on.
.. versionadded:: 2.0.0
"""
def __init__(self):
#: The ID of the stream whose priority information is being updated.
self.stream_id = None
#: The new stream weight. May be the same as the original stream
#: weight. An integer between 1 and 256.
self.weight = None
#: The stream ID this stream now depends on. May be ``0``.
self.depends_on = None
#: Whether the stream *exclusively* depends on the parent stream. If it
#: does, this stream should inherit the current children of its new
#: parent.
self.exclusive = None
def __repr__(self):
return (
"<PriorityUpdated stream_id:%s, weight:%s, depends_on:%s, "
"exclusive:%s>" % (
self.stream_id,
self.weight,
self.depends_on,
self.exclusive
)
)
class ConnectionTerminated(Event):
"""
The ConnectionTerminated event is fired when a connection is torn down by
the remote peer using a GOAWAY frame. Once received, no further action may
be taken on the connection: a new connection must be established.
"""
def __init__(self):
#: The error code cited when tearing down the connection. Should be
#: one of :class:`ErrorCodes <h2.errors.ErrorCodes>`, but may not be if
#: unknown HTTP/2 extensions are being used.
self.error_code = None
#: The stream ID of the last stream the remote peer saw. This can
#: provide an indication of what data, if any, never reached the remote
#: peer and so can safely be resent.
self.last_stream_id = None
#: Additional debug data that can be appended to GOAWAY frame.
self.additional_data = None
def __repr__(self):
return (
"<ConnectionTerminated error_code:%s, last_stream_id:%s, "
"additional_data:%s>" % (
self.error_code,
self.last_stream_id,
_bytes_representation(
self.additional_data[:20]
if self.additional_data else None)
)
)
class AlternativeServiceAvailable(Event):
"""
The AlternativeServiceAvailable event is fired when the remote peer
advertises an `RFC 7838 <https://tools.ietf.org/html/rfc7838>`_ Alternative
Service using an ALTSVC frame.
This event always carries the origin to which the ALTSVC information
applies. That origin is either supplied by the server directly, or inferred
by hyper-h2 from the ``:authority`` pseudo-header field that was sent by
the user when initiating a given stream.
This event also carries what RFC 7838 calls the "Alternative Service Field
Value", which is formatted like a HTTP header field and contains the
relevant alternative service information. Hyper-h2 does not parse or in any
way modify that information: the user is required to do that.
This event can only be fired on the client end of a connection.
.. versionadded:: 2.3.0
"""
def __init__(self):
#: The origin to which the alternative service field value applies.
#: This field is either supplied by the server directly, or inferred by
#: hyper-h2 from the ``:authority`` pseudo-header field that was sent
#: by the user when initiating the stream on which the frame was
#: received.
self.origin = None
#: The ALTSVC field value. This contains information about the HTTP
#: alternative service being advertised by the server. Hyper-h2 does
#: not parse this field: it is left exactly as sent by the server. The
#: structure of the data in this field is given by `RFC 7838 Section 3
#: <https://tools.ietf.org/html/rfc7838#section-3>`_.
self.field_value = None
def __repr__(self):
return (
"<AlternativeServiceAvailable origin:%s, field_value:%s>" % (
self.origin.decode('utf-8', 'ignore'),
self.field_value.decode('utf-8', 'ignore'),
)
)
def _bytes_representation(data):
"""
Converts a bytestring into something that is safe to print on all Python
platforms.
This function is relatively expensive, so it should not be called on the
mainline of the code. It's safe to use in things like object repr methods
though.
"""
if data is None:
return None
hex = binascii.hexlify(data)
# This is moderately clever: on all Python versions hexlify returns a byte
# string. On Python 3 we want an actual string, so we just check whether
# that's what we have.
if not isinstance(hex, str): # pragma: no cover
hex = hex.decode('ascii')
return hex
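# A minimal consumption sketch (illustrative only; socket handling omitted and
# the H2Connection constructor flags vary between h2 versions): bytes received
# from the peer are fed to the state machine, which returns the event objects
# defined above.
#
#   import h2.connection
#
#   conn = h2.connection.H2Connection(client_side=False)
#   conn.initiate_connection()
#   for event in conn.receive_data(data):
#       if isinstance(event, RequestReceived):
#           handle_request(event.stream_id, event.headers)
#       elif isinstance(event, StreamEnded):
#           finish_stream(event.stream_id)
#       elif isinstance(event, ConnectionTerminated):
#           break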
| mpl-2.0 |
tangyiyong/odoo | addons/website/models/ir_http.py | 162 | 13407 | # -*- coding: utf-8 -*-
import logging
import os
import re
import traceback
import werkzeug
import werkzeug.routing
import werkzeug.utils
import openerp
from openerp.addons.base import ir
from openerp.addons.base.ir import ir_qweb
from openerp.addons.website.models.website import slug, url_for, _UNSLUG_RE
from openerp.http import request
from openerp.tools import config
from openerp.osv import orm
from openerp.tools.safe_eval import safe_eval as eval
logger = logging.getLogger(__name__)
class RequestUID(object):
def __init__(self, **kw):
self.__dict__.update(kw)
class ir_http(orm.AbstractModel):
_inherit = 'ir.http'
rerouting_limit = 10
geo_ip_resolver = None
def _get_converters(self):
return dict(
super(ir_http, self)._get_converters(),
model=ModelConverter,
page=PageConverter,
)
def _auth_method_public(self):
# TODO: select user_id from matching website
if not request.session.uid:
request.uid = self.pool['ir.model.data'].xmlid_to_res_id(request.cr, openerp.SUPERUSER_ID, 'base.public_user')
else:
request.uid = request.session.uid
bots = "bot|crawl|slurp|spider|curl|wget|facebookexternalhit".split("|")
def is_a_bot(self):
        # We deliberately avoid regexp and ustr here;
        # timeit showed this simple substring check is the fastest approach
ua = request.httprequest.environ.get('HTTP_USER_AGENT', '').lower()
try:
return any(bot in ua for bot in self.bots)
except UnicodeDecodeError:
return any(bot in ua.encode('ascii', 'ignore') for bot in self.bots)
def get_nearest_lang(self, lang):
# Try to find a similar lang. Eg: fr_BE and fr_FR
if lang in request.website.get_languages():
return lang
short = lang.split('_')[0]
for code, name in request.website.get_languages():
if code.startswith(short):
return code
return False
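    # e.g. with only ('fr_FR', ...) and ('en_US', ...) installed on the website,
    # get_nearest_lang('fr_BE') returns 'fr_FR' and get_nearest_lang('de_DE')
    # returns False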
def _dispatch(self):
first_pass = not hasattr(request, 'website')
request.website = None
func = None
try:
func, arguments = self._find_handler()
request.website_enabled = func.routing.get('website', False)
except werkzeug.exceptions.NotFound:
            # either we have a language-prefixed route, or a real 404
# in all cases, website processes them
request.website_enabled = True
request.website_multilang = (
request.website_enabled and
func and func.routing.get('multilang', func.routing['type'] == 'http')
)
if 'geoip' not in request.session:
record = {}
if self.geo_ip_resolver is None:
try:
import GeoIP
                    # an updated database can be downloaded from the MaxMind website
# http://dev.maxmind.com/geoip/legacy/install/city/
geofile = config.get('geoip_database')
if os.path.exists(geofile):
self.geo_ip_resolver = GeoIP.open(geofile, GeoIP.GEOIP_STANDARD)
else:
self.geo_ip_resolver = False
                        logger.warning('GeoIP database file %r does not exist', geofile)
except ImportError:
self.geo_ip_resolver = False
if self.geo_ip_resolver and request.httprequest.remote_addr:
record = self.geo_ip_resolver.record_by_addr(request.httprequest.remote_addr) or {}
request.session['geoip'] = record
cook_lang = request.httprequest.cookies.get('website_lang')
if request.website_enabled:
try:
if func:
self._authenticate(func.routing['auth'])
else:
self._auth_method_public()
except Exception as e:
return self._handle_exception(e)
request.redirect = lambda url, code=302: werkzeug.utils.redirect(url_for(url), code)
request.website = request.registry['website'].get_current_website(request.cr, request.uid, context=request.context)
langs = [lg[0] for lg in request.website.get_languages()]
path = request.httprequest.path.split('/')
if first_pass:
nearest_lang = not func and self.get_nearest_lang(path[1])
url_lang = nearest_lang and path[1]
preferred_lang = ((cook_lang if cook_lang in langs else False)
or self.get_nearest_lang(request.lang)
or request.website.default_lang_code)
is_a_bot = self.is_a_bot()
request.lang = request.context['lang'] = nearest_lang or preferred_lang
# if lang in url but not the displayed or default language --> change or remove
                # or no lang in url, and lang to display not the default language --> add lang
# and not a POST request
# and not a bot or bot but default lang in url
if ((url_lang and (url_lang != request.lang or url_lang == request.website.default_lang_code))
or (not url_lang and request.website_multilang and request.lang != request.website.default_lang_code)
and request.httprequest.method != 'POST') \
and (not is_a_bot or (url_lang and url_lang == request.website.default_lang_code)):
if url_lang:
path.pop(1)
if request.lang != request.website.default_lang_code:
path.insert(1, request.lang)
path = '/'.join(path) or '/'
redirect = request.redirect(path + '?' + request.httprequest.query_string)
redirect.set_cookie('website_lang', request.lang)
return redirect
elif url_lang:
path.pop(1)
return self.reroute('/'.join(path) or '/')
# bind modified context
request.website = request.website.with_context(request.context)
resp = super(ir_http, self)._dispatch()
if request.website_enabled and cook_lang != request.lang and hasattr(resp, 'set_cookie'):
resp.set_cookie('website_lang', request.lang)
return resp
def reroute(self, path):
if not hasattr(request, 'rerouting'):
request.rerouting = [request.httprequest.path]
if path in request.rerouting:
raise Exception("Rerouting loop is forbidden")
request.rerouting.append(path)
if len(request.rerouting) > self.rerouting_limit:
raise Exception("Rerouting limit exceeded")
request.httprequest.environ['PATH_INFO'] = path
# void werkzeug cached_property. TODO: find a proper way to do this
for key in ('path', 'full_path', 'url', 'base_url'):
request.httprequest.__dict__.pop(key, None)
return self._dispatch()
def _postprocess_args(self, arguments, rule):
super(ir_http, self)._postprocess_args(arguments, rule)
for key, val in arguments.items():
# Replace uid placeholder by the current request.uid
if isinstance(val, orm.BaseModel) and isinstance(val._uid, RequestUID):
arguments[key] = val.sudo(request.uid)
try:
_, path = rule.build(arguments)
assert path is not None
except Exception, e:
return self._handle_exception(e, code=404)
if getattr(request, 'website_multilang', False) and request.httprequest.method in ('GET', 'HEAD'):
generated_path = werkzeug.url_unquote_plus(path)
current_path = werkzeug.url_unquote_plus(request.httprequest.path)
if generated_path != current_path:
if request.lang != request.website.default_lang_code:
path = '/' + request.lang + path
if request.httprequest.query_string:
path += '?' + request.httprequest.query_string
return werkzeug.utils.redirect(path, code=301)
def _handle_exception(self, exception, code=500):
is_website_request = bool(getattr(request, 'website_enabled', False) and request.website)
if not is_website_request:
# Don't touch non website requests exception handling
return super(ir_http, self)._handle_exception(exception)
else:
try:
response = super(ir_http, self)._handle_exception(exception)
if isinstance(response, Exception):
exception = response
else:
                    # if parent explicitly returns a plain response, then we don't touch it
return response
except Exception, e:
exception = e
values = dict(
exception=exception,
traceback=traceback.format_exc(exception),
)
code = getattr(exception, 'code', code)
if isinstance(exception, openerp.exceptions.AccessError):
code = 403
if isinstance(exception, ir_qweb.QWebException):
values.update(qweb_exception=exception)
if isinstance(exception.qweb.get('cause'), openerp.exceptions.AccessError):
code = 403
if isinstance(exception, werkzeug.exceptions.HTTPException) and code is None:
# Hand-crafted HTTPException likely coming from abort(),
# usually for a redirect response -> return it directly
return exception
if code == 500:
logger.error("500 Internal Server Error:\n\n%s", values['traceback'])
if 'qweb_exception' in values:
view = request.registry.get("ir.ui.view")
views = view._views_get(request.cr, request.uid, exception.qweb['template'], request.context)
to_reset = [v for v in views if v.model_data_id.noupdate is True and not v.page]
values['views'] = to_reset
elif code == 403:
logger.warn("403 Forbidden:\n\n%s", values['traceback'])
values.update(
status_message=werkzeug.http.HTTP_STATUS_CODES[code],
status_code=code,
)
if not request.uid:
self._auth_method_public()
try:
html = request.website._render('website.%s' % code, values)
except Exception:
html = request.website._render('website.http_error', values)
return werkzeug.wrappers.Response(html, status=code, content_type='text/html;charset=utf-8')
class ModelConverter(ir.ir_http.ModelConverter):
def __init__(self, url_map, model=False, domain='[]'):
super(ModelConverter, self).__init__(url_map, model)
self.domain = domain
self.regex = _UNSLUG_RE.pattern
def to_url(self, value):
return slug(value)
def to_python(self, value):
m = re.match(self.regex, value)
_uid = RequestUID(value=value, match=m, converter=self)
record_id = int(m.group(2))
if record_id < 0:
# limited support for negative IDs due to our slug pattern, assume abs() if not found
if not request.registry[self.model].exists(request.cr, _uid, [record_id]):
record_id = abs(record_id)
return request.registry[self.model].browse(
request.cr, _uid, record_id, context=request.context)
def generate(self, cr, uid, query=None, args=None, context=None):
obj = request.registry[self.model]
domain = eval( self.domain, (args or {}).copy())
if query:
domain.append((obj._rec_name, 'ilike', '%'+query+'%'))
for record in obj.search_read(cr, uid, domain=domain, fields=['write_date',obj._rec_name], context=context):
if record.get(obj._rec_name, False):
yield {'loc': (record['id'], record[obj._rec_name])}
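# e.g. a record named "Great Event" with id 42 is rendered by to_url() as a
# slug along the lines of "great-event-42", and to_python() recovers the id
# from the trailing "-42" (group 2 of _UNSLUG_RE) before browsing the record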
class PageConverter(werkzeug.routing.PathConverter):
""" Only point of this converter is to bundle pages enumeration logic """
def generate(self, cr, uid, query=None, args={}, context=None):
View = request.registry['ir.ui.view']
views = View.search_read(cr, uid, [['page', '=', True]],
fields=['xml_id','priority','write_date'], order='name', context=context)
for view in views:
xid = view['xml_id'].startswith('website.') and view['xml_id'][8:] or view['xml_id']
            # the 'page/homepage' url is indexed as '/', avoid having the same page referenced twice
            # when we have a URL mapping mechanism, replace this with a rule: page/homepage --> /
if xid=='homepage': continue
if query and query.lower() not in xid.lower():
continue
record = {'loc': xid}
            if view['priority'] != 16:
record['__priority'] = min(round(view['priority'] / 32.0,1), 1)
if view['write_date']:
record['__lastmod'] = view['write_date'][:10]
yield record
| agpl-3.0 |
lavaner/pywim2 | docs/conf.py | 1 | 8296 | # -*- coding: utf-8 -*-
#
# pywim2 documentation build configuration file, created by
# sphinx-quickstart on Wed Jul 22 00:19:46 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pywim2'
copyright = u'2015, LiuYuan'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0.0'
# The full version, including alpha/beta/rc tags.
release = '1.0.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pywim2doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'pywim2.tex', u'pywim2 Documentation',
u'LiuYuan', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pywim2', u'pywim2 Documentation',
[u'LiuYuan'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pywim2', u'pywim2 Documentation',
u'LiuYuan', 'pywim2', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
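# Typical build commands for this configuration (run from the docs directory,
# assuming Sphinx is installed):
#
# sphinx-build -b html . _build/html
# sphinx-build -b latex . _build/latex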
| gpl-3.0 |
jiangzhuo/kbengine | kbe/res/scripts/common/Lib/test/test_functools.py | 60 | 60228 | import abc
import collections
from itertools import permutations
import pickle
from random import choice
import sys
from test import support
import unittest
from weakref import proxy
import functools
py_functools = support.import_fresh_module('functools', blocked=['_functools'])
c_functools = support.import_fresh_module('functools', fresh=['_functools'])
decimal = support.import_fresh_module('decimal', fresh=['_decimal'])
def capture(*args, **kw):
"""capture all positional and keyword arguments"""
return args, kw
def signature(part):
""" return the signature of a partial object """
return (part.func, part.args, part.keywords, part.__dict__)
class TestPartial:
def test_basic_examples(self):
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertTrue(callable(p))
self.assertEqual(p(3, 4, b=30, c=40),
((1, 2, 3, 4), dict(a=10, b=30, c=40)))
p = self.partial(map, lambda x: x*10)
self.assertEqual(list(p([1,2,3,4])), [10, 20, 30, 40])
def test_attributes(self):
p = self.partial(capture, 1, 2, a=10, b=20)
# attributes should be readable
self.assertEqual(p.func, capture)
self.assertEqual(p.args, (1, 2))
self.assertEqual(p.keywords, dict(a=10, b=20))
def test_argument_checking(self):
self.assertRaises(TypeError, self.partial) # need at least a func arg
try:
self.partial(2)()
except TypeError:
pass
else:
self.fail('First arg not checked for callability')
def test_protection_of_callers_dict_argument(self):
# a caller's dictionary should not be altered by partial
def func(a=10, b=20):
return a
d = {'a':3}
p = self.partial(func, a=5)
self.assertEqual(p(**d), 3)
self.assertEqual(d, {'a':3})
p(b=7)
self.assertEqual(d, {'a':3})
def test_arg_combinations(self):
# exercise special code paths for zero args in either partial
# object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(1,2), ((1,2), {}))
p = self.partial(capture, 1, 2)
self.assertEqual(p(), ((1,2), {}))
self.assertEqual(p(3,4), ((1,2,3,4), {}))
def test_kw_combinations(self):
# exercise special code paths for no keyword args in
# either the partial object or the caller
p = self.partial(capture)
self.assertEqual(p(), ((), {}))
self.assertEqual(p(a=1), ((), {'a':1}))
p = self.partial(capture, a=1)
self.assertEqual(p(), ((), {'a':1}))
self.assertEqual(p(b=2), ((), {'a':1, 'b':2}))
# keyword args in the call override those in the partial object
self.assertEqual(p(a=3, b=2), ((), {'a':3, 'b':2}))
def test_positional(self):
# make sure positional arguments are captured correctly
for args in [(), (0,), (0,1), (0,1,2), (0,1,2,3)]:
p = self.partial(capture, *args)
expected = args + ('x',)
got, empty = p('x')
self.assertTrue(expected == got and empty == {})
def test_keyword(self):
# make sure keyword arguments are captured correctly
for a in ['a', 0, None, 3.5]:
p = self.partial(capture, a=a)
expected = {'a':a,'x':None}
empty, got = p(x=None)
self.assertTrue(expected == got and empty == ())
def test_no_side_effects(self):
# make sure there are no side effects that affect subsequent calls
p = self.partial(capture, 0, a=1)
args1, kw1 = p(1, b=2)
self.assertTrue(args1 == (0,1) and kw1 == {'a':1,'b':2})
args2, kw2 = p()
self.assertTrue(args2 == (0,) and kw2 == {'a':1})
def test_error_propagation(self):
def f(x, y):
x / y
self.assertRaises(ZeroDivisionError, self.partial(f, 1, 0))
self.assertRaises(ZeroDivisionError, self.partial(f, 1), 0)
self.assertRaises(ZeroDivisionError, self.partial(f), 1, 0)
self.assertRaises(ZeroDivisionError, self.partial(f, y=0), 1)
def test_weakref(self):
f = self.partial(int, base=16)
p = proxy(f)
self.assertEqual(f.func, p.func)
f = None
self.assertRaises(ReferenceError, getattr, p, 'func')
def test_with_bound_and_unbound_methods(self):
data = list(map(str, range(10)))
join = self.partial(str.join, '')
self.assertEqual(join(data), '0123456789')
join = self.partial(''.join)
self.assertEqual(join(data), '0123456789')
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialC(TestPartial, unittest.TestCase):
if c_functools:
partial = c_functools.partial
def test_attributes_unwritable(self):
# attributes should not be writable
p = self.partial(capture, 1, 2, a=10, b=20)
self.assertRaises(AttributeError, setattr, p, 'func', map)
self.assertRaises(AttributeError, setattr, p, 'args', (1, 2))
self.assertRaises(AttributeError, setattr, p, 'keywords', dict(a=1, b=2))
p = self.partial(hex)
try:
del p.__dict__
except TypeError:
pass
else:
self.fail('partial object allowed __dict__ to be deleted')
def test_repr(self):
args = (object(), object())
args_repr = ', '.join(repr(a) for a in args)
#kwargs = {'a': object(), 'b': object()}
kwargs = {'a': object()}
kwargs_repr = ', '.join("%s=%r" % (k, v) for k, v in kwargs.items())
if self.partial is c_functools.partial:
name = 'functools.partial'
else:
name = self.partial.__name__
f = self.partial(capture)
self.assertEqual('{}({!r})'.format(name, capture),
repr(f))
f = self.partial(capture, *args)
self.assertEqual('{}({!r}, {})'.format(name, capture, args_repr),
repr(f))
f = self.partial(capture, **kwargs)
self.assertEqual('{}({!r}, {})'.format(name, capture, kwargs_repr),
repr(f))
f = self.partial(capture, *args, **kwargs)
self.assertEqual('{}({!r}, {}, {})'.format(name, capture, args_repr, kwargs_repr),
repr(f))
def test_pickle(self):
f = self.partial(signature, 'asdf', bar=True)
f.add_something_to__dict__ = True
f_copy = pickle.loads(pickle.dumps(f))
self.assertEqual(signature(f), signature(f_copy))
# Issue 6083: Reference counting bug
def test_setstate_refcount(self):
class BadSequence:
def __len__(self):
return 4
def __getitem__(self, key):
if key == 0:
return max
elif key == 1:
return tuple(range(1000000))
elif key in (2, 3):
return {}
raise IndexError
f = self.partial(object)
self.assertRaisesRegex(SystemError,
"new style getargs format but argument is not a tuple",
f.__setstate__, BadSequence())
class TestPartialPy(TestPartial, unittest.TestCase):
partial = staticmethod(py_functools.partial)
if c_functools:
class PartialSubclass(c_functools.partial):
pass
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestPartialCSubclass(TestPartialC):
if c_functools:
partial = PartialSubclass
class TestPartialMethod(unittest.TestCase):
class A(object):
nothing = functools.partialmethod(capture)
positional = functools.partialmethod(capture, 1)
keywords = functools.partialmethod(capture, a=2)
both = functools.partialmethod(capture, 3, b=4)
nested = functools.partialmethod(positional, 5)
over_partial = functools.partialmethod(functools.partial(capture, c=6), 7)
static = functools.partialmethod(staticmethod(capture), 8)
cls = functools.partialmethod(classmethod(capture), d=9)
a = A()
def test_arg_combinations(self):
self.assertEqual(self.a.nothing(), ((self.a,), {}))
self.assertEqual(self.a.nothing(5), ((self.a, 5), {}))
self.assertEqual(self.a.nothing(c=6), ((self.a,), {'c': 6}))
self.assertEqual(self.a.nothing(5, c=6), ((self.a, 5), {'c': 6}))
self.assertEqual(self.a.positional(), ((self.a, 1), {}))
self.assertEqual(self.a.positional(5), ((self.a, 1, 5), {}))
self.assertEqual(self.a.positional(c=6), ((self.a, 1), {'c': 6}))
self.assertEqual(self.a.positional(5, c=6), ((self.a, 1, 5), {'c': 6}))
self.assertEqual(self.a.keywords(), ((self.a,), {'a': 2}))
self.assertEqual(self.a.keywords(5), ((self.a, 5), {'a': 2}))
self.assertEqual(self.a.keywords(c=6), ((self.a,), {'a': 2, 'c': 6}))
self.assertEqual(self.a.keywords(5, c=6), ((self.a, 5), {'a': 2, 'c': 6}))
self.assertEqual(self.a.both(), ((self.a, 3), {'b': 4}))
self.assertEqual(self.a.both(5), ((self.a, 3, 5), {'b': 4}))
self.assertEqual(self.a.both(c=6), ((self.a, 3), {'b': 4, 'c': 6}))
self.assertEqual(self.a.both(5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
self.assertEqual(self.A.both(self.a, 5, c=6), ((self.a, 3, 5), {'b': 4, 'c': 6}))
def test_nested(self):
self.assertEqual(self.a.nested(), ((self.a, 1, 5), {}))
self.assertEqual(self.a.nested(6), ((self.a, 1, 5, 6), {}))
self.assertEqual(self.a.nested(d=7), ((self.a, 1, 5), {'d': 7}))
self.assertEqual(self.a.nested(6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
self.assertEqual(self.A.nested(self.a, 6, d=7), ((self.a, 1, 5, 6), {'d': 7}))
def test_over_partial(self):
self.assertEqual(self.a.over_partial(), ((self.a, 7), {'c': 6}))
self.assertEqual(self.a.over_partial(5), ((self.a, 7, 5), {'c': 6}))
self.assertEqual(self.a.over_partial(d=8), ((self.a, 7), {'c': 6, 'd': 8}))
self.assertEqual(self.a.over_partial(5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
self.assertEqual(self.A.over_partial(self.a, 5, d=8), ((self.a, 7, 5), {'c': 6, 'd': 8}))
def test_bound_method_introspection(self):
obj = self.a
self.assertIs(obj.both.__self__, obj)
self.assertIs(obj.nested.__self__, obj)
self.assertIs(obj.over_partial.__self__, obj)
self.assertIs(obj.cls.__self__, self.A)
self.assertIs(self.A.cls.__self__, self.A)
def test_unbound_method_retrieval(self):
obj = self.A
self.assertFalse(hasattr(obj.both, "__self__"))
self.assertFalse(hasattr(obj.nested, "__self__"))
self.assertFalse(hasattr(obj.over_partial, "__self__"))
self.assertFalse(hasattr(obj.static, "__self__"))
self.assertFalse(hasattr(self.a.static, "__self__"))
def test_descriptors(self):
for obj in [self.A, self.a]:
with self.subTest(obj=obj):
self.assertEqual(obj.static(), ((8,), {}))
self.assertEqual(obj.static(5), ((8, 5), {}))
self.assertEqual(obj.static(d=8), ((8,), {'d': 8}))
self.assertEqual(obj.static(5, d=8), ((8, 5), {'d': 8}))
self.assertEqual(obj.cls(), ((self.A,), {'d': 9}))
self.assertEqual(obj.cls(5), ((self.A, 5), {'d': 9}))
self.assertEqual(obj.cls(c=8), ((self.A,), {'c': 8, 'd': 9}))
self.assertEqual(obj.cls(5, c=8), ((self.A, 5), {'c': 8, 'd': 9}))
def test_overriding_keywords(self):
self.assertEqual(self.a.keywords(a=3), ((self.a,), {'a': 3}))
self.assertEqual(self.A.keywords(self.a, a=3), ((self.a,), {'a': 3}))
def test_invalid_args(self):
with self.assertRaises(TypeError):
class B(object):
method = functools.partialmethod(None, 1)
def test_repr(self):
self.assertEqual(repr(vars(self.A)['both']),
'functools.partialmethod({}, 3, b=4)'.format(capture))
def test_abstract(self):
class Abstract(abc.ABCMeta):
@abc.abstractmethod
def add(self, x, y):
pass
add5 = functools.partialmethod(add, 5)
self.assertTrue(Abstract.add.__isabstractmethod__)
self.assertTrue(Abstract.add5.__isabstractmethod__)
for func in [self.A.static, self.A.cls, self.A.over_partial, self.A.nested, self.A.both]:
self.assertFalse(getattr(func, '__isabstractmethod__', False))
class TestUpdateWrapper(unittest.TestCase):
def check_wrapper(self, wrapper, wrapped,
assigned=functools.WRAPPER_ASSIGNMENTS,
updated=functools.WRAPPER_UPDATES):
# Check attributes were assigned
for name in assigned:
self.assertIs(getattr(wrapper, name), getattr(wrapped, name))
# Check attributes were updated
for name in updated:
wrapper_attr = getattr(wrapper, name)
wrapped_attr = getattr(wrapped, name)
for key in wrapped_attr:
if name == "__dict__" and key == "__wrapped__":
# __wrapped__ is overwritten by the update code
continue
self.assertIs(wrapped_attr[key], wrapper_attr[key])
# Check __wrapped__
self.assertIs(wrapper.__wrapped__, wrapped)
def _default_update(self):
def f(a:'This is a new annotation'):
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is a bald faced lie"
def wrapper(b:'This is the prior annotation'):
pass
functools.update_wrapper(wrapper, f)
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertIs(wrapper.__wrapped__, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
self.assertEqual(wrapper.__annotations__['a'], 'This is a new annotation')
self.assertNotIn('b', wrapper.__annotations__)
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, f = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
def wrapper():
pass
functools.update_wrapper(wrapper, f, (), ())
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.__annotations__, {})
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
functools.update_wrapper(wrapper, f, assign, update)
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
def test_missing_attributes(self):
def f():
pass
def wrapper():
pass
wrapper.dict_attr = {}
assign = ('attr',)
update = ('dict_attr',)
# Missing attributes on wrapped object are ignored
functools.update_wrapper(wrapper, f, assign, update)
self.assertNotIn('attr', wrapper.__dict__)
self.assertEqual(wrapper.dict_attr, {})
# Wrapper must have expected attributes for updating
del wrapper.dict_attr
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
wrapper.dict_attr = 1
with self.assertRaises(AttributeError):
functools.update_wrapper(wrapper, f, assign, update)
@support.requires_docstrings
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_builtin_update(self):
# Test for bug #1576241
def wrapper():
pass
functools.update_wrapper(wrapper, max)
self.assertEqual(wrapper.__name__, 'max')
self.assertTrue(wrapper.__doc__.startswith('max('))
self.assertEqual(wrapper.__annotations__, {})
class TestWraps(TestUpdateWrapper):
def _default_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
f.__wrapped__ = "This is still a bald faced lie"
@functools.wraps(f)
def wrapper():
pass
return wrapper, f
def test_default_update(self):
wrapper, f = self._default_update()
self.check_wrapper(wrapper, f)
self.assertEqual(wrapper.__name__, 'f')
self.assertEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.attr, 'This is also a test')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def test_default_update_doc(self):
wrapper, _ = self._default_update()
self.assertEqual(wrapper.__doc__, 'This is a test')
def test_no_update(self):
def f():
"""This is a test"""
pass
f.attr = 'This is also a test'
@functools.wraps(f, (), ())
def wrapper():
pass
self.check_wrapper(wrapper, f, (), ())
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertFalse(hasattr(wrapper, 'attr'))
def test_selective_update(self):
def f():
pass
f.attr = 'This is a different test'
f.dict_attr = dict(a=1, b=2, c=3)
def add_dict_attr(f):
f.dict_attr = {}
return f
assign = ('attr',)
update = ('dict_attr',)
@functools.wraps(f, assign, update)
@add_dict_attr
def wrapper():
pass
self.check_wrapper(wrapper, f, assign, update)
self.assertEqual(wrapper.__name__, 'wrapper')
self.assertNotEqual(wrapper.__qualname__, f.__qualname__)
self.assertEqual(wrapper.__doc__, None)
self.assertEqual(wrapper.attr, 'This is a different test')
self.assertEqual(wrapper.dict_attr, f.dict_attr)
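# Editor's note: illustrative addition, not part of the original test file. It
# shows the decorator idiom that TestUpdateWrapper and TestWraps verify above:
# functools.wraps copies metadata onto the wrapper and records the original
# function under __wrapped__. The names `_traced` and `greet` are invented.
import functools

def _traced(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper

def _example_wraps():
    @_traced
    def greet(name):
        """Return a greeting."""
        return 'hello ' + name
    assert greet.__name__ == 'greet'
    assert greet.__doc__ == 'Return a greeting.'
    assert greet.__wrapped__('world') == 'hello world'
    assert greet('world') == 'hello world'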
class TestReduce(unittest.TestCase):
func = functools.reduce
def test_reduce(self):
class Squares:
def __init__(self, max):
self.max = max
self.sofar = []
def __len__(self):
return len(self.sofar)
def __getitem__(self, i):
if not 0 <= i < self.max: raise IndexError
n = len(self.sofar)
while n <= i:
self.sofar.append(n*n)
n += 1
return self.sofar[i]
def add(x, y):
return x + y
self.assertEqual(self.func(add, ['a', 'b', 'c'], ''), 'abc')
self.assertEqual(
self.func(add, [['a', 'c'], [], ['d', 'w']], []),
['a','c','d','w']
)
self.assertEqual(self.func(lambda x, y: x*y, range(2,8), 1), 5040)
self.assertEqual(
self.func(lambda x, y: x*y, range(2,21), 1),
2432902008176640000
)
self.assertEqual(self.func(add, Squares(10)), 285)
self.assertEqual(self.func(add, Squares(10), 0), 285)
self.assertEqual(self.func(add, Squares(0), 0), 0)
self.assertRaises(TypeError, self.func)
self.assertRaises(TypeError, self.func, 42, 42)
self.assertRaises(TypeError, self.func, 42, 42, 42)
self.assertEqual(self.func(42, "1"), "1") # func is never called with one item
self.assertEqual(self.func(42, "", "1"), "1") # func is never called with one item
self.assertRaises(TypeError, self.func, 42, (42, 42))
self.assertRaises(TypeError, self.func, add, []) # arg 2 must not be empty sequence with no initial value
self.assertRaises(TypeError, self.func, add, "")
self.assertRaises(TypeError, self.func, add, ())
self.assertRaises(TypeError, self.func, add, object())
class TestFailingIter:
def __iter__(self):
raise RuntimeError
self.assertRaises(RuntimeError, self.func, add, TestFailingIter())
self.assertEqual(self.func(add, [], None), None)
self.assertEqual(self.func(add, [], 42), 42)
class BadSeq:
def __getitem__(self, index):
raise ValueError
self.assertRaises(ValueError, self.func, 42, BadSeq())
# Test reduce()'s use of iterators.
def test_iterator_usage(self):
class SequenceClass:
def __init__(self, n):
self.n = n
def __getitem__(self, i):
if 0 <= i < self.n:
return i
else:
raise IndexError
from operator import add
self.assertEqual(self.func(add, SequenceClass(5)), 10)
self.assertEqual(self.func(add, SequenceClass(5), 42), 52)
self.assertRaises(TypeError, self.func, add, SequenceClass(0))
self.assertEqual(self.func(add, SequenceClass(0), 42), 42)
self.assertEqual(self.func(add, SequenceClass(1)), 0)
self.assertEqual(self.func(add, SequenceClass(1), 42), 42)
d = {"one": 1, "two": 2, "three": 3}
self.assertEqual(self.func(add, d), "".join(d.keys()))
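# Editor's note: illustrative addition, not part of the original test file.
# functools.reduce folds a two-argument function over an iterable, optionally
# seeded with an initial value, which is the behaviour TestReduce checks above.
import functools
import operator

def _example_reduce():
    assert functools.reduce(operator.add, [1, 2, 3, 4]) == 10
    assert functools.reduce(operator.mul, range(1, 6), 1) == 120
    # With an empty iterable the initial value is returned unchanged.
    assert functools.reduce(operator.add, [], 42) == 42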
class TestCmpToKey:
def test_cmp_to_key(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(cmp1)
self.assertEqual(key(3), key(3))
self.assertGreater(key(3), key(1))
self.assertGreaterEqual(key(3), key(3))
def cmp2(x, y):
return int(x) - int(y)
key = self.cmp_to_key(cmp2)
self.assertEqual(key(4.0), key('4'))
self.assertLess(key(2), key('35'))
self.assertLessEqual(key(2), key('35'))
self.assertNotEqual(key(2), key('35'))
def test_cmp_to_key_arguments(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(obj=3), key(obj=3))
self.assertGreater(key(obj=3), key(obj=1))
with self.assertRaises((TypeError, AttributeError)):
key(3) > 1 # rhs is not a K object
with self.assertRaises((TypeError, AttributeError)):
1 < key(3) # lhs is not a K object
with self.assertRaises(TypeError):
key = self.cmp_to_key() # too few args
with self.assertRaises(TypeError):
key = self.cmp_to_key(cmp1, None) # too many args
key = self.cmp_to_key(cmp1)
with self.assertRaises(TypeError):
key() # too few args
with self.assertRaises(TypeError):
key(None, None) # too many args
def test_bad_cmp(self):
def cmp1(x, y):
raise ZeroDivisionError
key = self.cmp_to_key(cmp1)
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
class BadCmp:
def __lt__(self, other):
raise ZeroDivisionError
def cmp1(x, y):
return BadCmp()
with self.assertRaises(ZeroDivisionError):
key(3) > key(1)
def test_obj_field(self):
def cmp1(x, y):
return (x > y) - (x < y)
key = self.cmp_to_key(mycmp=cmp1)
self.assertEqual(key(50).obj, 50)
def test_sort_int(self):
def mycmp(x, y):
return y - x
self.assertEqual(sorted(range(5), key=self.cmp_to_key(mycmp)),
[4, 3, 2, 1, 0])
def test_sort_int_str(self):
def mycmp(x, y):
x, y = int(x), int(y)
return (x > y) - (x < y)
values = [5, '3', 7, 2, '0', '1', 4, '10', 1]
values = sorted(values, key=self.cmp_to_key(mycmp))
self.assertEqual([int(value) for value in values],
[0, 1, 1, 2, 3, 4, 5, 7, 10])
def test_hash(self):
def mycmp(x, y):
return y - x
key = self.cmp_to_key(mycmp)
k = key(10)
self.assertRaises(TypeError, hash, k)
self.assertNotIsInstance(k, collections.Hashable)
@unittest.skipUnless(c_functools, 'requires the C _functools module')
class TestCmpToKeyC(TestCmpToKey, unittest.TestCase):
if c_functools:
cmp_to_key = c_functools.cmp_to_key
class TestCmpToKeyPy(TestCmpToKey, unittest.TestCase):
cmp_to_key = staticmethod(py_functools.cmp_to_key)
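# Editor's note: illustrative addition, not part of the original test file.
# functools.cmp_to_key adapts an old-style cmp(x, y) -> negative/zero/positive
# comparison function into a key= callable for sorted()/min()/max(), as the
# TestCmpToKey variants above verify.
import functools

def _example_cmp_to_key():
    def reverse_cmp(x, y):
        return y - x
    assert sorted([3, 1, 2], key=functools.cmp_to_key(reverse_cmp)) == [3, 2, 1]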
class TestTotalOrdering(unittest.TestCase):
def test_total_ordering_lt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __lt__(self, other):
return self.value < other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) > A(2))
def test_total_ordering_le(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __le__(self, other):
return self.value <= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(1) >= A(2))
def test_total_ordering_gt(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __gt__(self, other):
return self.value > other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) < A(1))
def test_total_ordering_ge(self):
@functools.total_ordering
class A:
def __init__(self, value):
self.value = value
def __ge__(self, other):
return self.value >= other.value
def __eq__(self, other):
return self.value == other.value
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
self.assertFalse(A(2) <= A(1))
def test_total_ordering_no_overwrite(self):
# new methods should not overwrite existing
@functools.total_ordering
class A(int):
pass
self.assertTrue(A(1) < A(2))
self.assertTrue(A(2) > A(1))
self.assertTrue(A(1) <= A(2))
self.assertTrue(A(2) >= A(1))
self.assertTrue(A(2) <= A(2))
self.assertTrue(A(2) >= A(2))
def test_no_operations_defined(self):
with self.assertRaises(ValueError):
@functools.total_ordering
class A:
pass
def test_type_error_when_not_implemented(self):
# bug 10042; ensure stack overflow does not occur
# when decorated types return NotImplemented
@functools.total_ordering
class ImplementsLessThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value == other.value
return False
def __lt__(self, other):
if isinstance(other, ImplementsLessThan):
return self.value < other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThan:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value == other.value
return False
def __gt__(self, other):
if isinstance(other, ImplementsGreaterThan):
return self.value > other.value
return NotImplemented
@functools.total_ordering
class ImplementsLessThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value == other.value
return False
def __le__(self, other):
if isinstance(other, ImplementsLessThanEqualTo):
return self.value <= other.value
return NotImplemented
@functools.total_ordering
class ImplementsGreaterThanEqualTo:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value == other.value
return False
def __ge__(self, other):
if isinstance(other, ImplementsGreaterThanEqualTo):
return self.value >= other.value
return NotImplemented
@functools.total_ordering
class ComparatorNotImplemented:
def __init__(self, value):
self.value = value
def __eq__(self, other):
if isinstance(other, ComparatorNotImplemented):
return self.value == other.value
return False
def __lt__(self, other):
return NotImplemented
with self.subTest("LT < 1"), self.assertRaises(TypeError):
ImplementsLessThan(-1) < 1
with self.subTest("LT < LE"), self.assertRaises(TypeError):
ImplementsLessThan(0) < ImplementsLessThanEqualTo(0)
with self.subTest("LT < GT"), self.assertRaises(TypeError):
ImplementsLessThan(1) < ImplementsGreaterThan(1)
with self.subTest("LE <= LT"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(2) <= ImplementsLessThan(2)
with self.subTest("LE <= GE"), self.assertRaises(TypeError):
ImplementsLessThanEqualTo(3) <= ImplementsGreaterThanEqualTo(3)
with self.subTest("GT > GE"), self.assertRaises(TypeError):
ImplementsGreaterThan(4) > ImplementsGreaterThanEqualTo(4)
with self.subTest("GT > LT"), self.assertRaises(TypeError):
ImplementsGreaterThan(5) > ImplementsLessThan(5)
with self.subTest("GE >= GT"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(6) >= ImplementsGreaterThan(6)
with self.subTest("GE >= LE"), self.assertRaises(TypeError):
ImplementsGreaterThanEqualTo(7) >= ImplementsLessThanEqualTo(7)
with self.subTest("GE when equal"):
a = ComparatorNotImplemented(8)
b = ComparatorNotImplemented(8)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a >= b
with self.subTest("LE when equal"):
a = ComparatorNotImplemented(9)
b = ComparatorNotImplemented(9)
self.assertEqual(a, b)
with self.assertRaises(TypeError):
a <= b
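# Editor's note: illustrative addition, not part of the original test file.
# @functools.total_ordering derives the missing rich comparisons from __eq__
# plus a single ordering method, as TestTotalOrdering checks above. The
# `_Version` class is invented for this sketch.
import functools

@functools.total_ordering
class _Version:
    def __init__(self, number):
        self.number = number
    def __eq__(self, other):
        return self.number == other.number
    def __lt__(self, other):
        return self.number < other.number

def _example_total_ordering():
    assert _Version(1) < _Version(2)
    assert _Version(2) >= _Version(1)   # derived from __lt__ and __eq__
    assert _Version(3) > _Version(2)    # derived from __lt__ and __eq__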
class TestLRU(unittest.TestCase):
def test_lru(self):
def orig(x, y):
return 3 * x + y
f = functools.lru_cache(maxsize=20)(orig)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(maxsize, 20)
self.assertEqual(currsize, 0)
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
domain = range(5)
for i in range(1000):
x, y = choice(domain), choice(domain)
actual = f(x, y)
expected = orig(x, y)
self.assertEqual(actual, expected)
hits, misses, maxsize, currsize = f.cache_info()
self.assertTrue(hits > misses)
self.assertEqual(hits + misses, 1000)
self.assertEqual(currsize, 20)
f.cache_clear() # test clearing
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 0)
self.assertEqual(currsize, 0)
f(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# Test bypassing the cache
self.assertIs(f.__wrapped__, orig)
f.__wrapped__(x, y)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size zero (which means "never-cache")
@functools.lru_cache(0)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 0)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 5)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 0)
self.assertEqual(misses, 5)
self.assertEqual(currsize, 0)
# test size one
@functools.lru_cache(1)
def f():
nonlocal f_cnt
f_cnt += 1
return 20
self.assertEqual(f.cache_info().maxsize, 1)
f_cnt = 0
for i in range(5):
self.assertEqual(f(), 20)
self.assertEqual(f_cnt, 1)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 4)
self.assertEqual(misses, 1)
self.assertEqual(currsize, 1)
# test size two
@functools.lru_cache(2)
def f(x):
nonlocal f_cnt
f_cnt += 1
return x*10
self.assertEqual(f.cache_info().maxsize, 2)
f_cnt = 0
for x in 7, 9, 7, 9, 7, 9, 8, 8, 8, 9, 9, 9, 8, 8, 8, 7:
# * * * *
self.assertEqual(f(x), x*10)
self.assertEqual(f_cnt, 4)
hits, misses, maxsize, currsize = f.cache_info()
self.assertEqual(hits, 12)
self.assertEqual(misses, 4)
self.assertEqual(currsize, 2)
def test_lru_with_maxsize_none(self):
@functools.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n-1) + fib(n-2)
self.assertEqual([fib(n) for n in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_lru_with_exceptions(self):
# Verify that user_function exceptions get passed through without
# creating a hard-to-read chained exception.
# http://bugs.python.org/issue13177
for maxsize in (None, 128):
@functools.lru_cache(maxsize)
def func(i):
return 'abc'[i]
self.assertEqual(func(0), 'a')
with self.assertRaises(IndexError) as cm:
func(15)
self.assertIsNone(cm.exception.__context__)
# Verify that the previous exception did not result in a cached entry
with self.assertRaises(IndexError):
func(15)
def test_lru_with_types(self):
for maxsize in (None, 128):
@functools.lru_cache(maxsize=maxsize, typed=True)
def square(x):
return x * x
self.assertEqual(square(3), 9)
self.assertEqual(type(square(3)), type(9))
self.assertEqual(square(3.0), 9.0)
self.assertEqual(type(square(3.0)), type(9.0))
self.assertEqual(square(x=3), 9)
self.assertEqual(type(square(x=3)), type(9))
self.assertEqual(square(x=3.0), 9.0)
self.assertEqual(type(square(x=3.0)), type(9.0))
self.assertEqual(square.cache_info().hits, 4)
self.assertEqual(square.cache_info().misses, 4)
def test_lru_with_keyword_args(self):
@functools.lru_cache()
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual(
[fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610]
)
self.assertEqual(fib.cache_info(),
functools._CacheInfo(hits=28, misses=16, maxsize=128, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
functools._CacheInfo(hits=0, misses=0, maxsize=128, currsize=0))
def test_lru_with_keyword_args_maxsize_none(self):
@functools.lru_cache(maxsize=None)
def fib(n):
if n < 2:
return n
return fib(n=n-1) + fib(n=n-2)
self.assertEqual([fib(n=number) for number in range(16)],
[0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 610])
self.assertEqual(fib.cache_info(),
functools._CacheInfo(hits=28, misses=16, maxsize=None, currsize=16))
fib.cache_clear()
self.assertEqual(fib.cache_info(),
functools._CacheInfo(hits=0, misses=0, maxsize=None, currsize=0))
def test_need_for_rlock(self):
# This will deadlock on an LRU cache that uses a regular lock
@functools.lru_cache(maxsize=10)
def test_func(x):
'Used to demonstrate a reentrant lru_cache call within a single thread'
return x
class DoubleEq:
'Demonstrate a reentrant lru_cache call within a single thread'
def __init__(self, x):
self.x = x
def __hash__(self):
return self.x
def __eq__(self, other):
if self.x == 2:
test_func(DoubleEq(1))
return self.x == other.x
test_func(DoubleEq(1)) # Load the cache
test_func(DoubleEq(2)) # Load the cache
self.assertEqual(test_func(DoubleEq(2)), # Trigger a re-entrant __eq__ call
DoubleEq(2)) # Verify the correct return value
def test_early_detection_of_bad_call(self):
# Issue #22184
with self.assertRaises(TypeError):
@functools.lru_cache
def f():
pass
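# Editor's note: illustrative addition, not part of the original test file.
# functools.lru_cache memoizes a callable and exposes cache_info() and
# cache_clear(), the interface TestLRU exercises in detail above.
import functools

@functools.lru_cache(maxsize=None)
def _fib(n):
    return n if n < 2 else _fib(n - 1) + _fib(n - 2)

def _example_lru_cache():
    _fib.cache_clear()
    assert _fib(10) == 55
    hits, misses, maxsize, currsize = _fib.cache_info()
    assert misses == 11 and currsize == 11 and maxsize is None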
class TestSingleDispatch(unittest.TestCase):
def test_simple_overloads(self):
@functools.singledispatch
def g(obj):
return "base"
def g_int(i):
return "integer"
g.register(int, g_int)
self.assertEqual(g("str"), "base")
self.assertEqual(g(1), "integer")
self.assertEqual(g([1,2,3]), "base")
def test_mro(self):
@functools.singledispatch
def g(obj):
return "base"
class A:
pass
class C(A):
pass
class B(A):
pass
class D(C, B):
pass
def g_A(a):
return "A"
def g_B(b):
return "B"
g.register(A, g_A)
g.register(B, g_B)
self.assertEqual(g(A()), "A")
self.assertEqual(g(B()), "B")
self.assertEqual(g(C()), "A")
self.assertEqual(g(D()), "B")
def test_register_decorator(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(int)
def g_int(i):
return "int %s" % (i,)
self.assertEqual(g(""), "base")
self.assertEqual(g(12), "int 12")
self.assertIs(g.dispatch(int), g_int)
self.assertIs(g.dispatch(object), g.dispatch(str))
# Note: in the assert above this is not g.
# @singledispatch returns the wrapper.
def test_wrapping_attributes(self):
@functools.singledispatch
def g(obj):
"Simple test"
return "Test"
self.assertEqual(g.__name__, "g")
if sys.flags.optimize < 2:
self.assertEqual(g.__doc__, "Simple test")
@unittest.skipUnless(decimal, 'requires _decimal')
@support.cpython_only
def test_c_classes(self):
@functools.singledispatch
def g(obj):
return "base"
@g.register(decimal.DecimalException)
def _(obj):
return obj.args
subn = decimal.Subnormal("Exponent < Emin")
rnd = decimal.Rounded("Number got rounded")
self.assertEqual(g(subn), ("Exponent < Emin",))
self.assertEqual(g(rnd), ("Number got rounded",))
@g.register(decimal.Subnormal)
def _(obj):
return "Too small to care."
self.assertEqual(g(subn), "Too small to care.")
self.assertEqual(g(rnd), ("Number got rounded",))
def test_compose_mro(self):
# None of the examples in this test depend on haystack ordering.
c = collections
mro = functools._compose_mro
bases = [c.Sequence, c.MutableMapping, c.Mapping, c.Set]
for haystack in permutations(bases):
m = mro(dict, haystack)
self.assertEqual(m, [dict, c.MutableMapping, c.Mapping, c.Sized,
c.Iterable, c.Container, object])
bases = [c.Container, c.Mapping, c.MutableMapping, c.OrderedDict]
for haystack in permutations(bases):
m = mro(c.ChainMap, haystack)
self.assertEqual(m, [c.ChainMap, c.MutableMapping, c.Mapping,
c.Sized, c.Iterable, c.Container, object])
# If there's a generic function with implementations registered for
# both Sized and Container, passing a defaultdict to it results in an
# ambiguous dispatch which will cause a RuntimeError (see
# test_mro_conflicts).
bases = [c.Container, c.Sized, str]
for haystack in permutations(bases):
m = mro(c.defaultdict, [c.Sized, c.Container, str])
self.assertEqual(m, [c.defaultdict, dict, c.Sized, c.Container,
object])
# MutableSequence below is registered directly on D. In other words, it
# precedes MutableMapping, which means single dispatch will always
# choose MutableSequence here.
class D(c.defaultdict):
pass
c.MutableSequence.register(D)
bases = [c.MutableSequence, c.MutableMapping]
for haystack in permutations(bases):
m = mro(D, bases)
self.assertEqual(m, [D, c.MutableSequence, c.Sequence,
c.defaultdict, dict, c.MutableMapping,
c.Mapping, c.Sized, c.Iterable, c.Container,
object])
# Container and Callable are registered on different base classes and
# a generic function supporting both should always pick the Callable
# implementation if a C instance is passed.
class C(c.defaultdict):
def __call__(self):
pass
bases = [c.Sized, c.Callable, c.Container, c.Mapping]
for haystack in permutations(bases):
m = mro(C, haystack)
self.assertEqual(m, [C, c.Callable, c.defaultdict, dict, c.Mapping,
c.Sized, c.Iterable, c.Container, object])
def test_register_abc(self):
c = collections
d = {"a": "b"}
l = [1, 2, 3]
s = {object(), None}
f = frozenset(s)
t = (1, 2, 3)
@functools.singledispatch
def g(obj):
return "base"
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "base")
self.assertEqual(g(s), "base")
self.assertEqual(g(f), "base")
self.assertEqual(g(t), "base")
g.register(c.Sized, lambda obj: "sized")
self.assertEqual(g(d), "sized")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableMapping, lambda obj: "mutablemapping")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.ChainMap, lambda obj: "chainmap")
self.assertEqual(g(d), "mutablemapping") # irrelevant ABCs registered
self.assertEqual(g(l), "sized")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSequence, lambda obj: "mutablesequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "sized")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.MutableSet, lambda obj: "mutableset")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Mapping, lambda obj: "mapping")
self.assertEqual(g(d), "mutablemapping") # not specific enough
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sized")
g.register(c.Sequence, lambda obj: "sequence")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "sized")
self.assertEqual(g(t), "sequence")
g.register(c.Set, lambda obj: "set")
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(dict, lambda obj: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "mutablesequence")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(list, lambda obj: "list")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "mutableset")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(set, lambda obj: "concrete-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "set")
self.assertEqual(g(t), "sequence")
g.register(frozenset, lambda obj: "frozen-set")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "sequence")
g.register(tuple, lambda obj: "tuple")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
self.assertEqual(g(s), "concrete-set")
self.assertEqual(g(f), "frozen-set")
self.assertEqual(g(t), "tuple")
def test_c3_abc(self):
c = collections
mro = functools._c3_mro
class A(object):
pass
class B(A):
def __len__(self):
return 0 # implies Sized
@c.Container.register
class C(object):
pass
class D(object):
pass # unrelated
class X(D, C, B):
def __call__(self):
pass # implies Callable
expected = [X, c.Callable, D, C, c.Container, B, c.Sized, A, object]
for abcs in permutations([c.Sized, c.Callable, c.Container]):
self.assertEqual(mro(X, abcs=abcs), expected)
# unrelated ABCs don't appear in the resulting MRO
many_abcs = [c.Mapping, c.Sized, c.Callable, c.Container, c.Iterable]
self.assertEqual(mro(X, abcs=many_abcs), expected)
def test_mro_conflicts(self):
c = collections
@functools.singledispatch
def g(arg):
return "base"
class O(c.Sized):
def __len__(self):
return 0
o = O()
self.assertEqual(g(o), "base")
g.register(c.Iterable, lambda arg: "iterable")
g.register(c.Container, lambda arg: "container")
g.register(c.Sized, lambda arg: "sized")
g.register(c.Set, lambda arg: "set")
self.assertEqual(g(o), "sized")
c.Iterable.register(O)
self.assertEqual(g(o), "sized") # because it's explicitly in __mro__
c.Container.register(O)
self.assertEqual(g(o), "sized") # see above: Sized is in __mro__
c.Set.register(O)
self.assertEqual(g(o), "set") # because c.Set is a subclass of
# c.Sized and c.Container
class P:
pass
p = P()
self.assertEqual(g(p), "base")
c.Iterable.register(P)
self.assertEqual(g(p), "iterable")
c.Container.register(P)
with self.assertRaises(RuntimeError) as re_one:
g(p)
self.assertIn(
str(re_one.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Iterable'>"),
("Ambiguous dispatch: <class 'collections.abc.Iterable'> "
"or <class 'collections.abc.Container'>")),
)
class Q(c.Sized):
def __len__(self):
return 0
q = Q()
self.assertEqual(g(q), "sized")
c.Iterable.register(Q)
self.assertEqual(g(q), "sized") # because it's explicitly in __mro__
c.Set.register(Q)
self.assertEqual(g(q), "set") # because c.Set is a subclass of
# c.Sized and c.Iterable
@functools.singledispatch
def h(arg):
return "base"
@h.register(c.Sized)
def _(arg):
return "sized"
@h.register(c.Container)
def _(arg):
return "container"
# Even though Sized and Container are explicit bases of MutableMapping,
# this ABC is implicitly registered on defaultdict which makes all of
# MutableMapping's bases implicit as well from defaultdict's
# perspective.
with self.assertRaises(RuntimeError) as re_two:
h(c.defaultdict(lambda: 0))
self.assertIn(
str(re_two.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class R(c.defaultdict):
pass
c.MutableSequence.register(R)
@functools.singledispatch
def i(arg):
return "base"
@i.register(c.MutableMapping)
def _(arg):
return "mapping"
@i.register(c.MutableSequence)
def _(arg):
return "sequence"
r = R()
self.assertEqual(i(r), "sequence")
class S:
pass
class T(S, c.Sized):
def __len__(self):
return 0
t = T()
self.assertEqual(h(t), "sized")
c.Container.register(T)
self.assertEqual(h(t), "sized") # because it's explicitly in the MRO
class U:
def __len__(self):
return 0
u = U()
self.assertEqual(h(u), "sized") # implicit Sized subclass inferred
# from the existence of __len__()
c.Container.register(U)
# There is no preference for registered versus inferred ABCs.
with self.assertRaises(RuntimeError) as re_three:
h(u)
self.assertIn(
str(re_three.exception),
(("Ambiguous dispatch: <class 'collections.abc.Container'> "
"or <class 'collections.abc.Sized'>"),
("Ambiguous dispatch: <class 'collections.abc.Sized'> "
"or <class 'collections.abc.Container'>")),
)
class V(c.Sized, S):
def __len__(self):
return 0
@functools.singledispatch
def j(arg):
return "base"
@j.register(S)
def _(arg):
return "s"
@j.register(c.Container)
def _(arg):
return "container"
v = V()
self.assertEqual(j(v), "s")
c.Container.register(V)
self.assertEqual(j(v), "container") # because it ends up right after
# Sized in the MRO
def test_cache_invalidation(self):
from collections import UserDict
class TracingDict(UserDict):
def __init__(self, *args, **kwargs):
super(TracingDict, self).__init__(*args, **kwargs)
self.set_ops = []
self.get_ops = []
def __getitem__(self, key):
result = self.data[key]
self.get_ops.append(key)
return result
def __setitem__(self, key, value):
self.set_ops.append(key)
self.data[key] = value
def clear(self):
self.data.clear()
_orig_wkd = functools.WeakKeyDictionary
td = TracingDict()
functools.WeakKeyDictionary = lambda: td
c = collections
@functools.singledispatch
def g(arg):
return "base"
d = {}
l = []
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(g(l), "base")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [])
self.assertEqual(td.set_ops, [dict, list])
self.assertEqual(td.data[dict], g.registry[object])
self.assertEqual(td.data[list], g.registry[object])
self.assertEqual(td.data[dict], td.data[list])
self.assertEqual(g(l), "base")
self.assertEqual(g(d), "base")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list])
g.register(list, lambda arg: "list")
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "base")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict])
self.assertEqual(td.data[dict],
functools._find_impl(dict, g.registry))
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list])
self.assertEqual(td.data[list],
functools._find_impl(list, g.registry))
class X:
pass
c.MutableMapping.register(X) # Will not invalidate the cache,
# not using ABCs yet.
self.assertEqual(g(d), "base")
self.assertEqual(g(l), "list")
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list])
g.register(c.Sized, lambda arg: "sized")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "sized")
self.assertEqual(len(td), 1)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict])
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
self.assertEqual(td.get_ops, [list, dict, dict, list])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
self.assertEqual(g(l), "list")
self.assertEqual(g(d), "sized")
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
g.dispatch(list)
g.dispatch(dict)
self.assertEqual(td.get_ops, [list, dict, dict, list, list, dict,
list, dict])
self.assertEqual(td.set_ops, [dict, list, dict, list, dict, list])
c.MutableSet.register(X) # Will invalidate the cache.
self.assertEqual(len(td), 2) # Stale cache.
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 1)
g.register(c.MutableMapping, lambda arg: "mutablemapping")
self.assertEqual(len(td), 0)
self.assertEqual(g(d), "mutablemapping")
self.assertEqual(len(td), 1)
self.assertEqual(g(l), "list")
self.assertEqual(len(td), 2)
g.register(dict, lambda arg: "dict")
self.assertEqual(g(d), "dict")
self.assertEqual(g(l), "list")
g._clear_cache()
self.assertEqual(len(td), 0)
functools.WeakKeyDictionary = _orig_wkd
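# Editor's note: illustrative addition, not part of the original test file.
# functools.singledispatch turns a function into a generic function dispatching
# on the type of its first argument; registered implementations are found via
# the MRO, which is what the dispatch and cache-invalidation tests above probe.
import functools

@functools.singledispatch
def _describe(obj):
    return 'object'

@_describe.register(int)
def _(obj):
    return 'int'

@_describe.register(list)
def _(obj):
    return 'list'

def _example_singledispatch():
    assert _describe(3.5) == 'object'
    assert _describe(10) == 'int'
    assert _describe([1, 2]) == 'list'
    # bool has no implementation of its own, so dispatch falls back along the
    # MRO to the one registered for int.
    assert _describe.dispatch(bool) is _describe.dispatch(int)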
def test_main(verbose=None):
test_classes = (
TestPartialC,
TestPartialPy,
TestPartialCSubclass,
TestPartialMethod,
TestUpdateWrapper,
TestTotalOrdering,
TestCmpToKeyC,
TestCmpToKeyPy,
TestWraps,
TestReduce,
TestLRU,
TestSingleDispatch,
)
support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in range(len(counts)):
support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print(counts)
if __name__ == '__main__':
test_main(verbose=True)
| lgpl-3.0 |
cemarchi/biosphere | Src/BioDataManagement/DataAccess/Repositories/GeneAnnotionRepository.py | 1 | 1780 | from typing import List, Dict
from Src.BioDataManagement.CrossCutting.Contracts.GeneAnnotationRepositoryBase import GeneAnnotationRepositoryBase
from Src.BioDataManagement.CrossCutting.DTOs.GeneAnnotationDto import GeneAnnotationDto
from Src.BioDataManagement.CrossCutting.Filters.FeListGeneAnnotation import FeListGeneAnnotation
from Src.BioDataManagement.DataAccess.Entities.GeneAnnotation import GeneAnnotation
from Src.BioDataManagement.DataAccess.Mappers.Mapper import Mapper
from Src.Core.Data.MongoRepositoryActions import MongoRepositoryActions
class GeneAnnotationRepository(GeneAnnotationRepositoryBase):
"""description of class"""
def __init__(self, db):
"""
:param db:
"""
super().__init__(db)
self.__mongo_actions = MongoRepositoryActions(self._collection, Mapper.get_instance())
def add_many(self, genes: List[GeneAnnotationDto]):
"""
:param genes:
"""
self.__mongo_actions.add_many(genes, GeneAnnotation)
def get_many(self, fe_list_gene: FeListGeneAnnotation, dto_class = None,
include_or_exclude_fields: Dict[str, int] = None) -> FeListGeneAnnotation:
"""
:param fe_list_gene:
:param include_or_exclude_fields:
:return:
"""
query = {}
if fe_list_gene.id_entrez_list:
query = {'id_entrez': {'$in': fe_list_gene.id_entrez_list}}
#elif fe_list_gene.symbol_list:
#query = {"$or": [{'symbol': {'$in': fe_list_gene.symbol_list}},
#{'synonyms_genes': {'$in': fe_list_gene.symbol_list}}]}
return self.__mongo_actions.get_many(query, fe_list_gene, GeneAnnotation, dto_class, include_or_exclude_fields)
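# Editor's note: illustrative sketch only, not part of the original module. It
# shows the intended call pattern of the repository above; the database handle,
# the entrez ids and the keyword name of the filter constructor are assumptions
# made for this example, not verified against the rest of the project.
#
#     repo = GeneAnnotationRepository(db)                    # db: a Mongo database handle
#     fe = FeListGeneAnnotation(id_entrez_list=[7157, 672])  # filter by Entrez ids
#     result = repo.get_many(fe, dto_class=GeneAnnotationDto)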
| bsd-3-clause |
kobotoolbox/kpi | kpi/views/v2/user.py | 1 | 3070 | # coding: utf-8
from django.contrib.auth.models import User
from rest_framework import exceptions, mixins, renderers, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.reverse import reverse
from kpi.tasks import sync_kobocat_xforms
from kpi.models.authorized_application import ApplicationTokenAuthentication
from kpi.serializers.v2.user import UserSerializer
class UserViewSet(viewsets.GenericViewSet, mixins.RetrieveModelMixin):
"""
This viewset provides only the `detail` action; `list` is *not* provided to
avoid disclosing every username in the database
"""
queryset = User.objects.all()
serializer_class = UserSerializer
lookup_field = 'username'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.authentication_classes += [ApplicationTokenAuthentication]
def list(self, request, *args, **kwargs):
raise exceptions.PermissionDenied()
@action(detail=True, methods=['GET'],
renderer_classes=[renderers.JSONRenderer],
url_path=r'migrate(?:/(?P<task_id>[\d\w\-]+))?')
def migrate(self, request, task_id: str = None, **kwargs):
"""
A temporary endpoint that allows superusers to migrate other users'
projects, and users to migrate their own projects, from Kobocat to KPI.
This is required while users transition from the legacy interface to
the new one.
1. Call this endpoint with `?username=<username>`
2. Fetch url provided to check the state of the Celery task.
It can be:
- 'PENDING'
- 'FAILED'
- 'SUCCESS'
Note: be aware that the Celery `res.state` value isn't entirely
reliable; it returns 'PENDING' if the task does not exist.
"""
request_user = request.user
migrate_user = kwargs.get('username')
if request_user.is_anonymous or (
not request_user.is_superuser
and request_user.username != migrate_user
):
raise exceptions.PermissionDenied()
if task_id:
from celery.result import AsyncResult
res = AsyncResult(task_id)
if res:
return Response({'status': res.state})
else:
return Response(
{'detail': 'Unknown task_id'},
status=status.HTTP_400_BAD_REQUEST,
)
username = kwargs['username']
task = sync_kobocat_xforms.delay(
username=username,
quiet=True,
populate_xform_kpi_asset_uid=True,
sync_kobocat_form_media=True
)
return Response(
{
'celery_task': reverse(
'user-migrate',
kwargs={
'username': username,
'task_id': task.task_id
},
request=request
)
}
)
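# Editor's note: illustrative sketch only, not part of the original module. It
# shows one way an HTTP client could drive the migrate workflow documented in
# the docstring above. The host, username and token are invented placeholders,
# and the `requests` library is assumed to be available on the client side.
def _example_migrate_client():
    import requests
    base = 'https://kpi.example.com/api/v2'
    headers = {'Authorization': 'Token 0123456789abcdef'}
    # 1. Start the migration (as a superuser, or as the user being migrated).
    response = requests.get('{}/users/someuser/migrate/'.format(base),
                            headers=headers)
    task_url = response.json()['celery_task']
    # 2. Poll the returned URL until the Celery task is no longer 'PENDING'.
    status = requests.get(task_url, headers=headers).json()['status']
    return status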
| agpl-3.0 |
sophieottaway/EA-New | node_modules/grunt-sass/node_modules/node-sass/node_modules/node-gyp/gyp/gyptest.py | 1752 | 8019 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
"""
Executor class for commands, including "commands" implemented by
Python functions.
"""
verbose = True
active = True
def __init__(self, dictionary={}):
self.subst_dictionary(dictionary)
def subst_dictionary(self, dictionary):
self._subst_dictionary = dictionary
def subst(self, string, dictionary=None):
"""
Substitutes (via the format operator) the values in the specified
dictionary into the specified command.
The command can be an (action, string) tuple. In all cases, we
perform substitution on strings and don't worry if something isn't
a string. (It's probably a Python function to be executed.)
"""
if dictionary is None:
dictionary = self._subst_dictionary
if dictionary:
try:
string = string % dictionary
except TypeError:
pass
return string
def display(self, command, stdout=None, stderr=None):
if not self.verbose:
return
if type(command) == type(()):
func = command[0]
args = command[1:]
s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
elif type(command) == type([]):  # elif: tuple commands keep the repr built above
# TODO: quote arguments containing spaces
# TODO: handle meta characters?
s = ' '.join(command)
else:
s = self.subst(command)
if not s.endswith('\n'):
s += '\n'
sys.stdout.write(s)
sys.stdout.flush()
def execute(self, command, stdout=None, stderr=None):
"""
Executes a single command.
"""
if not self.active:
return 0
if type(command) == type(''):
command = self.subst(command)
cmdargs = shlex.split(command)
if cmdargs[0] == 'cd':
command = (os.chdir,) + tuple(cmdargs[1:])
if type(command) == type(()):
func = command[0]
args = command[1:]
return func(*args)
else:
if stdout is sys.stdout:
# Same as passing sys.stdout, except python2.4 doesn't fail on it.
subout = None
else:
# Open pipe for anything else so Popen works on python2.4.
subout = subprocess.PIPE
if stderr is sys.stderr:
# Same as passing sys.stderr, except python2.4 doesn't fail on it.
suberr = None
elif stderr is None:
# Merge with stdout if stderr isn't specified.
suberr = subprocess.STDOUT
else:
# Open pipe for anything else so Popen works on python2.4.
suberr = subprocess.PIPE
p = subprocess.Popen(command,
shell=(sys.platform == 'win32'),
stdout=subout,
stderr=suberr)
p.wait()
if stdout is None:
self.stdout = p.stdout.read()
elif stdout is not sys.stdout:
stdout.write(p.stdout.read())
if stderr not in (None, sys.stderr):
stderr.write(p.stderr.read())
return p.returncode
def run(self, command, display=None, stdout=None, stderr=None):
"""
Runs a single command, displaying it first.
"""
if display is None:
display = command
self.display(display)
return self.execute(command, stdout, stderr)
class Unbuffered(object):
def __init__(self, fp):
self.fp = fp
def write(self, arg):
self.fp.write(arg)
self.fp.flush()
def __getattr__(self, attr):
return getattr(self.fp, attr)
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
return f.startswith('gyptest') and f.endswith('.py')
def find_all_gyptest_files(directory):
result = []
for root, dirs, files in os.walk(directory):
if '.svn' in dirs:
dirs.remove('.svn')
result.extend([ os.path.join(root, f) for f in files if is_test_name(f) ])
result.sort()
return result
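# Editor's note: illustrative addition, not part of the original script. It
# shows how the CommandRunner class above is typically driven; the commands
# below are invented for the example.
def _example_command_runner():
    cr = CommandRunner()
    # List commands are handed straight to subprocess.
    cr.run([sys.executable, '-c', 'print("hello from CommandRunner")'],
           stdout=sys.stdout)
    # (function, args) tuples are called in-process, e.g. to change directory.
    cr.run((os.chdir, os.getcwd()), display='chdir to the current directory')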
def main(argv=None):
if argv is None:
argv = sys.argv
usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-a", "--all", action="store_true",
help="run all tests")
parser.add_option("-C", "--chdir", action="store", default=None,
help="chdir to the specified directory")
parser.add_option("-f", "--format", action="store", default='',
help="run tests with the specified formats")
parser.add_option("-G", '--gyp_option', action="append", default=[],
help="Add -G options to the gyp command line")
parser.add_option("-l", "--list", action="store_true",
help="list available tests and exit")
parser.add_option("-n", "--no-exec", action="store_true",
help="no execute, just print the command line")
parser.add_option("--passed", action="store_true",
help="report passed tests")
parser.add_option("--path", action="append", default=[],
help="additional $PATH directory")
parser.add_option("-q", "--quiet", action="store_true",
help="quiet, don't print test command lines")
opts, args = parser.parse_args(argv[1:])
if opts.chdir:
os.chdir(opts.chdir)
if opts.path:
extra_path = [os.path.abspath(p) for p in opts.path]
extra_path = os.pathsep.join(extra_path)
os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']
if not args:
if not opts.all:
sys.stderr.write('Specify -a to get all tests.\n')
return 1
args = ['test']
tests = []
for arg in args:
if os.path.isdir(arg):
tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
else:
if not is_test_name(os.path.basename(arg)):
print >>sys.stderr, arg, 'is not a valid gyp test name.'
sys.exit(1)
tests.append(arg)
if opts.list:
for test in tests:
print test
sys.exit(0)
CommandRunner.verbose = not opts.quiet
CommandRunner.active = not opts.no_exec
cr = CommandRunner()
os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
if not opts.quiet:
sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])
passed = []
failed = []
no_result = []
if opts.format:
format_list = opts.format.split(',')
else:
# TODO: not duplicate this mapping from pylib/gyp/__init__.py
format_list = {
'aix5': ['make'],
'freebsd7': ['make'],
'freebsd8': ['make'],
'openbsd5': ['make'],
'cygwin': ['msvs'],
'win32': ['msvs', 'ninja'],
'linux2': ['make', 'ninja'],
'linux3': ['make', 'ninja'],
'darwin': ['make', 'ninja', 'xcode', 'xcode-ninja'],
}[sys.platform]
for format in format_list:
os.environ['TESTGYP_FORMAT'] = format
if not opts.quiet:
sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)
gyp_options = []
for option in opts.gyp_option:
gyp_options += ['-G', option]
if gyp_options and not opts.quiet:
sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)
for test in tests:
status = cr.run([sys.executable, test] + gyp_options,
stdout=sys.stdout,
stderr=sys.stderr)
if status == 2:
no_result.append(test)
elif status:
failed.append(test)
else:
passed.append(test)
if not opts.quiet:
def report(description, tests):
if tests:
if len(tests) == 1:
sys.stdout.write("\n%s the following test:\n" % description)
else:
fmt = "\n%s the following %d tests:\n"
sys.stdout.write(fmt % (description, len(tests)))
sys.stdout.write("\t" + "\n\t".join(tests) + "\n")
if opts.passed:
report("Passed", passed)
report("Failed", failed)
report("No result from", no_result)
if failed:
return 1
else:
return 0
if __name__ == "__main__":
sys.exit(main())
| mit |
simonmonk/pi_magazine | 03_alarm_clock/Adafruit_7Segment.py | 9 | 1693 | #!/usr/bin/python
import time
import datetime
from Adafruit_LEDBackpack import LEDBackpack
# ===========================================================================
# 7-Segment Display
# ===========================================================================
# This class is meant to be used with the four-character, seven segment
# displays available from Adafruit
class SevenSegment:
disp = None
# Hexadecimal character lookup table (row 1 = 0..9, row 2 = A..F)
digits = [ 0x3F, 0x06, 0x5B, 0x4F, 0x66, 0x6D, 0x7D, 0x07, 0x7F, 0x6F, \
0x77, 0x7C, 0x39, 0x5E, 0x79, 0x71 ]
# Constructor
def __init__(self, address=0x70, debug=False):
if (debug):
print "Initializing a new instance of LEDBackpack at 0x%02X" % address
self.disp = LEDBackpack(address=address, debug=debug)
def writeDigitRaw(self, charNumber, value):
"Sets a digit using the raw 16-bit value"
if (charNumber > 7):
return
# Set the appropriate digit
self.disp.setBufferRow(charNumber, value)
def writeDigit(self, charNumber, value, dot=False):
"Sets a single decimal or hexademical value (0..9 and A..F)"
if (charNumber > 7):
return
if (value > 0xF):
return
# Set the appropriate digit
self.disp.setBufferRow(charNumber, self.digits[value] | (dot << 7))
def setColon(self, state=True):
"Enables or disables the colon character"
# Warning: This function assumes that the colon is character '2',
# which is the case on 4 char displays, but may need to be modified
# if another display type is used
if (state):
self.disp.setBufferRow(2, 0xFFFF)
else:
self.disp.setBufferRow(2, 0)
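# Editor's note: illustrative sketch only, not part of the original driver. It
# shows the intended call pattern of the SevenSegment class above; running it
# requires the LED backpack to be present on the I2C bus at the default 0x70
# address.
def _example_show_time():
    segment = SevenSegment(address=0x70)
    # Display "12:34" -- characters 0, 1, 3, 4 are digits, character 2 is the colon.
    segment.writeDigit(0, 1)
    segment.writeDigit(1, 2)
    segment.setColon(True)
    segment.writeDigit(3, 3)
    segment.writeDigit(4, 4)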
| mit |
qsnake/gpaw | doc/install/BGP/bgp_xlc_linker.py | 1 | 1654 | #!/usr/bin/env python
"""bgp_xlc.py is a wrapper for the BGP xlc compiler,
converting/removing incompatible gcc args. """
import sys
from subprocess import call
from glob import glob
args2change = {"-fno-strict-aliasing":"",
"-fmessage-length=0":"",
"-Wall":"",
"-std=c99":"-qlanglvl=extc99",
"-fPIC":"",
"-g":"",
"-D_FORTIFY_SOURCE=2":"",
"-DNDEBUG":"",
"-UNDEBUG":"",
"-pthread":"",
"-shared":"-qmkshrobj",
"-Xlinker":"",
"-export-dynamic":"",
"-Wstrict-prototypes":"",
"-dynamic":"",
"-O3":"",
"-O2":"",
"-O1":"",
"-fwrapv":""}
fragile_files = ["test.c"]
qhot_files = ["c/blas.c", "c/utilities.c", "c/lfc.c", "c/localized_functions.c"]
non_c99files = glob('c/libxc/src/*.c')
cmd = ""
opt = 1
for arg in sys.argv[1:]:
cmd += " "
t = arg.strip()
if t in fragile_files:
opt = 2
if t in non_c99files:
opt = 3
if t in qhot_files:
opt = 4
if t in args2change:
cmd += args2change[t]
else:
cmd += arg
flags_list = {1: "-g -O3 -qlanglvl=extc99 -qflag=w:w -qpic",
2: "-g -O3 -qstrict -qlanglvl=extc99 -qflag=w:w -qpic",
3: "-g -O3 -qflag=w:w -qpic",
4: "-g -O3 -qhot -qlanglvl=extc99 -qflag=w:w -qpic",
}
flags = flags_list[opt]
cmd = "/soft/apps/ibmcmp-jan2010/vac/bg/9.0/bin/bgxlc_r %s %s"%(flags, cmd)
print "\nexecmd: %s\n"%cmd
call(cmd, shell=True)
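# Editor's note: illustrative addition, not part of the original wrapper. With
# an invented input such as
#
#   bgp_xlc_linker.py -fPIC -std=c99 -O3 c/blas.c
#
# the loop above drops the gcc-only flags, translates -std=c99 to
# -qlanglvl=extc99, selects the -qhot flag set because blas.c is listed in
# qhot_files, and ends up invoking roughly:
#
#   .../bin/bgxlc_r -g -O3 -qhot -qlanglvl=extc99 -qflag=w:w -qpic -qlanglvl=extc99 c/blas.c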
| gpl-3.0 |
rebase-helper/rebase-helper | rebasehelper/completion.py | 1 | 3433 | # -*- coding: utf-8 -*-
#
# This tool helps you rebase your package to the latest version
# Copyright (C) 2013-2019 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Authors: Petr Hráček <[email protected]>
# Tomáš Hozza <[email protected]>
# Nikola Forró <[email protected]>
# František Nečas <[email protected]>
import re
import sys
from rebasehelper.cli import CLI
from rebasehelper.archive import Archive
class Completion:
@staticmethod
def extensions():
archives = Archive.get_supported_archives()
return [a.lstrip('.') for a in archives]
@staticmethod
def options():
def get_delimiter(parser, action):
if action.nargs == 0:
return None
fmt = parser._get_formatter() # pylint: disable=protected-access
usage = fmt._format_actions_usage([action], []) # pylint: disable=protected-access
option_string = action.option_strings[0]
idx = usage.find(option_string)
if idx == -1:
return None
return usage[idx + len(option_string)]
parser = CLI.build_parser()
result = []
actions = parser._get_optional_actions() + parser._get_positional_actions() # pylint: disable=protected-access
for action in actions:
if not action.option_strings:
continue
delimiter = get_delimiter(parser, action) or ''
result.append(dict(
options=[o + delimiter.strip() for o in action.option_strings],
choices=action.choices or []))
return result
@classmethod
def dump(cls):
options = cls.options()
return {
# pattern list of extensions
'RH_EXTENSIONS': '@({})'.format('|'.join(cls.extensions())),
# array of options
'RH_OPTIONS': '({})'.format(' '.join('"{}"'.format(' '.join(o['options'])) for o in options)),
# array of choices of respective options
'RH_CHOICES': '({})'.format(' '.join('"{}"'.format(' '.join(o['choices'])) for o in options)),
}
def replace_placeholders(s, **kwargs):
placeholder_re = re.compile(r'@(\w+)@')
matches = list(placeholder_re.finditer(s))
result = s
for match in reversed(matches):
replacement = kwargs.get(match.group(1), '')
result = result[:match.start(0)] + replacement + result[match.end(0):]
return result
def main():
if len(sys.argv) != 3:
return 1
with open(sys.argv[1]) as f:
s = f.read()
s = replace_placeholders(s, **Completion.dump())
with open(sys.argv[2], 'w') as f:
f.write(s)
return 0
if __name__ == '__main__':
main()
| gpl-2.0 |
haku86/icecream | docs/conf.py | 1 | 7738 | # -*- coding: utf-8 -*-
#
# icecream documentation build configuration file, created by
# sphinx-quickstart on Sun Feb 17 11:46:20 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'icecream'
copyright = u'2013, ChangeMyName'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'icecreamdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'icecream.tex', u'icecream Documentation',
u'ChangeToMyName', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'icecream', u'icecream Documentation',
[u'ChangeToMyName'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'icecream', u'icecream Documentation',
u'ChangeToMyName', 'icecream', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| mit |
warriorframework/warriorframework | warrior/Framework/ClassUtils/WSelenium/element_operations.py | 1 | 17685 | '''
Copyright 2017, Fujitsu Network Communications, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
""" Selenium element operations library """
from time import sleep
from selenium.webdriver import ActionChains
from selenium.webdriver.common.keys import Keys
from Framework.Utils.print_Utils import print_error, print_info, print_exception
from Framework.Utils.data_Utils import get_object_from_datarepository
from Framework.ClassUtils.WSelenium.element_locator import ElementLocator
try:
from selenium.webdriver.remote.webelement import WebElement
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import StaleElementReferenceException
except ImportError as exception:
print_exception(exception)
EL = ElementLocator()
ACTIONS = {'click': '_click_element',
'double_click': '_double_click_element',
'send_keys': '_send_keys',
'type': '_type_keys',
'fill': '_type_keys',
'clear_text': '_clear_text',
'get_text': '_get_text',
'clear': '_clear_text',
'drag_and_drop': '_drag_and_drop',
           'mouse_over': '_mouse_over',
           'drag_and_drop_by_offset': '_drag_and_drop_by_offset',
'get_property': '_get_property',
'check_property': '_check_property',
'perform_keypress': '_perform_keypress'
}
KEYS = {'ADD': Keys.ADD, 'ALT': Keys.ALT, 'ARROW_DOWN': Keys.ARROW_DOWN,
'ARROW_LEFT': Keys.ARROW_LEFT, 'ARROW_RIGHT': Keys.ARROW_RIGHT, 'ARROW_UP': Keys.ARROW_UP,
'BACKSPACE': Keys.BACK_SPACE, 'CANCEL': Keys.CANCEL, 'CLEAR': Keys.CLEAR,
'COMMAND': Keys.COMMAND, 'CONTROL': Keys.CONTROL, 'DECIMAL': Keys.DECIMAL,
'DELETE': Keys.DELETE, 'DIVIDE': Keys.DIVIDE, 'DOWN': Keys.DOWN, 'END': Keys.END,
'ENTER': Keys.RETURN, 'EQUALS': Keys.EQUALS, 'ESCAPE': Keys.ESCAPE, 'F1': Keys.F1,
'F10': Keys.F10, 'F11': Keys.F11, 'F12': Keys.F12, 'F2': Keys.F2, 'F3': Keys.F3, 'F4': Keys.F4,
'F5': Keys.F5, 'F6': Keys.F6, 'F7': Keys.F7, 'F8': Keys.F8, 'F9': Keys.F9,
'HELP': Keys.HELP, 'HOME': Keys.HOME, 'INSERT': Keys.INSERT, 'LEFT': Keys.LEFT,
'LEFT_ALT': Keys.LEFT_ALT, 'LEFT_CONTROL': Keys.LEFT_CONTROL, 'LEFT_SHIFT': Keys.LEFT_SHIFT,
'META': Keys.META, 'MULTIPLY': Keys.MULTIPLY, 'NULL': Keys.NULL, 'NUMPAD0': Keys.NUMPAD0,
'NUMPAD1': Keys.NUMPAD1, 'NUMPAD2': Keys.NUMPAD2, 'NUMPAD3': Keys.NUMPAD3,
'NUMPAD4': Keys.NUMPAD4, 'NUMPAD5': Keys.NUMPAD5, 'NUMPAD6': Keys.NUMPAD6,
'NUMPAD7': Keys.NUMPAD7, 'NUMPAD8': Keys.NUMPAD8, 'NUMPAD9': Keys.NUMPAD9,
'PAGE_DOWN': Keys.PAGE_DOWN, 'PAGE_UP': Keys.PAGE_UP, 'PAUSE': Keys.PAUSE,
'RETURN': Keys.RETURN, 'RIGHT': Keys.RIGHT, 'SEMICOLON': Keys.SEMICOLON, 'SEPARATOR': Keys.SEPARATOR,
'SHIFT': Keys.SHIFT, 'SPACE': Keys.SPACE, 'SUBTRACT': Keys.SUBTRACT, 'TAB': Keys.TAB,
'UP': Keys.UP
}
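# The two lookup tables above drive the dispatch in this module: an action
# string is normalised and mapped to a bound method via _get_action_function,
# and special key names are translated to selenium Keys constants, e.g.
#   ACTIONS.get('send_keys')  # -> '_send_keys'
#   KEYS['ENTER']             # -> Keys.RETURN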
class ElementOperations():
""" Element operations """
def __init__(self, *args, **kwargs):
""" constructor """
pass
def perform_element_action(self, element_or_browser, locator=None,
action=None, **kwargs):
"""Generic method to perform specific actions on an element
:Currently supported actions and the values that they take
if the user provided action is "get_text", it would return the
value of the particular element and the status of it. If not it would
return only the status"""
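        # Illustrative usage sketch (the locator values below are hypothetical,
        # not part of this module), assuming "browser" is a live Selenium WebDriver:
        #   eo = ElementOperations()
        #   eo.perform_element_action(browser, locator="id=login_btn",
        #                             action="click", browser=browser)
        #   status, text = eo.perform_element_action(browser, locator="id=banner",
        #                                            action="get_text",
        #                                            browser=browser)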
browser = kwargs.get('browser')
status = True
if action != "perform_keypress":
element = self._get_element(element_or_browser, locator)
else:
element = element_or_browser
if element:
action_function = self._get_action_function(action.lower())
if not action_function:
                print_error(action + " is not a supported value.")
else:
count = 0
while (count <= 3):
try:
if action == "get_text":
status, value = action_function(element, **kwargs)
if status is True:
return status, value
else:
status, count = self.wait_time(count, browser,
locator,
action)
if status is True:
return status
else:
status = action_function(element, **kwargs)
if status is True:
return status
else:
status, count = self.wait_time(count, browser,
locator,
action)
if status is True:
return status
except StaleElementReferenceException:
status = False
try:
if action == "get_text":
count = count + 1
print_info("waiting for 3 seconds "
"before retrying")
sleep(3)
status, value = action_function(element,
**kwargs)
if status is True:
return status, value
else:
status, count = self.wait_time(count, browser,
locator,
action)
if status is True:
return status
except StaleElementReferenceException:
status = False
except Exception as exception:
status = False
print_exception(exception)
return status
else:
print_error("StaleElementReferenceException occured."
"Tried three times to locate the element")
status = False
else:
status = False
print_error("Provide a valid WebElement to perform "
"a {0} operation got {1}".format(action, element))
return status
def wait_time(self, count, browser, locator, action):
""" wait time to find the element again """
count = count + 1
print_info("waiting for 3 seconds before retrying")
sleep(3)
status = self._stale_element_exception(browser, locator, action)
return status, count
def get_page_source(self, browser):
'''
Get page source of the browser
'''
        return browser.page_source
def verify_text(self, **kwargs):
"""stores the text from element in data repository with var variable
and verifies if it is same as expected if expected is provided
:Arguments:
1. var = variable in which to store the text
2. expected = value to compare with as a list separated by comma
"""
status = True
value = get_object_from_datarepository(kwargs.get('var'))
expected = kwargs.get('expected').split(',')
if value not in expected:
print_error("element text expected to be <<{}>> "
"but found to be <<{}>>".format(', '.join(expected),
value))
status = False
return status
# Private methods
def _stale_element_exception(self, browser, locator, action, **kwargs):
element = EL.get_element(browser, locator)
if element is not None:
action_function = self._get_action_function(action.lower())
status = action_function(element, **kwargs)
return status
def _get_element(self, element_or_browser, locator):
"""Get the element based on the provided input"""
value = None
if isinstance(element_or_browser, WebElement):
value = element_or_browser
else:
value = EL.get_element(element_or_browser, locator)
return value
    def _mouse_over(self, element, **kwargs):
"""Moving the mouse to the middle of an element """
status = False
print_info("mouse over operation")
browser_instance = kwargs.get('browser')
if element is not None:
ActionChains(browser_instance).move_to_element(element).perform()
status = True
return status
def _get_action_function(self, action):
"""Gets the function call corresponding to the
action to be performed"""
action_function = ACTIONS.get(action.lower().replace(' ', ''), None)
return getattr(self, action_function) if action_function else None
def _click_element(self, element, **kwargs):
""" Clicks on the provided element
:Arguments:
1. element = a valid WebElement
"""
status = True
print_info("Click on element")
try:
if element is not None:
element.click()
except Exception as e:
print_error("An Exception Occurred {}".format(e))
status = False
return status
def _double_click_element(self, element, **kwargs):
""" Double clicks on the provided element
:Arguments:
1. element = a valid WebElement
"""
status = True
print_info("Double click on element")
try:
browser_instance = kwargs.get('browser')
            ActionChains(browser_instance).double_click(element).perform()
except Exception as e:
print_error("An Exception Occurred {}".format(e))
status = False
return status
def _type_keys(self, element, **kwargs):
"""Send values to a particular element,
        simulates typing into an element
:Arguments:
1. element = a valid WebElement
2. value = a string that has to be typed into the element.
"""
status = True
value = kwargs.get('value', '')
print_info("Sending '{0}' to element".format(value))
try:
element.send_keys(value)
except Exception as e:
print_error("An Exception Occurred {}".format(e))
status = False
return status
def _send_keys(self, element, **kwargs):
"""Send values to a particular element,
simulates typing into a element
:Arguments:
1. element = a valid WebElement
2. value = a Keys object that has to be sent to the element.
"""
status = True
value = kwargs.get('value', '')
try:
KEYS[value.upper()]
except KeyError:
print_error("{0} is not supported by Selenium.".format(value))
status = False
else:
print_info("Type text='{0}' into element".format(value))
element.send_keys(KEYS[value.upper()])
return status
def _drag_and_drop(self, source, **kwargs):
"""Send values to a particular element,
simulates typing into a element
:Arguments:
1. source = a valid WebElement
2. target = a valid WebElement
"""
status = True
print_info("Simulate a drag and drop")
try:
browser_instance = kwargs.get('browser')
target = self._get_element(browser_instance,
kwargs.get('target_locator'))
if source is not None and target is not None:
ActionChains(browser_instance).drag_and_drop(source,
target).perform()
except Exception as e:
print_error("An Exception Occurred {}".format(e))
status = False
return status
def _drag_and_drop_by_offset(self, source, **kwargs):
"""Holds down the left mouse button on the source element,
then moves to the target offset and releases the mouse button
:Arguments:
1. source = a valid WebElement
2. xoffset = X offset to move to
3. yoffset = Y offset to move to
"""
status = True
print_info("drag and drop an element with offset")
try:
xoffset = kwargs.get('xoffset')
yoffset = kwargs.get('yoffset')
browser_instance = kwargs.get('browser')
actions = ActionChains(browser_instance)
actions.drag_and_drop_by_offset(source, xoffset, yoffset).perform()
except NoSuchElementException as e:
print_error("NoSuchElementException occurred")
status = False
except Exception as e:
print_error("An Exception Occurred {}".format(e))
status = False
return status
def _clear_text(self, element, **kwargs):
"""Clears the text if it is a text element
:Arguments:
1. element = a valid WebElement
"""
status = True
print_info("Clear element")
try:
element.clear()
except Exception as e:
print_error("An Exception Occurred {}".format(e))
status = False
return status
def _get_text(self, element, **kwargs):
"""gets the text from element
:Arguments:
1. element = a valid WebElement
"""
print_info("get element text")
print_info("tag: "+element.tag_name)
if element.tag_name == "input":
value = element.get_attribute("value")
else:
value = element.text
if value is not None:
status = True
print_info("The text for this element is {}".format(value))
return status, value
def _get_property(self, element, **kwargs):
status = True
if element is not None:
attribute_name = kwargs.get('attribute_name')
attr_properties = element.get_attribute(attribute_name)
if attr_properties is not None:
print_info("The properties of the attribute '{0}' "
"are {1}".format(attribute_name, attr_properties))
else:
print_error("Could not find attribute '{0}', hence could not "
"retrieve its properties.".format(attribute_name))
status = False
else:
status = False
return status
def _check_property(self, element, **kwargs):
status = True
if element is not None:
attribute_name = kwargs.get('attribute_name')
property_name = kwargs.get('property_name')
attr_properties = element.get_attribute(attribute_name)
if attr_properties is not None:
if property_name in attr_properties:
print_info("{0} has a property called {1}. Verification "
"success!".format(attribute_name, property_name))
else:
print_error("{0} does not have a property called {1}. "
"Verification failed!".format(attribute_name,
property_name))
status = False
else:
print_error("Could not find attribute '{0}', hence could not "
"retrieve its properties.".format(attribute_name))
status = False
else:
status = False
return status
def _perform_keypress(self, element, **kwargs):
"""
This function expects to receive a browser instance through the
"browser" argument and a key "keys" through the kwargs.
The value for "keys" would be a list of keys tha need to pressed.
"""
status = True
flag = False
keys = kwargs.get('keys')
actions = ActionChains(element)
for key in keys:
try:
selenium_key = KEYS[key.upper()]
except KeyError:
print_error("{0} is not supported by Selenium.".format(key))
status = False
else:
flag = True
actions.send_keys(selenium_key)
if flag:
actions.perform()
sleep(2)
return status
| apache-2.0 |
blooparksystems/odoo | openerp/addons/base/res/res_bank.py | 25 | 3705 | # -*- coding: utf-8 -*-
import re
from openerp import api, fields, models, _
from openerp.osv import expression
from openerp.exceptions import UserError
def sanitize_account_number(acc_number):
if acc_number:
return re.sub(r'\W+', '', acc_number).upper()
return False
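# Example: separators are stripped and the result is upper-cased, e.g.
# sanitize_account_number('be43 0689-9999.9501') returns 'BE43068999999501'
# (an illustrative, made-up account number).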
class Bank(models.Model):
_description = 'Bank'
_name = 'res.bank'
_order = 'name'
name = fields.Char(required=True)
street = fields.Char()
street2 = fields.Char()
zip = fields.Char()
city = fields.Char()
state = fields.Many2one('res.country.state', 'Fed. State', domain="[('country_id', '=', country)]")
country = fields.Many2one('res.country')
email = fields.Char()
phone = fields.Char()
fax = fields.Char()
active = fields.Boolean(default=True)
bic = fields.Char('Bank Identifier Code', select=True, help="Sometimes called BIC or Swift.")
@api.multi
@api.depends('name', 'bic')
def name_get(self):
result = []
for bank in self:
name = bank.name + (bank.bic and (' - ' + bank.bic) or '')
result.append((bank.id, name))
return result
@api.model
def name_search(self, name, args=None, operator='ilike', limit=100):
args = args or []
domain = []
if name:
domain = ['|', ('bic', '=ilike', name + '%'), ('name', operator, name)]
if operator in expression.NEGATIVE_TERM_OPERATORS:
domain = ['&'] + domain
banks = self.search(domain + args, limit=limit)
return banks.name_get()
class ResPartnerBank(models.Model):
_name = 'res.partner.bank'
_rec_name = 'acc_number'
_description = 'Bank Accounts'
_order = 'sequence'
acc_type = fields.Char(compute='_compute_acc_type', help='Bank account type, inferred from account number')
acc_number = fields.Char('Account Number', required=True)
sanitized_acc_number = fields.Char(compute='_compute_sanitized_acc_number', string='Sanitized Account Number', readonly=True, store=True)
partner_id = fields.Many2one('res.partner', 'Account Holder', ondelete='cascade', select=True, domain=['|', ('is_company', '=', True), ('parent_id', '=', False)])
bank_id = fields.Many2one('res.bank', string='Bank')
bank_name = fields.Char(related='bank_id.name')
bank_bic = fields.Char(related='bank_id.bic')
sequence = fields.Integer()
currency_id = fields.Many2one('res.currency', string='Currency')
company_id = fields.Many2one('res.company', 'Company', default=lambda self: self.env.user.company_id, ondelete='cascade')
_sql_constraints = [
('unique_number', 'unique(sanitized_acc_number)', 'Account Number must be unique'),
]
@api.one
@api.depends('acc_number')
def _compute_sanitized_acc_number(self):
self.sanitized_acc_number = sanitize_account_number(self.acc_number)
@api.one
@api.depends('acc_type')
def _compute_acc_type(self):
self.acc_type = 'bank'
@api.model
def search(self, args, offset=0, limit=None, order=None, count=False):
pos = 0
while pos < len(args):
if args[pos][0] == 'acc_number':
op = args[pos][1]
value = args[pos][2]
if hasattr(value, '__iter__'):
value = [sanitize_account_number(i) for i in value]
else:
value = sanitize_account_number(value)
if 'like' in op:
value = '%' + value + '%'
args[pos] = ('sanitized_acc_number', op, value)
pos += 1
return super(ResPartnerBank, self).search(args, offset, limit, order, count=count)
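        # A sketch of the rewriting performed above (hypothetical values):
        # a caller searching with [('acc_number', 'ilike', 'be43 0689')] ends up
        # executing [('sanitized_acc_number', 'ilike', '%BE430689%')], so that
        # spaces or dashes in the stored numbers do not hide matches.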
| gpl-3.0 |
chiragjogi/odoo | addons/sale/sale.py | 55 | 74103 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import datetime, timedelta
import time
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT, DEFAULT_SERVER_DATETIME_FORMAT
import openerp.addons.decimal_precision as dp
from openerp import workflow
class res_company(osv.Model):
_inherit = "res.company"
_columns = {
'sale_note': fields.text('Default Terms and Conditions', translate=True, help="Default terms and conditions for quotations."),
}
class sale_order(osv.osv):
_name = "sale.order"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_description = "Sales Order"
_track = {
'state': {
'sale.mt_order_confirmed': lambda self, cr, uid, obj, ctx=None: obj.state in ['manual', 'progress'],
'sale.mt_order_sent': lambda self, cr, uid, obj, ctx=None: obj.state in ['sent']
},
}
def _amount_line_tax(self, cr, uid, line, context=None):
val = 0.0
line_obj = self.pool['sale.order.line']
price = line_obj._calc_line_base_price(cr, uid, line, context=context)
qty = line_obj._calc_line_quantity(cr, uid, line, context=context)
for c in self.pool['account.tax'].compute_all(
cr, uid, line.tax_id, price, qty, line.product_id,
line.order_id.partner_id)['taxes']:
val += c.get('amount', 0.0)
return val
def _amount_all_wrapper(self, cr, uid, ids, field_name, arg, context=None):
""" Wrapper because of direct method passing as parameter for function fields """
return self._amount_all(cr, uid, ids, field_name, arg, context=context)
def _amount_all(self, cr, uid, ids, field_name, arg, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0,
}
val = val1 = 0.0
cur = order.pricelist_id.currency_id
for line in order.order_line:
val1 += line.price_subtotal
val += self._amount_line_tax(cr, uid, line, context=context)
res[order.id]['amount_tax'] = cur_obj.round(cr, uid, cur, val)
res[order.id]['amount_untaxed'] = cur_obj.round(cr, uid, cur, val1)
res[order.id]['amount_total'] = res[order.id]['amount_untaxed'] + res[order.id]['amount_tax']
return res
def _invoiced_rate(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
if sale.invoiced:
res[sale.id] = 100.0
continue
tot = 0.0
for invoice in sale.invoice_ids:
if invoice.state not in ('draft', 'cancel'):
tot += invoice.amount_untaxed
if tot:
res[sale.id] = min(100.0, tot * 100.0 / (sale.amount_untaxed or 1.00))
else:
res[sale.id] = 0.0
return res
def _invoice_exists(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = False
if sale.invoice_ids:
res[sale.id] = True
return res
def _invoiced(self, cursor, user, ids, name, arg, context=None):
res = {}
for sale in self.browse(cursor, user, ids, context=context):
res[sale.id] = True
invoice_existence = False
for invoice in sale.invoice_ids:
if invoice.state!='cancel':
invoice_existence = True
if invoice.state != 'paid':
res[sale.id] = False
break
if not invoice_existence or sale.state == 'manual':
res[sale.id] = False
return res
def _invoiced_search(self, cursor, user, obj, name, args, context=None):
if not len(args):
return []
clause = ''
sale_clause = ''
no_invoiced = False
for arg in args:
if (arg[1] == '=' and arg[2]) or (arg[1] == '!=' and not arg[2]):
clause += 'AND inv.state = \'paid\''
else:
clause += 'AND inv.state != \'cancel\' AND sale.state != \'cancel\' AND inv.state <> \'paid\' AND rel.order_id = sale.id '
sale_clause = ', sale_order AS sale '
no_invoiced = True
cursor.execute('SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel, account_invoice AS inv '+ sale_clause + \
'WHERE rel.invoice_id = inv.id ' + clause)
res = cursor.fetchall()
if no_invoiced:
cursor.execute('SELECT sale.id ' \
'FROM sale_order AS sale ' \
'WHERE sale.id NOT IN ' \
'(SELECT rel.order_id ' \
'FROM sale_order_invoice_rel AS rel) and sale.state != \'cancel\'')
res.extend(cursor.fetchall())
if not res:
return [('id', '=', 0)]
return [('id', 'in', [x[0] for x in res])]
def _get_order(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('sale.order.line').browse(cr, uid, ids, context=context):
result[line.order_id.id] = True
return result.keys()
def _get_default_company(self, cr, uid, context=None):
company_id = self.pool.get('res.users')._get_company(cr, uid, context=context)
if not company_id:
raise osv.except_osv(_('Error!'), _('There is no default company for the current user!'))
return company_id
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'name': fields.char('Order Reference', required=True, copy=False,
readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True),
'origin': fields.char('Source Document', help="Reference of the document that generated this sales order request."),
'client_order_ref': fields.char('Reference/Description', copy=False),
'state': fields.selection([
('draft', 'Draft Quotation'),
('sent', 'Quotation Sent'),
('cancel', 'Cancelled'),
('waiting_date', 'Waiting Schedule'),
('progress', 'Sales Order'),
('manual', 'Sale to Invoice'),
('shipping_except', 'Shipping Exception'),
('invoice_except', 'Invoice Exception'),
('done', 'Done'),
], 'Status', readonly=True, copy=False, help="Gives the status of the quotation or sales order.\
\nThe exception status is automatically set when a cancel operation occurs \
in the invoice validation (Invoice Exception) or in the picking list process (Shipping Exception).\nThe 'Waiting Schedule' status is set when the invoice is confirmed\
but waiting for the scheduler to run on the order date.", select=True),
'date_order': fields.datetime('Date', required=True, readonly=True, select=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=False),
'create_date': fields.datetime('Creation Date', readonly=True, select=True, help="Date on which sales order is created."),
'date_confirm': fields.date('Confirmation Date', readonly=True, select=True, help="Date on which sales order is confirmed.", copy=False),
'user_id': fields.many2one('res.users', 'Salesperson', states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, required=True, change_default=True, select=True, track_visibility='always'),
'partner_invoice_id': fields.many2one('res.partner', 'Invoice Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Invoice address for current sales order."),
'partner_shipping_id': fields.many2one('res.partner', 'Delivery Address', readonly=True, required=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Delivery address for current sales order."),
'order_policy': fields.selection([
('manual', 'On Demand'),
], 'Create Invoice', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]},
help="""This field controls how invoice and delivery operations are synchronized."""),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', required=True, readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="Pricelist for current sales order."),
'currency_id': fields.related('pricelist_id', 'currency_id', type="many2one", relation="res.currency", string="Currency", readonly=True, required=True),
'project_id': fields.many2one('account.analytic.account', 'Contract / Analytic', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, help="The analytic account related to a sales order."),
'order_line': fields.one2many('sale.order.line', 'order_id', 'Order Lines', readonly=True, states={'draft': [('readonly', False)], 'sent': [('readonly', False)]}, copy=True),
'invoice_ids': fields.many2many('account.invoice', 'sale_order_invoice_rel', 'order_id', 'invoice_id', 'Invoices', readonly=True, copy=False, help="This is the list of invoices that have been generated for this sales order. The same sales order may have been invoiced in several times (by line for example)."),
'invoiced_rate': fields.function(_invoiced_rate, string='Invoiced Ratio', type='float'),
'invoiced': fields.function(_invoiced, string='Paid',
fnct_search=_invoiced_search, type='boolean', help="It indicates that an invoice has been paid."),
'invoice_exists': fields.function(_invoice_exists, string='Invoiced',
fnct_search=_invoiced_search, type='boolean', help="It indicates that sales order has at least one invoice."),
'note': fields.text('Terms and conditions'),
'amount_untaxed': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Untaxed Amount',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The amount without tax.", track_visibility='always'),
'amount_tax': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Taxes',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The tax amount."),
'amount_total': fields.function(_amount_all_wrapper, digits_compute=dp.get_precision('Account'), string='Total',
store={
'sale.order': (lambda self, cr, uid, ids, c={}: ids, ['order_line'], 10),
'sale.order.line': (_get_order, ['price_unit', 'tax_id', 'discount', 'product_uom_qty'], 10),
},
multi='sums', help="The total amount."),
'payment_term': fields.many2one('account.payment.term', 'Payment Term'),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position'),
'company_id': fields.many2one('res.company', 'Company'),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
'procurement_group_id': fields.many2one('procurement.group', 'Procurement group', copy=False),
'product_id': fields.related('order_line', 'product_id', type='many2one', relation='product.product', string='Product'),
}
_defaults = {
'date_order': fields.datetime.now,
'order_policy': 'manual',
'company_id': _get_default_company,
'state': 'draft',
'user_id': lambda obj, cr, uid, context: uid,
'name': lambda obj, cr, uid, context: '/',
'partner_invoice_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['invoice'])['invoice'],
'partner_shipping_id': lambda self, cr, uid, context: context.get('partner_id', False) and self.pool.get('res.partner').address_get(cr, uid, [context['partner_id']], ['delivery'])['delivery'],
'note': lambda self, cr, uid, context: self.pool.get('res.users').browse(cr, uid, uid, context=context).company_id.sale_note,
'section_id': lambda s, cr, uid, c: s._get_default_section_id(cr, uid, c),
}
_sql_constraints = [
('name_uniq', 'unique(name, company_id)', 'Order Reference must be unique per Company!'),
]
_order = 'date_order desc, id desc'
# Form filling
def unlink(self, cr, uid, ids, context=None):
sale_orders = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in sale_orders:
if s['state'] in ['draft', 'cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'), _('In order to delete a confirmed sales order, you must cancel it before!'))
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def copy_quotation(self, cr, uid, ids, context=None):
id = self.copy(cr, uid, ids[0], context=context)
view_ref = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'sale', 'view_order_form')
        view_id = view_ref and view_ref[1] or False
return {
'type': 'ir.actions.act_window',
'name': _('Sales Order'),
'res_model': 'sale.order',
'res_id': id,
'view_type': 'form',
'view_mode': 'form',
'view_id': view_id,
'target': 'current',
'nodestroy': True,
}
def onchange_pricelist_id(self, cr, uid, ids, pricelist_id, order_lines, context=None):
context = context or {}
if not pricelist_id:
return {}
value = {
'currency_id': self.pool.get('product.pricelist').browse(cr, uid, pricelist_id, context=context).currency_id.id
}
if not order_lines or order_lines == [(6, 0, [])]:
return {'value': value}
warning = {
'title': _('Pricelist Warning!'),
'message' : _('If you change the pricelist of this order (and eventually the currency), prices of existing order lines will not be updated.')
}
return {'warning': warning, 'value': value}
def get_salenote(self, cr, uid, ids, partner_id, context=None):
context_lang = context.copy()
if partner_id:
partner_lang = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context).lang
context_lang.update({'lang': partner_lang})
return self.pool.get('res.users').browse(cr, uid, uid, context=context_lang).company_id.sale_note
def onchange_delivery_id(self, cr, uid, ids, company_id, partner_id, delivery_id, fiscal_position, context=None):
r = {'value': {}}
if not company_id:
company_id = self._get_default_company(cr, uid, context=context)
fiscal_position = self.pool['account.fiscal.position'].get_fiscal_position(cr, uid, company_id, partner_id, delivery_id, context=context)
if fiscal_position:
r['value']['fiscal_position'] = fiscal_position
return r
def onchange_partner_id(self, cr, uid, ids, part, context=None):
if not part:
return {'value': {'partner_invoice_id': False, 'partner_shipping_id': False, 'payment_term': False, 'fiscal_position': False}}
part = self.pool.get('res.partner').browse(cr, uid, part, context=context)
addr = self.pool.get('res.partner').address_get(cr, uid, [part.id], ['delivery', 'invoice', 'contact'])
pricelist = part.property_product_pricelist and part.property_product_pricelist.id or False
invoice_part = self.pool.get('res.partner').browse(cr, uid, addr['invoice'], context=context)
payment_term = invoice_part.property_payment_term and invoice_part.property_payment_term.id or False
dedicated_salesman = part.user_id and part.user_id.id or uid
val = {
'partner_invoice_id': addr['invoice'],
'partner_shipping_id': addr['delivery'],
'payment_term': payment_term,
'user_id': dedicated_salesman,
}
delivery_onchange = self.onchange_delivery_id(cr, uid, ids, False, part.id, addr['delivery'], False, context=context)
val.update(delivery_onchange['value'])
if pricelist:
val['pricelist_id'] = pricelist
if not self._get_default_section_id(cr, uid, context=context) and part.section_id:
val['section_id'] = part.section_id.id
sale_note = self.get_salenote(cr, uid, ids, part.id, context=context)
if sale_note: val.update({'note': sale_note})
return {'value': val}
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('name', '/') == '/':
vals['name'] = self.pool.get('ir.sequence').get(cr, uid, 'sale.order', context=context) or '/'
if vals.get('partner_id') and any(f not in vals for f in ['partner_invoice_id', 'partner_shipping_id', 'pricelist_id', 'fiscal_position']):
defaults = self.onchange_partner_id(cr, uid, [], vals['partner_id'], context=context)['value']
if not vals.get('fiscal_position') and vals.get('partner_shipping_id'):
delivery_onchange = self.onchange_delivery_id(cr, uid, [], vals.get('company_id'), None, vals['partner_id'], vals.get('partner_shipping_id'), context=context)
defaults.update(delivery_onchange['value'])
vals = dict(defaults, **vals)
ctx = dict(context or {}, mail_create_nolog=True)
new_id = super(sale_order, self).create(cr, uid, vals, context=ctx)
self.message_post(cr, uid, [new_id], body=_("Quotation created"), context=ctx)
return new_id
def button_dummy(self, cr, uid, ids, context=None):
return True
# FIXME: deprecated method, overriders should be using _prepare_invoice() instead.
# can be removed after 6.1.
def _inv_get(self, cr, uid, order, context=None):
return {}
def _prepare_invoice(self, cr, uid, order, lines, context=None):
"""Prepare the dict of values to create the new invoice for a
sales order. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record order: sale.order record to invoice
        :param list(int) lines: list of invoice line IDs that must be
attached to the invoice
:return: dict of value to create() the invoice
"""
if context is None:
context = {}
journal_ids = self.pool.get('account.journal').search(cr, uid,
[('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],
limit=1)
if not journal_ids:
raise osv.except_osv(_('Error!'),
_('Please define sales journal for this company: "%s" (id:%d).') % (order.company_id.name, order.company_id.id))
invoice_vals = {
'name': order.client_order_ref or '',
'origin': order.name,
'type': 'out_invoice',
'reference': order.client_order_ref or order.name,
'account_id': order.partner_invoice_id.property_account_receivable.id,
'partner_id': order.partner_invoice_id.id,
'journal_id': journal_ids[0],
'invoice_line': [(6, 0, lines)],
'currency_id': order.pricelist_id.currency_id.id,
'comment': order.note,
'payment_term': order.payment_term and order.payment_term.id or False,
'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,
'date_invoice': context.get('date_invoice', False),
'company_id': order.company_id.id,
'user_id': order.user_id and order.user_id.id or False,
'section_id' : order.section_id.id
}
# Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1
invoice_vals.update(self._inv_get(cr, uid, order, context=context))
return invoice_vals
def _make_invoice(self, cr, uid, order, lines, context=None):
inv_obj = self.pool.get('account.invoice')
obj_invoice_line = self.pool.get('account.invoice.line')
if context is None:
context = {}
invoiced_sale_line_ids = self.pool.get('sale.order.line').search(cr, uid, [('order_id', '=', order.id), ('invoiced', '=', True)], context=context)
from_line_invoice_ids = []
for invoiced_sale_line_id in self.pool.get('sale.order.line').browse(cr, uid, invoiced_sale_line_ids, context=context):
for invoice_line_id in invoiced_sale_line_id.invoice_lines:
if invoice_line_id.invoice_id.id not in from_line_invoice_ids:
from_line_invoice_ids.append(invoice_line_id.invoice_id.id)
for preinv in order.invoice_ids:
if preinv.state not in ('cancel',) and preinv.id not in from_line_invoice_ids:
for preline in preinv.invoice_line:
inv_line_id = obj_invoice_line.copy(cr, uid, preline.id, {'invoice_id': False, 'price_unit': -preline.price_unit})
lines.append(inv_line_id)
inv = self._prepare_invoice(cr, uid, order, lines, context=context)
inv_id = inv_obj.create(cr, uid, inv, context=context)
data = inv_obj.onchange_payment_term_date_invoice(cr, uid, [inv_id], inv['payment_term'], time.strftime(DEFAULT_SERVER_DATE_FORMAT))
if data.get('value', False):
inv_obj.write(cr, uid, [inv_id], data['value'], context=context)
inv_obj.button_compute(cr, uid, [inv_id])
return inv_id
def print_quotation(self, cr, uid, ids, context=None):
'''
        This function prints the sales order and marks it as sent, so that the next step of the workflow can be seen more easily
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time'
self.signal_workflow(cr, uid, ids, 'quotation_sent')
return self.pool['report'].get_action(cr, uid, ids, 'sale.report_saleorder', context=context)
def manual_invoice(self, cr, uid, ids, context=None):
""" create invoices for the given sales orders (ids), and open the form
view of one of the newly created invoices
"""
mod_obj = self.pool.get('ir.model.data')
# create invoices through the sales orders' workflow
inv_ids0 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
self.signal_workflow(cr, uid, ids, 'manual_invoice')
inv_ids1 = set(inv.id for sale in self.browse(cr, uid, ids, context) for inv in sale.invoice_ids)
# determine newly created invoices
new_inv_ids = list(inv_ids1 - inv_ids0)
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
        res_id = res and res[1] or False
return {
'name': _('Customer Invoices'),
'view_type': 'form',
'view_mode': 'form',
'view_id': [res_id],
'res_model': 'account.invoice',
'context': "{'type':'out_invoice'}",
'type': 'ir.actions.act_window',
'nodestroy': True,
'target': 'current',
'res_id': new_inv_ids and new_inv_ids[0] or False,
}
def action_view_invoice(self, cr, uid, ids, context=None):
'''
        This function returns an action that displays the existing invoices of the given sales order ids. They are shown either in a list view or, when there is only one invoice to show, in a form view.
'''
mod_obj = self.pool.get('ir.model.data')
act_obj = self.pool.get('ir.actions.act_window')
result = mod_obj.get_object_reference(cr, uid, 'account', 'action_invoice_tree1')
id = result and result[1] or False
result = act_obj.read(cr, uid, [id], context=context)[0]
#compute the number of invoices to display
inv_ids = []
for so in self.browse(cr, uid, ids, context=context):
inv_ids += [invoice.id for invoice in so.invoice_ids]
#choose the view_mode accordingly
if len(inv_ids)>1:
result['domain'] = "[('id','in',["+','.join(map(str, inv_ids))+"])]"
else:
res = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')
result['views'] = [(res and res[1] or False, 'form')]
result['res_id'] = inv_ids and inv_ids[0] or False
return result
def test_no_product(self, cr, uid, order, context):
for line in order.order_line:
if line.state == 'cancel':
continue
if line.product_id and (line.product_id.type<>'service'):
return False
return True
def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice = False, context=None):
if states is None:
states = ['confirmed', 'done', 'exception']
res = False
invoices = {}
invoice_ids = []
invoice = self.pool.get('account.invoice')
obj_sale_order_line = self.pool.get('sale.order.line')
partner_currency = {}
        # If a date was specified, use it as the invoice date; useful when invoices
        # are generated this month but dated the last day of the previous month.
if date_invoice:
context = dict(context or {}, date_invoice=date_invoice)
for o in self.browse(cr, uid, ids, context=context):
currency_id = o.pricelist_id.currency_id.id
if (o.partner_id.id in partner_currency) and (partner_currency[o.partner_id.id] <> currency_id):
raise osv.except_osv(
_('Error!'),
_('You cannot group sales having different currencies for the same partner.'))
partner_currency[o.partner_id.id] = currency_id
lines = []
for line in o.order_line:
if line.invoiced:
continue
elif (line.state in states):
lines.append(line.id)
created_lines = obj_sale_order_line.invoice_line_create(cr, uid, lines)
if created_lines:
invoices.setdefault(o.partner_invoice_id.id or o.partner_id.id, []).append((o, created_lines))
if not invoices:
for o in self.browse(cr, uid, ids, context=context):
for i in o.invoice_ids:
if i.state == 'draft':
return i.id
for val in invoices.values():
if grouped:
res = self._make_invoice(cr, uid, val[0][0], reduce(lambda x, y: x + y, [l for o, l in val], []), context=context)
invoice_ref = ''
origin_ref = ''
for o, l in val:
invoice_ref += (o.client_order_ref or o.name) + '|'
origin_ref += (o.origin or o.name) + '|'
self.write(cr, uid, [o.id], {'state': 'progress'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (o.id, res))
self.invalidate_cache(cr, uid, ['invoice_ids'], [o.id], context=context)
#remove last '|' in invoice_ref
if len(invoice_ref) >= 1:
invoice_ref = invoice_ref[:-1]
if len(origin_ref) >= 1:
origin_ref = origin_ref[:-1]
invoice.write(cr, uid, [res], {'origin': origin_ref, 'name': invoice_ref})
else:
for order, il in val:
res = self._make_invoice(cr, uid, order, il, context=context)
invoice_ids.append(res)
self.write(cr, uid, [order.id], {'state': 'progress'})
cr.execute('insert into sale_order_invoice_rel (order_id,invoice_id) values (%s,%s)', (order.id, res))
self.invalidate_cache(cr, uid, ['invoice_ids'], [order.id], context=context)
return res
def action_invoice_cancel(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state': 'invoice_except'}, context=context)
return True
def action_invoice_end(self, cr, uid, ids, context=None):
for this in self.browse(cr, uid, ids, context=context):
for line in this.order_line:
if line.state == 'exception':
line.write({'state': 'confirmed'})
if this.state == 'invoice_except':
this.write({'state': 'progress'})
return True
def action_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
sale_order_line_obj = self.pool.get('sale.order.line')
account_invoice_obj = self.pool.get('account.invoice')
for sale in self.browse(cr, uid, ids, context=context):
for inv in sale.invoice_ids:
if inv.state not in ('draft', 'cancel'):
raise osv.except_osv(
_('Cannot cancel this sales order!'),
_('First cancel all invoices attached to this sales order.'))
inv.signal_workflow('invoice_cancel')
line_ids = [l.id for l in sale.order_line if l.state != 'cancel']
sale_order_line_obj.button_cancel(cr, uid, line_ids, context=context)
self.write(cr, uid, ids, {'state': 'cancel'})
return True
def action_button_confirm(self, cr, uid, ids, context=None):
if not context:
context = {}
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.signal_workflow(cr, uid, ids, 'order_confirm')
if context.get('send_email'):
self.force_quotation_send(cr, uid, ids, context=context)
return True
def action_wait(self, cr, uid, ids, context=None):
context = context or {}
for o in self.browse(cr, uid, ids):
if not any(line.state != 'cancel' for line in o.order_line):
raise osv.except_osv(_('Error!'),_('You cannot confirm a sales order which has no line.'))
noprod = self.test_no_product(cr, uid, o, context)
if (o.order_policy == 'manual') or noprod:
self.write(cr, uid, [o.id], {'state': 'manual', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
else:
self.write(cr, uid, [o.id], {'state': 'progress', 'date_confirm': fields.date.context_today(self, cr, uid, context=context)})
self.pool.get('sale.order.line').button_confirm(cr, uid, [x.id for x in o.order_line if x.state != 'cancel'])
return True
def action_quotation_send(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi sale template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'sale', 'email_template_edi_sale')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict()
ctx.update({
'default_model': 'sale.order',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_so_as_sent': True
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def force_quotation_send(self, cr, uid, ids, context=None):
for order_id in ids:
email_act = self.action_quotation_send(cr, uid, [order_id], context=context)
if email_act and email_act.get('context'):
composer_obj = self.pool['mail.compose.message']
composer_values = {}
email_ctx = email_act['context']
template_values = [
email_ctx.get('default_template_id'),
email_ctx.get('default_composition_mode'),
email_ctx.get('default_model'),
email_ctx.get('default_res_id'),
]
composer_values.update(composer_obj.onchange_template_id(cr, uid, None, *template_values, context=context).get('value', {}))
if not composer_values.get('email_from'):
composer_values['email_from'] = self.browse(cr, uid, order_id, context=context).company_id.email
for key in ['attachment_ids', 'partner_ids']:
if composer_values.get(key):
composer_values[key] = [(6, 0, composer_values[key])]
composer_id = composer_obj.create(cr, uid, composer_values, context=email_ctx)
composer_obj.send_mail(cr, uid, [composer_id], context=email_ctx)
return True
def action_done(self, cr, uid, ids, context=None):
for order in self.browse(cr, uid, ids, context=context):
self.pool.get('sale.order.line').write(cr, uid, [line.id for line in order.order_line if line.state != 'cancel'], {'state': 'done'}, context=context)
return self.write(cr, uid, ids, {'state': 'done'}, context=context)
def _prepare_order_line_procurement(self, cr, uid, order, line, group_id=False, context=None):
date_planned = self._get_date_planned(cr, uid, order, line, order.date_order, context=context)
return {
'name': line.name,
'origin': order.name,
'date_planned': date_planned,
'product_id': line.product_id.id,
'product_qty': line.product_uom_qty,
'product_uom': line.product_uom.id,
'product_uos_qty': (line.product_uos and line.product_uos_qty) or line.product_uom_qty,
'product_uos': (line.product_uos and line.product_uos.id) or line.product_uom.id,
'company_id': order.company_id.id,
'group_id': group_id,
'invoice_state': (order.order_policy == 'picking') and '2binvoiced' or 'none',
'sale_line_id': line.id
}
def _get_date_planned(self, cr, uid, order, line, start_date, context=None):
date_planned = datetime.strptime(start_date, DEFAULT_SERVER_DATETIME_FORMAT) + timedelta(days=line.delay or 0.0)
return date_planned
def _prepare_procurement_group(self, cr, uid, order, context=None):
return {'name': order.name, 'partner_id': order.partner_shipping_id.id}
def procurement_needed(self, cr, uid, ids, context=None):
        #when only the sale module is installed there is no need to create
        #procurements; modules installed on top of it (sale_service, sale_stock)
        #change this behaviour.
sale_line_obj = self.pool.get('sale.order.line')
res = []
for order in self.browse(cr, uid, ids, context=context):
res.append(sale_line_obj.need_procurement(cr, uid, [line.id for line in order.order_line if line.state != 'cancel'], context=context))
return any(res)
def action_ignore_delivery_exception(self, cr, uid, ids, context=None):
for sale_order in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, ids, {'state': 'progress' if sale_order.invoice_exists else 'manual'}, context=context)
return True
def action_ship_create(self, cr, uid, ids, context=None):
"""Create the required procurements to supply sales order lines, also connecting
the procurements to appropriate stock moves in order to bring the goods to the
sales order's requested location.
:return: True
"""
context = context or {}
context['lang'] = self.pool['res.users'].browse(cr, uid, uid).lang
procurement_obj = self.pool.get('procurement.order')
sale_line_obj = self.pool.get('sale.order.line')
for order in self.browse(cr, uid, ids, context=context):
proc_ids = []
vals = self._prepare_procurement_group(cr, uid, order, context=context)
if not order.procurement_group_id:
group_id = self.pool.get("procurement.group").create(cr, uid, vals, context=context)
order.write({'procurement_group_id': group_id})
for line in order.order_line:
if line.state == 'cancel':
continue
                #Try to fix procurements in exception (possible when, after a shipping exception, the user chooses to recreate)
if line.procurement_ids:
#first check them to see if they are in exception or not (one of the related moves is cancelled)
procurement_obj.check(cr, uid, [x.id for x in line.procurement_ids if x.state not in ['cancel', 'done']])
line.refresh()
                    #run again the procurements that are in exception in order to trigger another move
except_proc_ids = [x.id for x in line.procurement_ids if x.state in ('exception', 'cancel')]
procurement_obj.reset_to_confirmed(cr, uid, except_proc_ids, context=context)
proc_ids += except_proc_ids
elif sale_line_obj.need_procurement(cr, uid, [line.id], context=context):
if (line.state == 'done') or not line.product_id:
continue
vals = self._prepare_order_line_procurement(cr, uid, order, line, group_id=order.procurement_group_id.id, context=context)
ctx = context.copy()
ctx['procurement_autorun_defer'] = True
proc_id = procurement_obj.create(cr, uid, vals, context=ctx)
proc_ids.append(proc_id)
#Confirm procurement order such that rules will be applied on it
            #note that the workflow normally ensures proc_ids isn't an empty list
procurement_obj.run(cr, uid, proc_ids, context=context)
            #if shipping was in exception and the user chooses to recreate the delivery order, write the new status of the SO
if order.state == 'shipping_except':
val = {'state': 'progress', 'shipped': False}
if (order.order_policy == 'manual'):
for line in order.order_line:
if (not line.invoiced) and (line.state not in ('cancel', 'draft')):
val['state'] = 'manual'
break
order.write(val)
return True
def onchange_fiscal_position(self, cr, uid, ids, fiscal_position, order_lines, context=None):
'''Update taxes of order lines for each line where a product is defined
:param list ids: not used
:param int fiscal_position: sale order fiscal position
:param list order_lines: command list for one2many write method
'''
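        # The one2many command tuples handled below follow the standard ORM
        # write() conventions (values here are hypothetical):
        #   (0, 0, {'product_id': 42, 'product_uom_qty': 1.0})  # create a line
        #   (1, 7, {'price_unit': 10.0})                         # update line 7
        #   (4, 7)                                               # link line 7
        #   (6, 0, [7, 8, 9])                                    # link all ids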
order_line = []
fiscal_obj = self.pool.get('account.fiscal.position')
product_obj = self.pool.get('product.product')
line_obj = self.pool.get('sale.order.line')
fpos = False
if fiscal_position:
fpos = fiscal_obj.browse(cr, uid, fiscal_position, context=context)
for line in order_lines:
# create (0, 0, { fields })
# update (1, ID, { fields })
if line[0] in [0, 1]:
prod = None
if line[2].get('product_id'):
prod = product_obj.browse(cr, uid, line[2]['product_id'], context=context)
elif line[1]:
prod = line_obj.browse(cr, uid, line[1], context=context).product_id
if prod and prod.taxes_id:
line[2]['tax_id'] = [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]
order_line.append(line)
# link (4, ID)
# link all (6, 0, IDS)
elif line[0] in [4, 6]:
line_ids = line[0] == 4 and [line[1]] or line[2]
for line_id in line_ids:
prod = line_obj.browse(cr, uid, line_id, context=context).product_id
if prod and prod.taxes_id:
order_line.append([1, line_id, {'tax_id': [[6, 0, fiscal_obj.map_tax(cr, uid, fpos, prod.taxes_id)]]}])
else:
order_line.append([4, line_id])
else:
order_line.append(line)
return {'value': {'order_line': order_line, 'amount_untaxed': False, 'amount_tax': False, 'amount_total': False}}
def test_procurements_done(self, cr, uid, ids, context=None):
for sale in self.browse(cr, uid, ids, context=context):
for line in sale.order_line:
if line.state == 'cancel':
continue
if not all([x.state == 'done' for x in line.procurement_ids]):
return False
return True
def test_procurements_except(self, cr, uid, ids, context=None):
for sale in self.browse(cr, uid, ids, context=context):
for line in sale.order_line:
if line.state == 'cancel':
continue
if any([x.state == 'cancel' for x in line.procurement_ids]):
return True
return False
# TODO add a field price_unit_uos
# - update it on change product and unit price
# - use it in report if there is a uos
class sale_order_line(osv.osv):
def need_procurement(self, cr, uid, ids, context=None):
        #when only the sale module is installed there is no need to create
        #procurements; modules installed on top of it (sale_service, sale_stock)
        #change this behaviour.
prod_obj = self.pool.get('product.product')
for line in self.browse(cr, uid, ids, context=context):
if prod_obj.need_procurement(cr, uid, [line.product_id.id], context=context):
return True
return False
def _calc_line_base_price(self, cr, uid, line, context=None):
return line.price_unit * (1 - (line.discount or 0.0) / 100.0)
def _calc_line_quantity(self, cr, uid, line, context=None):
return line.product_uom_qty
def _amount_line(self, cr, uid, ids, field_name, arg, context=None):
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
res = {}
if context is None:
context = {}
for line in self.browse(cr, uid, ids, context=context):
price = self._calc_line_base_price(cr, uid, line, context=context)
qty = self._calc_line_quantity(cr, uid, line, context=context)
taxes = tax_obj.compute_all(cr, uid, line.tax_id, price, qty,
line.product_id,
line.order_id.partner_id)
cur = line.order_id.pricelist_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, taxes['total'])
return res
def _get_uom_id(self, cr, uid, *args):
try:
proxy = self.pool.get('ir.model.data')
result = proxy.get_object_reference(cr, uid, 'product', 'product_uom_unit')
return result[1]
except Exception, ex:
return False
def _fnct_line_invoiced(self, cr, uid, ids, field_name, args, context=None):
res = dict.fromkeys(ids, False)
for this in self.browse(cr, uid, ids, context=context):
res[this.id] = this.invoice_lines and \
all(iline.invoice_id.state != 'cancel' for iline in this.invoice_lines)
return res
def _order_lines_from_invoice(self, cr, uid, ids, context=None):
# direct access to the m2m table is the least convoluted way to achieve this (and is ok ACL-wise)
cr.execute("""SELECT DISTINCT sol.id FROM sale_order_invoice_rel rel JOIN
sale_order_line sol ON (sol.order_id = rel.order_id)
WHERE rel.invoice_id = ANY(%s)""", (list(ids),))
return [i[0] for i in cr.fetchall()]
def _get_price_reduce(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0.0)
for line in self.browse(cr, uid, ids, context=context):
res[line.id] = line.price_subtotal / line.product_uom_qty
return res
_name = 'sale.order.line'
_description = 'Sales Order Line'
_columns = {
'order_id': fields.many2one('sale.order', 'Order Reference', required=True, ondelete='cascade', select=True, readonly=True, states={'draft':[('readonly',False)]}),
'name': fields.text('Description', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of sales order lines."),
'product_id': fields.many2one('product.product', 'Product', domain=[('sale_ok', '=', True)], change_default=True, readonly=True, states={'draft': [('readonly', False)]}, ondelete='restrict'),
'invoice_lines': fields.many2many('account.invoice.line', 'sale_order_line_invoice_rel', 'order_line_id', 'invoice_id', 'Invoice Lines', readonly=True, copy=False),
'invoiced': fields.function(_fnct_line_invoiced, string='Invoiced', type='boolean',
store={
'account.invoice': (_order_lines_from_invoice, ['state'], 10),
'sale.order.line': (lambda self,cr,uid,ids,ctx=None: ids, ['invoice_lines'], 10)
}),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price'), readonly=True, states={'draft': [('readonly', False)]}),
'price_subtotal': fields.function(_amount_line, string='Subtotal', digits_compute= dp.get_precision('Account')),
'price_reduce': fields.function(_get_price_reduce, type='float', string='Price Reduce', digits_compute=dp.get_precision('Product Price')),
'tax_id': fields.many2many('account.tax', 'sale_order_tax', 'order_line_id', 'tax_id', 'Taxes', readonly=True, states={'draft': [('readonly', False)]}),
'address_allotment_id': fields.many2one('res.partner', 'Allotment Partner',help="A partner to whom the particular product needs to be allotted."),
'product_uom_qty': fields.float('Quantity', digits_compute= dp.get_precision('Product UoS'), required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uom': fields.many2one('product.uom', 'Unit of Measure ', required=True, readonly=True, states={'draft': [('readonly', False)]}),
'product_uos_qty': fields.float('Quantity (UoS)' ,digits_compute= dp.get_precision('Product UoS'), readonly=True, states={'draft': [('readonly', False)]}),
'product_uos': fields.many2one('product.uom', 'Product UoS'),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount'), readonly=True, states={'draft': [('readonly', False)]}),
'th_weight': fields.float('Weight', readonly=True, states={'draft': [('readonly', False)]}),
'state': fields.selection(
[('cancel', 'Cancelled'),('draft', 'Draft'),('confirmed', 'Confirmed'),('exception', 'Exception'),('done', 'Done')],
'Status', required=True, readonly=True, copy=False,
help='* The \'Draft\' status is set when the related sales order in draft status. \
\n* The \'Confirmed\' status is set when the related sales order is confirmed. \
\n* The \'Exception\' status is set when the related sales order is set as exception. \
\n* The \'Done\' status is set when the sales order line has been picked. \
\n* The \'Cancelled\' status is set when a user cancel the sales order related.'),
'order_partner_id': fields.related('order_id', 'partner_id', type='many2one', relation='res.partner', store=True, string='Customer'),
'salesman_id':fields.related('order_id', 'user_id', type='many2one', relation='res.users', store=True, string='Salesperson'),
'company_id': fields.related('order_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'delay': fields.float('Delivery Lead Time', required=True, help="Number of days between the order confirmation and the shipping of the products to the customer", readonly=True, states={'draft': [('readonly', False)]}),
'procurement_ids': fields.one2many('procurement.order', 'sale_line_id', 'Procurements'),
}
_order = 'order_id desc, sequence, id'
_defaults = {
'product_uom' : _get_uom_id,
'discount': 0.0,
'product_uom_qty': 1,
'product_uos_qty': 1,
'sequence': 10,
'state': 'draft',
'price_unit': 0.0,
'delay': 0.0,
}
def _get_line_qty(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos_qty or 0.0
return line.product_uom_qty
def _get_line_uom(self, cr, uid, line, context=None):
if line.product_uos:
return line.product_uos.id
return line.product_uom.id
def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
"""Prepare the dict of values to create the new invoice line for a
sales order line. This method may be overridden to implement custom
invoice generation (making sure to call super() to establish
a clean extension chain).
:param browse_record line: sale.order.line record to invoice
:param int account_id: optional ID of a G/L account to force
(this is used for returning products including service)
:return: dict of values to create() the invoice line
"""
res = {}
if not line.invoiced:
if not account_id:
if line.product_id:
account_id = line.product_id.property_account_income.id
if not account_id:
account_id = line.product_id.categ_id.property_account_income_categ.id
if not account_id:
raise osv.except_osv(_('Error!'),
_('Please define income account for this product: "%s" (id:%d).') % \
(line.product_id.name, line.product_id.id,))
else:
prop = self.pool.get('ir.property').get(cr, uid,
'property_account_income_categ', 'product.category',
context=context)
account_id = prop and prop.id or False
uosqty = self._get_line_qty(cr, uid, line, context=context)
uos_id = self._get_line_uom(cr, uid, line, context=context)
pu = 0.0
if uosqty:
pu = round(line.price_unit * line.product_uom_qty / uosqty,
self.pool.get('decimal.precision').precision_get(cr, uid, 'Product Price'))
fpos = line.order_id.fiscal_position or False
account_id = self.pool.get('account.fiscal.position').map_account(cr, uid, fpos, account_id)
if not account_id:
raise osv.except_osv(_('Error!'),
_('There is no Fiscal Position defined or Income category account defined for default properties of Product categories.'))
res = {
'name': line.name,
'sequence': line.sequence,
'origin': line.order_id.name,
'account_id': account_id,
'price_unit': pu,
'quantity': uosqty,
'discount': line.discount,
'uos_id': uos_id,
'product_id': line.product_id.id or False,
'invoice_line_tax_id': [(6, 0, [x.id for x in line.tax_id])],
'account_analytic_id': line.order_id.project_id and line.order_id.project_id.id or False,
}
return res
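# Illustrative example (not part of the original module): a hypothetical
# extension could override _prepare_order_line_invoice_line() and keep the
# extension chain clean by calling super() first; the extra naming below is
# an assumption for demonstration only.
#   class sale_order_line_custom(osv.osv):
#       _inherit = 'sale.order.line'
#       def _prepare_order_line_invoice_line(self, cr, uid, line, account_id=False, context=None):
#           res = super(sale_order_line_custom, self)._prepare_order_line_invoice_line(
#               cr, uid, line, account_id=account_id, context=context)
#           if res:
#               res['name'] = "%s (SO %s)" % (line.name, line.order_id.name)
#           return res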
def invoice_line_create(self, cr, uid, ids, context=None):
if context is None:
context = {}
create_ids = []
sales = set()
for line in self.browse(cr, uid, ids, context=context):
vals = self._prepare_order_line_invoice_line(cr, uid, line, False, context)
if vals:
inv_id = self.pool.get('account.invoice.line').create(cr, uid, vals, context=context)
self.write(cr, uid, [line.id], {'invoice_lines': [(4, inv_id)]}, context=context)
sales.add(line.order_id.id)
create_ids.append(inv_id)
# Trigger workflow events
for sale_id in sales:
workflow.trg_write(uid, 'sale.order', sale_id, cr)
return create_ids
def button_cancel(self, cr, uid, ids, context=None):
lines = self.browse(cr, uid, ids, context=context)
for line in lines:
if line.invoiced:
raise osv.except_osv(_('Invalid Action!'), _('You cannot cancel a sales order line that has already been invoiced.'))
procurement_obj = self.pool['procurement.order']
procurement_obj.cancel(cr, uid, sum([l.procurement_ids.ids for l in lines], []), context=context)
return self.write(cr, uid, ids, {'state': 'cancel'})
def button_confirm(self, cr, uid, ids, context=None):
return self.write(cr, uid, ids, {'state': 'confirmed'})
def button_done(self, cr, uid, ids, context=None):
res = self.write(cr, uid, ids, {'state': 'done'})
for line in self.browse(cr, uid, ids, context=context):
workflow.trg_write(uid, 'sale.order', line.order_id.id, cr)
return res
def uos_change(self, cr, uid, ids, product_uos, product_uos_qty=0, product_id=None):
product_obj = self.pool.get('product.product')
if not product_id:
return {'value': {'product_uom': product_uos,
'product_uom_qty': product_uos_qty}, 'domain': {}}
product = product_obj.browse(cr, uid, product_id)
value = {
'product_uom': product.uom_id.id,
}
# FIXME must depend on the uos/uom of the product and not only on the coeff.
try:
value.update({
'product_uom_qty': product_uos_qty / product.uos_coeff,
'th_weight': product_uos_qty / product.uos_coeff * product.weight
})
except ZeroDivisionError:
pass
return {'value': value}
def create(self, cr, uid, values, context=None):
if values.get('order_id') and values.get('product_id') and any(f not in values for f in ['name', 'price_unit', 'product_uom_qty', 'product_uom']):
order = self.pool['sale.order'].read(cr, uid, values['order_id'], ['pricelist_id', 'partner_id', 'date_order', 'fiscal_position'], context=context)
defaults = self.product_id_change(cr, uid, [], order['pricelist_id'][0], values['product_id'],
qty=float(values.get('product_uom_qty', False)),
uom=values.get('product_uom', False),
qty_uos=float(values.get('product_uos_qty', False)),
uos=values.get('product_uos', False),
name=values.get('name', False),
partner_id=order['partner_id'][0],
date_order=order['date_order'],
fiscal_position=order['fiscal_position'][0] if order['fiscal_position'] else False,
flag=False, # Force name update
context=dict(context or {}, company_id=values.get('company_id'))
)['value']
if defaults.get('tax_id'):
defaults['tax_id'] = [[6, 0, defaults['tax_id']]]
values = dict(defaults, **values)
return super(sale_order_line, self).create(cr, uid, values, context=context)
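# Illustrative example (not part of the original module): thanks to the
# fallback above, programmatic creation can omit onchange-derived fields;
# the values below are made up.
#   self.pool['sale.order.line'].create(cr, uid, {
#       'order_id': order_id,
#       'product_id': product_id,
#       'product_uom_qty': 2.0,
#   }, context=context)
# name, price_unit, product_uom and tax_id are then filled in from
# product_id_change() before calling super().create().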
def product_id_change(self, cr, uid, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, packaging=False, fiscal_position=False, flag=False, context=None):
context = context or {}
lang = lang or context.get('lang', False)
if not partner_id:
raise osv.except_osv(_('No Customer Defined!'), _('Before choosing a product,\n select a customer in the sales form.'))
warning = False
product_uom_obj = self.pool.get('product.uom')
partner_obj = self.pool.get('res.partner')
product_obj = self.pool.get('product.product')
partner = partner_obj.browse(cr, uid, partner_id)
lang = partner.lang
context_partner = context.copy()
context_partner.update({'lang': lang, 'partner_id': partner_id})
if not product:
return {'value': {'th_weight': 0,
'product_uos_qty': qty}, 'domain': {'product_uom': [],
'product_uos': []}}
if not date_order:
date_order = time.strftime(DEFAULT_SERVER_DATE_FORMAT)
result = {}
warning_msgs = ''
product_obj = product_obj.browse(cr, uid, product, context=context_partner)
uom2 = False
if uom:
uom2 = product_uom_obj.browse(cr, uid, uom)
if product_obj.uom_id.category_id.id != uom2.category_id.id:
uom = False
if uos:
if product_obj.uos_id:
uos2 = product_uom_obj.browse(cr, uid, uos)
if product_obj.uos_id.category_id.id != uos2.category_id.id:
uos = False
else:
uos = False
fpos = False
if not fiscal_position:
fpos = partner.property_account_position or False
else:
fpos = self.pool.get('account.fiscal.position').browse(cr, uid, fiscal_position)
if update_tax: # update_tax is False when only the quantity has changed
# The superuser is used by website_sale in order to create a sale order. We need to make
# sure we only select the taxes related to the company of the partner. This should only
# apply if the partner is linked to a company.
if uid == SUPERUSER_ID and context.get('company_id'):
taxes = product_obj.taxes_id.filtered(lambda r: r.company_id.id == context['company_id'])
else:
taxes = product_obj.taxes_id
result['tax_id'] = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, taxes)
if not flag:
result['name'] = self.pool.get('product.product').name_get(cr, uid, [product_obj.id], context=context_partner)[0][1]
if product_obj.description_sale:
result['name'] += '\n'+product_obj.description_sale
domain = {}
if (not uom) and (not uos):
result['product_uom'] = product_obj.uom_id.id
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
uos_category_id = product_obj.uos_id.category_id.id
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
uos_category_id = False
result['th_weight'] = qty * product_obj.weight
domain = {'product_uom':
[('category_id', '=', product_obj.uom_id.category_id.id)],
'product_uos':
[('category_id', '=', uos_category_id)]}
elif uos and not uom: # only happens if uom is False
result['product_uom'] = product_obj.uom_id and product_obj.uom_id.id
result['product_uom_qty'] = qty_uos / product_obj.uos_coeff
result['th_weight'] = result['product_uom_qty'] * product_obj.weight
elif uom: # whether uos is set or not
default_uom = product_obj.uom_id and product_obj.uom_id.id
q = product_uom_obj._compute_qty(cr, uid, uom, qty, default_uom)
if product_obj.uos_id:
result['product_uos'] = product_obj.uos_id.id
result['product_uos_qty'] = qty * product_obj.uos_coeff
else:
result['product_uos'] = False
result['product_uos_qty'] = qty
result['th_weight'] = q * product_obj.weight # Round the quantity up
if not uom2:
uom2 = product_obj.uom_id
# get unit price
if not pricelist:
warn_msg = _('You have to select a pricelist or a customer in the sales form !\n'
'Please set one before choosing a product.')
warning_msgs += _("No Pricelist ! : ") + warn_msg +"\n\n"
else:
ctx = dict(
context,
uom=uom or result.get('product_uom'),
date=date_order,
)
price = self.pool.get('product.pricelist').price_get(cr, uid, [pricelist],
product, qty or 1.0, partner_id, ctx)[pricelist]
if price is False:
warn_msg = _("Cannot find a pricelist line matching this product and quantity.\n"
"You have to change either the product, the quantity or the pricelist.")
warning_msgs += _("No valid pricelist line found ! :") + warn_msg +"\n\n"
else:
if update_tax:
price = self.pool['account.tax']._fix_tax_included_price(cr, uid, price, product_obj.taxes_id, result['tax_id'])
result.update({'price_unit': price})
if context.get('uom_qty_change', False):
values = {'price_unit': price}
if result.get('product_uos_qty'):
values['product_uos_qty'] = result['product_uos_qty']
return {'value': values, 'domain': {}, 'warning': False}
if warning_msgs:
warning = {
'title': _('Configuration Error!'),
'message' : warning_msgs
}
return {'value': result, 'domain': domain, 'warning': warning}
def product_uom_change(self, cursor, user, ids, pricelist, product, qty=0,
uom=False, qty_uos=0, uos=False, name='', partner_id=False,
lang=False, update_tax=True, date_order=False, context=None):
context = context or {}
lang = lang or ('lang' in context and context['lang'])
if not uom:
return {'value': {'price_unit': 0.0, 'product_uom' : uom or False}}
return self.product_id_change(cursor, user, ids, pricelist, product,
qty=qty, uom=uom, qty_uos=qty_uos, uos=uos, name=name,
partner_id=partner_id, lang=lang, update_tax=update_tax,
date_order=date_order, context=context)
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
"""Allows to delete sales order lines in draft,cancel states"""
for rec in self.browse(cr, uid, ids, context=context):
if rec.state not in ['draft', 'cancel']:
raise osv.except_osv(_('Invalid Action!'), _('Cannot delete a sales order line which is in state \'%s\'.') %(rec.state,))
return super(sale_order_line, self).unlink(cr, uid, ids, context=context)
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'sale.order' and context.get('default_res_id') and context.get('mark_so_as_sent'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('sale.order').signal_workflow(cr, uid, [context['default_res_id']], 'quotation_sent')
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
class account_invoice(osv.Model):
_inherit = 'account.invoice'
def _get_default_section_id(self, cr, uid, context=None):
""" Gives default section by checking if present in the context """
section_id = self._resolve_section_id_from_context(cr, uid, context=context) or False
if not section_id:
section_id = self.pool.get('res.users').browse(cr, uid, uid, context).default_section_id.id or False
return section_id
def _resolve_section_id_from_context(self, cr, uid, context=None):
""" Returns ID of section based on the value of 'section_id'
context key, or None if it cannot be resolved to a single
Sales Team.
"""
if context is None:
context = {}
if type(context.get('default_section_id')) in (int, long):
return context.get('default_section_id')
if isinstance(context.get('default_section_id'), basestring):
section_ids = self.pool.get('crm.case.section').name_search(cr, uid, name=context['default_section_id'], context=context)
if len(section_ids) == 1:
return int(section_ids[0][0])
return None
_columns = {
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_defaults = {
'section_id': lambda self, cr, uid, c=None: self._get_default_section_id(cr, uid, context=c)
}
def confirm_paid(self, cr, uid, ids, context=None):
sale_order_obj = self.pool.get('sale.order')
res = super(account_invoice, self).confirm_paid(cr, uid, ids, context=context)
so_ids = sale_order_obj.search(cr, uid, [('invoice_ids', 'in', ids)], context=context)
for so_id in so_ids:
sale_order_obj.message_post(cr, uid, so_id, body=_("Invoice paid"), context=context)
return res
def unlink(self, cr, uid, ids, context=None):
""" Overwrite unlink method of account invoice to send a trigger to the sale workflow upon invoice deletion """
invoice_ids = self.search(cr, uid, [('id', 'in', ids), ('state', 'in', ['draft', 'cancel'])], context=context)
#if we can't cancel all invoices, do nothing
if len(invoice_ids) == len(ids):
#Cancel invoice(s) first before deleting them so that if any sale order is associated with them
#it will trigger the workflow to put the sale order in an 'invoice exception' state
for id in ids:
workflow.trg_validate(uid, 'account.invoice', id, 'invoice_cancel', cr)
return super(account_invoice, self).unlink(cr, uid, ids, context=context)
class procurement_order(osv.osv):
_inherit = 'procurement.order'
_columns = {
'sale_line_id': fields.many2one('sale.order.line', string='Sale Order Line'),
}
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
res = super(procurement_order, self).write(cr, uid, ids, vals, context=context)
from openerp import workflow
if vals.get('state') in ['done', 'cancel', 'exception']:
for proc in self.browse(cr, uid, ids, context=context):
if proc.sale_line_id and proc.sale_line_id.order_id:
order_id = proc.sale_line_id.order_id.id
if self.pool.get('sale.order').test_procurements_done(cr, uid, [order_id], context=context):
workflow.trg_validate(uid, 'sale.order', order_id, 'ship_end', cr)
if self.pool.get('sale.order').test_procurements_except(cr, uid, [order_id], context=context):
workflow.trg_validate(uid, 'sale.order', order_id, 'ship_except', cr)
return res
class product_product(osv.Model):
_inherit = 'product.product'
def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
r = dict.fromkeys(ids, 0)
domain = [
('state', 'in', ['confirmed', 'done']),
('product_id', 'in', ids),
]
for group in self.pool['sale.report'].read_group(cr, uid, domain, ['product_id', 'product_uom_qty'], ['product_id'], context=context):
r[group['product_id'][0]] = group['product_uom_qty']
return r
def action_view_sales(self, cr, uid, ids, context=None):
result = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'sale.action_order_line_product_tree', raise_if_not_found=True)
result = self.pool['ir.actions.act_window'].read(cr, uid, [result], context=context)[0]
result['domain'] = "[('product_id','in',[" + ','.join(map(str, ids)) + "])]"
return result
_columns = {
'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
}
class product_template(osv.Model):
_inherit = 'product.template'
def _sales_count(self, cr, uid, ids, field_name, arg, context=None):
res = dict.fromkeys(ids, 0)
for template in self.browse(cr, uid, ids, context=context):
res[template.id] = sum([p.sales_count for p in template.product_variant_ids])
return res
def action_view_sales(self, cr, uid, ids, context=None):
act_obj = self.pool.get('ir.actions.act_window')
mod_obj = self.pool.get('ir.model.data')
product_ids = []
for template in self.browse(cr, uid, ids, context=context):
product_ids += [x.id for x in template.product_variant_ids]
result = mod_obj.xmlid_to_res_id(cr, uid, 'sale.action_order_line_product_tree',raise_if_not_found=True)
result = act_obj.read(cr, uid, [result], context=context)[0]
result['domain'] = "[('product_id','in',[" + ','.join(map(str, product_ids)) + "])]"
return result
_columns = {
'sales_count': fields.function(_sales_count, string='# Sales', type='integer'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jonnary/keystone | keystone/common/wsgi.py | 8 | 30934 | # Copyright 2012 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utility methods for working with WSGI servers."""
import copy
import itertools
import wsgiref.util
from oslo_config import cfg
import oslo_i18n
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import strutils
import routes.middleware
import six
import webob.dec
import webob.exc
from keystone.common import dependency
from keystone.common import json_home
from keystone.common import utils
from keystone import exception
from keystone.i18n import _
from keystone.i18n import _LI
from keystone.i18n import _LW
from keystone.models import token_model
CONF = cfg.CONF
LOG = log.getLogger(__name__)
# Environment variable used to pass the request context
CONTEXT_ENV = 'openstack.context'
# Environment variable used to pass the request params
PARAMS_ENV = 'openstack.params'
JSON_ENCODE_CONTENT_TYPES = set(['application/json',
'application/json-home'])
def validate_token_bind(context, token_ref):
bind_mode = CONF.token.enforce_token_bind
if bind_mode == 'disabled':
return
if not isinstance(token_ref, token_model.KeystoneToken):
raise exception.UnexpectedError(_('token reference must be a '
'KeystoneToken type, got: %s') %
type(token_ref))
bind = token_ref.bind
# permissive and strict modes don't require there to be a bind
permissive = bind_mode in ('permissive', 'strict')
# get the named mode if bind_mode is not one of the known
name = None if permissive or bind_mode == 'required' else bind_mode
if not bind:
if permissive:
# no bind provided and none required
return
else:
LOG.info(_LI("No bind information present in token"))
raise exception.Unauthorized()
if name and name not in bind:
LOG.info(_LI("Named bind mode %s not in bind information"), name)
raise exception.Unauthorized()
for bind_type, identifier in bind.items():
if bind_type == 'kerberos':
if not (context['environment'].get('AUTH_TYPE', '').lower()
== 'negotiate'):
LOG.info(_LI("Kerberos credentials required and not present"))
raise exception.Unauthorized()
if not context['environment'].get('REMOTE_USER') == identifier:
LOG.info(_LI("Kerberos credentials do not match "
"those in bind"))
raise exception.Unauthorized()
LOG.info(_LI("Kerberos bind authentication successful"))
elif bind_mode == 'permissive':
LOG.debug(("Ignoring unknown bind for permissive mode: "
"{%(bind_type)s: %(identifier)s}"),
{'bind_type': bind_type, 'identifier': identifier})
else:
LOG.info(_LI("Couldn't verify unknown bind: "
"{%(bind_type)s: %(identifier)s}"),
{'bind_type': bind_type, 'identifier': identifier})
raise exception.Unauthorized()
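# Illustrative example (configuration sketch, not taken from this file): the
# bind mode checked above comes from keystone.conf, e.g.
#   [token]
#   enforce_token_bind = permissive   # or: disabled, strict, required, kerberos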
def best_match_language(req):
"""Determines the best available locale from the Accept-Language
HTTP header passed in the request.
"""
if not req.accept_language:
return None
return req.accept_language.best_match(
oslo_i18n.get_available_languages('keystone'))
class BaseApplication(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [app:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[app:wadl]
latest_version = 1.3
paste.app_factory = keystone.fancy_api:Wadl.factory
which would result in a call to the `Wadl` class as
import keystone.fancy_api
keystone.fancy_api.Wadl(latest_version='1.3')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
return cls(**local_config)
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
@webob.dec.wsgify()
def __call__(self, req):
# Any of the following objects work as responses:
# Option 1: simple string
res = 'message\n'
# Option 2: a nicely formatted HTTP exception page
res = exc.HTTPForbidden(explanation='Nice try')
# Option 3: a webob Response object (in case you need to play with
# headers, or you want to be treated like an iterable, or or or)
res = Response();
res.app_iter = open('somefile')
# Option 4: any wsgi app to be run next
res = self.application
# Option 5: you can get a Response object for a wsgi app, too, to
# play with headers etc
res = req.get_response(self.application)
# You can then just return your response...
return res
# ... or set req.response and return None.
req.response = res
See the end of http://pythonpaste.org/webob/modules/dec.html
for more info.
"""
raise NotImplementedError('You must implement __call__')
@dependency.requires('assignment_api', 'policy_api', 'token_provider_api')
class Application(BaseApplication):
@webob.dec.wsgify()
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict.pop('action')
del arg_dict['controller']
# allow middleware up the stack to provide context, params and headers.
context = req.environ.get(CONTEXT_ENV, {})
try:
context['query_string'] = dict(req.params.items())
except UnicodeDecodeError as e:
# The webob package throws UnicodeError when a request cannot be
# decoded. Raise ValidationError instead to avoid an UnknownError.
msg = _('Query string is not UTF-8 encoded')
raise exception.ValidationError(msg)
context['headers'] = dict(req.headers.items())
context['path'] = req.environ['PATH_INFO']
scheme = (None if not CONF.secure_proxy_ssl_header
else req.environ.get(CONF.secure_proxy_ssl_header))
if scheme:
# NOTE(andrey-mp): "wsgi.url_scheme" contains the protocol used
# before the proxy removed it ('https' usually). So if
# the webob.Request instance is modified in order to use this
# scheme instead of the one defined by API, the call to
# webob.Request.relative_url() will return a URL with the correct
# scheme.
req.environ['wsgi.url_scheme'] = scheme
context['host_url'] = req.host_url
params = req.environ.get(PARAMS_ENV, {})
# authentication and authorization attributes are set as environment
# values by the container and processed by the pipeline. The complete
# set is not yet known.
context['environment'] = req.environ
context['accept_header'] = req.accept
req.environ = None
params.update(arg_dict)
context.setdefault('is_admin', False)
# TODO(termie): do some basic normalization on methods
method = getattr(self, action)
# NOTE(morganfainberg): use the request method to normalize the
# response code between GET and HEAD requests. The HTTP status should
# be the same.
LOG.info('%(req_method)s %(uri)s', {
'req_method': req.environ['REQUEST_METHOD'].upper(),
'uri': wsgiref.util.request_uri(req.environ),
})
params = self._normalize_dict(params)
try:
result = method(context, **params)
except exception.Unauthorized as e:
LOG.warning(
_LW("Authorization failed. %(exception)s from "
"%(remote_addr)s"),
{'exception': e, 'remote_addr': req.environ['REMOTE_ADDR']})
return render_exception(e, context=context,
user_locale=best_match_language(req))
except exception.Error as e:
LOG.warning(six.text_type(e))
return render_exception(e, context=context,
user_locale=best_match_language(req))
except TypeError as e:
LOG.exception(six.text_type(e))
return render_exception(exception.ValidationError(e),
context=context,
user_locale=best_match_language(req))
except Exception as e:
LOG.exception(six.text_type(e))
return render_exception(exception.UnexpectedError(exception=e),
context=context,
user_locale=best_match_language(req))
if result is None:
return render_response(status=(204, 'No Content'))
elif isinstance(result, six.string_types):
return result
elif isinstance(result, webob.Response):
return result
elif isinstance(result, webob.exc.WSGIHTTPException):
return result
response_code = self._get_response_code(req)
return render_response(body=result, status=response_code,
method=req.environ['REQUEST_METHOD'])
def _get_response_code(self, req):
req_method = req.environ['REQUEST_METHOD']
controller = importutils.import_class('keystone.common.controller')
code = None
if isinstance(self, controller.V3Controller) and req_method == 'POST':
code = (201, 'Created')
return code
def _normalize_arg(self, arg):
return arg.replace(':', '_').replace('-', '_')
def _normalize_dict(self, d):
return {self._normalize_arg(k): v for (k, v) in d.items()}
def assert_admin(self, context):
"""Ensure the user is an admin.
:raises keystone.exception.Unauthorized: if a token could not be
found/authorized, a user is invalid, or a tenant is
invalid/not scoped.
:raises keystone.exception.Forbidden: if the user is not an admin and
does not have the admin role
"""
if not context['is_admin']:
user_token_ref = utils.get_token_ref(context)
validate_token_bind(context, user_token_ref)
creds = copy.deepcopy(user_token_ref.metadata)
try:
creds['user_id'] = user_token_ref.user_id
except exception.UnexpectedError:
LOG.debug('Invalid user')
raise exception.Unauthorized()
if user_token_ref.project_scoped:
creds['tenant_id'] = user_token_ref.project_id
else:
LOG.debug('Invalid tenant')
raise exception.Unauthorized()
creds['roles'] = user_token_ref.role_names
# Accept either is_admin or the admin role
self.policy_api.enforce(creds, 'admin_required', {})
def _attribute_is_empty(self, ref, attribute):
"""Returns true if the attribute in the given ref (which is a
dict) is empty or None.
"""
return ref.get(attribute) is None or ref.get(attribute) == ''
def _require_attribute(self, ref, attribute):
"""Ensures the reference contains the specified attribute.
Raise a ValidationError if the given attribute is not present
"""
if self._attribute_is_empty(ref, attribute):
msg = _('%s field is required and cannot be empty') % attribute
raise exception.ValidationError(message=msg)
def _require_attributes(self, ref, attrs):
"""Ensures the reference contains the specified attributes.
Raise a ValidationError if any of the given attributes is not present
"""
missing_attrs = [attribute for attribute in attrs
if self._attribute_is_empty(ref, attribute)]
if missing_attrs:
msg = _('%s field(s) cannot be empty') % ', '.join(missing_attrs)
raise exception.ValidationError(message=msg)
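# Illustrative usage (hypothetical controller code, not part of this file):
#   self._require_attribute(ref, 'name')
#   self._require_attributes(ref, ['name', 'domain_id'])
# either call raises exception.ValidationError when a listed field is
# missing or empty.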
def _get_trust_id_for_request(self, context):
"""Get the trust_id for a call.
Retrieve the trust_id from the token
Returns None if token is not trust scoped
"""
if ('token_id' not in context or
context.get('token_id') == CONF.admin_token):
LOG.debug(('will not lookup trust as the request auth token is '
'either absent or it is the system admin token'))
return None
token_ref = utils.get_token_ref(context)
return token_ref.trust_id
@classmethod
def base_url(cls, context, endpoint_type):
url = CONF['%s_endpoint' % endpoint_type]
if url:
substitutions = dict(
itertools.chain(CONF.items(), CONF.eventlet_server.items()))
url = url % substitutions
else:
# NOTE(jamielennox): if url is not set via the config file we
# should set it relative to the url that the user used to get here
# so as not to mess with version discovery. This is not perfect.
# host_url omits the path prefix, but there isn't another good
# solution that will work for all urls.
url = context['host_url']
return url.rstrip('/')
class Middleware(Application):
"""Base WSGI middleware.
These classes require an application to be
initialized that will be called next. By default the middleware will
simply call its wrapped app, or you can override __call__ to customize its
behavior.
"""
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = keystone.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import keystone.analytics
keystone.analytics.Analytics(app, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app, **local_config)
return _factory
def __init__(self, application):
super(Middleware, self).__init__()
self.application = application
def process_request(self, request):
"""Called on each request.
If this returns None, the next application down the stack will be
executed. If it returns a response then that response will be returned
and execution will stop here.
"""
return None
def process_response(self, request, response):
"""Do whatever you'd like to the response, based on the request."""
return response
@webob.dec.wsgify()
def __call__(self, request):
try:
response = self.process_request(request)
if response:
return response
response = request.get_response(self.application)
return self.process_response(request, response)
except exception.Error as e:
LOG.warning(six.text_type(e))
return render_exception(e, request=request,
user_locale=best_match_language(request))
except TypeError as e:
LOG.exception(six.text_type(e))
return render_exception(exception.ValidationError(e),
request=request,
user_locale=best_match_language(request))
except Exception as e:
LOG.exception(six.text_type(e))
return render_exception(exception.UnexpectedError(exception=e),
request=request,
user_locale=best_match_language(request))
class Debug(Middleware):
"""Helper class for debugging a WSGI application.
Can be inserted into any WSGI application chain to get information
about the request and response.
"""
@webob.dec.wsgify()
def __call__(self, req):
if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST ENVIRON', ('*' * 20))
for key, value in req.environ.items():
LOG.debug('%s = %s', key,
strutils.mask_password(value))
LOG.debug('')
LOG.debug('%s %s %s', ('*' * 20), 'REQUEST BODY', ('*' * 20))
for line in req.body_file:
LOG.debug('%s', strutils.mask_password(line))
LOG.debug('')
resp = req.get_response(self.application)
if not hasattr(LOG, 'isEnabledFor') or LOG.isEnabledFor(LOG.debug):
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE HEADERS', ('*' * 20))
for (key, value) in resp.headers.items():
LOG.debug('%s = %s', key, value)
LOG.debug('')
resp.app_iter = self.print_generator(resp.app_iter)
return resp
@staticmethod
def print_generator(app_iter):
"""Iterator that prints the contents of a wrapper string."""
LOG.debug('%s %s %s', ('*' * 20), 'RESPONSE BODY', ('*' * 20))
for part in app_iter:
LOG.debug(part)
yield part
class Router(object):
"""WSGI middleware that maps incoming requests to WSGI apps."""
def __init__(self, mapper):
"""Create a router for the given routes.Mapper.
Each route in `mapper` must specify a 'controller', which is a
WSGI app to call. You'll probably want to specify an 'action' as
well and have your controller be an object that can route
the request to the action-specific method.
Examples:
mapper = routes.Mapper()
sc = ServerController()
# Explicit mapping of one route to a controller+action
mapper.connect(None, '/svrlist', controller=sc, action='list')
# Actions are all implicitly defined
mapper.resource('server', 'servers', controller=sc)
# Pointing to an arbitrary WSGI app. You can specify the
# {path_info:.*} parameter so the target app can be handed just that
# section of the URL.
mapper.connect(None, '/v1.0/{path_info:.*}', controller=BlogApp())
"""
self.map = mapper
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
@webob.dec.wsgify()
def __call__(self, req):
"""Route the incoming request to a controller based on self.map.
If no match, return a 404.
"""
return self._router
@staticmethod
@webob.dec.wsgify()
def _dispatch(req):
"""Dispatch the request to the appropriate controller.
Called by self._router after matching the incoming request to a route
and putting the information into req.environ. Either returns 404
or the routed WSGI app's response.
"""
match = req.environ['wsgiorg.routing_args'][1]
if not match:
msg = _('The resource could not be found.')
return render_exception(exception.NotFound(msg),
request=req,
user_locale=best_match_language(req))
app = match['controller']
return app
class ComposingRouter(Router):
def __init__(self, mapper=None, routers=None):
if mapper is None:
mapper = routes.Mapper()
if routers is None:
routers = []
for router in routers:
router.add_routes(mapper)
super(ComposingRouter, self).__init__(mapper)
class ComposableRouter(Router):
"""Router that supports use by ComposingRouter."""
def __init__(self, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.add_routes(mapper)
super(ComposableRouter, self).__init__(mapper)
def add_routes(self, mapper):
"""Add routes to given mapper."""
pass
class ExtensionRouter(Router):
"""A router that allows extensions to supplement or overwrite routes.
Expects to be subclassed.
"""
def __init__(self, application, mapper=None):
if mapper is None:
mapper = routes.Mapper()
self.application = application
self.add_routes(mapper)
mapper.connect('/{path_info:.*}', controller=self.application)
super(ExtensionRouter, self).__init__(mapper)
def add_routes(self, mapper):
pass
@classmethod
def factory(cls, global_config, **local_config):
"""Used for paste app factories in paste.deploy config files.
Any local configuration (that is, values under the [filter:APPNAME]
section of the paste config) will be passed into the `__init__` method
as kwargs.
A hypothetical configuration would look like:
[filter:analytics]
redis_host = 127.0.0.1
paste.filter_factory = keystone.analytics:Analytics.factory
which would result in a call to the `Analytics` class as
import keystone.analytics
keystone.analytics.Analytics(app, redis_host='127.0.0.1')
You could of course re-implement the `factory` method in subclasses,
but using the kwarg passing it shouldn't be necessary.
"""
def _factory(app):
conf = global_config.copy()
conf.update(local_config)
return cls(app, **local_config)
return _factory
class RoutersBase(object):
"""Base class for Routers."""
def __init__(self):
self.v3_resources = []
def append_v3_routers(self, mapper, routers):
"""Append v3 routers.
Subclasses should override this method to map its routes.
Use self._add_resource() to map routes for a resource.
"""
def _add_resource(self, mapper, controller, path, rel,
get_action=None, head_action=None, get_head_action=None,
put_action=None, post_action=None, patch_action=None,
delete_action=None, get_post_action=None,
path_vars=None, status=json_home.Status.STABLE,
new_path=None):
if get_head_action:
getattr(controller, get_head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_head_action,
conditions=dict(method=['GET', 'HEAD']))
if get_action:
getattr(controller, get_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_action,
conditions=dict(method=['GET']))
if head_action:
getattr(controller, head_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=head_action,
conditions=dict(method=['HEAD']))
if put_action:
getattr(controller, put_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=put_action,
conditions=dict(method=['PUT']))
if post_action:
getattr(controller, post_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=post_action,
conditions=dict(method=['POST']))
if patch_action:
getattr(controller, patch_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=patch_action,
conditions=dict(method=['PATCH']))
if delete_action:
getattr(controller, delete_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=delete_action,
conditions=dict(method=['DELETE']))
if get_post_action:
getattr(controller, get_post_action) # ensure the attribute exists
mapper.connect(path, controller=controller, action=get_post_action,
conditions=dict(method=['GET', 'POST']))
resource_data = dict()
if path_vars:
resource_data['href-template'] = new_path or path
resource_data['href-vars'] = path_vars
else:
resource_data['href'] = new_path or path
json_home.Status.update_resource_data(resource_data, status)
self.v3_resources.append((rel, resource_data))
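# Illustrative example (hypothetical router code, not part of this file; the
# controller, actions and relation names below are assumptions):
#   self._add_resource(
#       mapper, widget_controller,
#       path='/widgets/{widget_id}',
#       get_head_action='get_widget',
#       patch_action='update_widget',
#       delete_action='delete_widget',
#       rel='widget',
#       path_vars={'widget_id': 'https://example.com/param/widget_id'})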
class V3ExtensionRouter(ExtensionRouter, RoutersBase):
"""Base class for V3 extension router."""
def __init__(self, application, mapper=None):
self.v3_resources = list()
super(V3ExtensionRouter, self).__init__(application, mapper)
def _update_version_response(self, response_data):
response_data['resources'].update(self.v3_resources)
@webob.dec.wsgify()
def __call__(self, request):
if request.path_info != '/':
# Not a request for version info so forward to super.
return super(V3ExtensionRouter, self).__call__(request)
response = request.get_response(self.application)
if response.status_code != 200:
# The request failed, so don't update the response.
return response
if response.headers['Content-Type'] != 'application/json-home':
# Not a request for JSON Home document, so don't update the
# response.
return response
response_data = jsonutils.loads(response.body)
self._update_version_response(response_data)
response.body = jsonutils.dumps(response_data,
cls=utils.SmarterEncoder)
return response
def render_response(body=None, status=None, headers=None, method=None):
"""Forms a WSGI response."""
if headers is None:
headers = []
else:
headers = list(headers)
headers.append(('Vary', 'X-Auth-Token'))
if body is None:
body = ''
status = status or (204, 'No Content')
else:
content_types = [v for h, v in headers if h == 'Content-Type']
if content_types:
content_type = content_types[0]
else:
content_type = None
if content_type is None or content_type in JSON_ENCODE_CONTENT_TYPES:
body = jsonutils.dumps(body, cls=utils.SmarterEncoder)
if content_type is None:
headers.append(('Content-Type', 'application/json'))
status = status or (200, 'OK')
resp = webob.Response(body=body,
status='%s %s' % status,
headerlist=headers)
if method and method.upper() == 'HEAD':
# NOTE(morganfainberg): HEAD requests should return the same status
# as a GET request and same headers (including content-type and
# content-length). The webob.Response object automatically changes
# content-length (and other headers) if the body is set to b''. Capture
# all headers and reset them on the response object after clearing the
# body. The body can only be set to a binary-type (not TextType or
# NoneType), so b'' is used here and should be compatible with
# both py2x and py3x.
stored_headers = resp.headers.copy()
resp.body = b''
for header, value in stored_headers.items():
resp.headers[header] = value
return resp
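# Illustrative example (not part of the original module): a dict result from a
# controller typically flows through here as
#   render_response(body={'hello': 'world'}, status=(200, 'OK'), method='GET')
# which yields a webob.Response with a JSON-encoded body, a Content-Type of
# application/json and a 'Vary: X-Auth-Token' header.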
def render_exception(error, context=None, request=None, user_locale=None):
"""Forms a WSGI response based on the current error."""
error_message = error.args[0]
message = oslo_i18n.translate(error_message, desired_locale=user_locale)
if message is error_message:
# translate() didn't do anything because it wasn't a Message,
# convert to a string.
message = six.text_type(message)
body = {'error': {
'code': error.code,
'title': error.title,
'message': message,
}}
headers = []
if isinstance(error, exception.AuthPluginException):
body['error']['identity'] = error.authentication
elif isinstance(error, exception.Unauthorized):
url = CONF.public_endpoint
if not url:
if request:
context = {'host_url': request.host_url}
if context:
url = Application.base_url(context, 'public')
else:
url = 'http://localhost:%d' % CONF.eventlet_server.public_port
else:
substitutions = dict(
itertools.chain(CONF.items(), CONF.eventlet_server.items()))
url = url % substitutions
headers.append(('WWW-Authenticate', 'Keystone uri="%s"' % url))
return render_response(status=(error.code, error.title),
body=body,
headers=headers)
| apache-2.0 |
J-Liu/qemu | tests/migration/guestperf/plot.py | 14 | 19005 | #
# Migration test graph plotting
#
# Copyright (c) 2016 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, see <http://www.gnu.org/licenses/>.
#
import sys
class Plot(object):
# Generated using
# http://tools.medialab.sciences-po.fr/iwanthue/
COLORS = ["#CD54D0",
"#79D94C",
"#7470CD",
"#D2D251",
"#863D79",
"#76DDA6",
"#D4467B",
"#61923D",
"#CB9CCA",
"#D98F36",
"#8CC8DA",
"#CE4831",
"#5E7693",
"#9B803F",
"#412F4C",
"#CECBA6",
"#6D3229",
"#598B73",
"#C8827C",
"#394427"]
def __init__(self,
reports,
migration_iters,
total_guest_cpu,
split_guest_cpu,
qemu_cpu,
vcpu_cpu):
self._reports = reports
self._migration_iters = migration_iters
self._total_guest_cpu = total_guest_cpu
self._split_guest_cpu = split_guest_cpu
self._qemu_cpu = qemu_cpu
self._vcpu_cpu = vcpu_cpu
self._color_idx = 0
def _next_color(self):
color = self.COLORS[self._color_idx]
self._color_idx += 1
if self._color_idx >= len(self.COLORS):
self._color_idx = 0
return color
def _get_progress_label(self, progress):
if progress:
return "\n\n" + "\n".join(
["Status: %s" % progress._status,
"Iteration: %d" % progress._ram._iterations,
"Throttle: %02d%%" % progress._throttle_pcent,
"Dirty rate: %dMB/s" % (progress._ram._dirty_rate_pps * 4 / 1024.0)])
else:
return "\n\n" + "\n".join(
["Status: %s" % "none",
"Iteration: %d" % 0])
def _find_start_time(self, report):
startqemu = report._qemu_timings._records[0]._timestamp
startguest = report._guest_timings._records[0]._timestamp
if startqemu < startguest:
return startqemu
else:
return startguest
def _get_guest_max_value(self, report):
maxvalue = 0
for record in report._guest_timings._records:
if record._value > maxvalue:
maxvalue = record._value
return maxvalue
def _get_qemu_max_value(self, report):
maxvalue = 0
oldvalue = None
oldtime = None
for record in report._qemu_timings._records:
if oldvalue is not None:
cpudelta = (record._value - oldvalue) / 1000.0
timedelta = record._timestamp - oldtime
if timedelta == 0:
continue
util = cpudelta / timedelta * 100.0
else:
util = 0
oldvalue = record._value
oldtime = record._timestamp
if util > maxvalue:
maxvalue = util
return maxvalue
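# Worked example (illustrative; assumes the cumulative CPU counters are in
# milliseconds): 2500ms of CPU consumed over a 10s wall-clock window gives
#   util = (2500 / 1000.0) / 10 * 100.0 == 25.0   # percent host CPU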
def _get_total_guest_cpu_graph(self, report, starttime):
xaxis = []
yaxis = []
labels = []
progress_idx = -1
for record in report._guest_timings._records:
while ((progress_idx + 1) < len(report._progress_history) and
report._progress_history[progress_idx + 1]._now < record._timestamp):
progress_idx = progress_idx + 1
if progress_idx >= 0:
progress = report._progress_history[progress_idx]
else:
progress = None
xaxis.append(record._timestamp - starttime)
yaxis.append(record._value)
labels.append(self._get_progress_label(progress))
from plotly import graph_objs as go
return go.Scatter(x=xaxis,
y=yaxis,
name="Guest PIDs: %s" % report._scenario._name,
mode='lines',
line={
"dash": "solid",
"color": self._next_color(),
"shape": "linear",
"width": 1
},
text=labels)
def _get_split_guest_cpu_graphs(self, report, starttime):
threads = {}
for record in report._guest_timings._records:
if record._tid in threads:
continue
threads[record._tid] = {
"xaxis": [],
"yaxis": [],
"labels": [],
}
progress_idx = -1
for record in report._guest_timings._records:
while ((progress_idx + 1) < len(report._progress_history) and
report._progress_history[progress_idx + 1]._now < record._timestamp):
progress_idx = progress_idx + 1
if progress_idx >= 0:
progress = report._progress_history[progress_idx]
else:
progress = None
threads[record._tid]["xaxis"].append(record._timestamp - starttime)
threads[record._tid]["yaxis"].append(record._value)
threads[record._tid]["labels"].append(self._get_progress_label(progress))
graphs = []
from plotly import graph_objs as go
for tid in threads.keys():
graphs.append(
go.Scatter(x=threads[tid]["xaxis"],
y=threads[tid]["yaxis"],
name="PID %s: %s" % (tid, report._scenario._name),
mode="lines",
line={
"dash": "solid",
"color": self._next_color(),
"shape": "linear",
"width": 1
},
text=threads[tid]["labels"]))
return graphs
def _get_migration_iters_graph(self, report, starttime):
xaxis = []
yaxis = []
labels = []
for progress in report._progress_history:
xaxis.append(progress._now - starttime)
yaxis.append(0)
labels.append(self._get_progress_label(progress))
from plotly import graph_objs as go
return go.Scatter(x=xaxis,
y=yaxis,
text=labels,
name="Migration iterations",
mode="markers",
marker={
"color": self._next_color(),
"symbol": "star",
"size": 5
})
def _get_qemu_cpu_graph(self, report, starttime):
xaxis = []
yaxis = []
labels = []
progress_idx = -1
first = report._qemu_timings._records[0]
abstimestamps = [first._timestamp]
absvalues = [first._value]
for record in report._qemu_timings._records[1:]:
while ((progress_idx + 1) < len(report._progress_history) and
report._progress_history[progress_idx + 1]._now < record._timestamp):
progress_idx = progress_idx + 1
if progress_idx >= 0:
progress = report._progress_history[progress_idx]
else:
progress = None
oldvalue = absvalues[-1]
oldtime = abstimestamps[-1]
cpudelta = (record._value - oldvalue) / 1000.0
timedelta = record._timestamp - oldtime
if timedelta == 0:
continue
util = cpudelta / timedelta * 100.0
abstimestamps.append(record._timestamp)
absvalues.append(record._value)
xaxis.append(record._timestamp - starttime)
yaxis.append(util)
labels.append(self._get_progress_label(progress))
from plotly import graph_objs as go
return go.Scatter(x=xaxis,
y=yaxis,
yaxis="y2",
name="QEMU: %s" % report._scenario._name,
mode='lines',
line={
"dash": "solid",
"color": self._next_color(),
"shape": "linear",
"width": 1
},
text=labels)
def _get_vcpu_cpu_graphs(self, report, starttime):
threads = {}
for record in report._vcpu_timings._records:
if record._tid in threads:
continue
threads[record._tid] = {
"xaxis": [],
"yaxis": [],
"labels": [],
"absvalue": [record._value],
"abstime": [record._timestamp],
}
progress_idx = -1
for record in report._vcpu_timings._records:
while ((progress_idx + 1) < len(report._progress_history) and
report._progress_history[progress_idx + 1]._now < record._timestamp):
progress_idx = progress_idx + 1
if progress_idx >= 0:
progress = report._progress_history[progress_idx]
else:
progress = None
oldvalue = threads[record._tid]["absvalue"][-1]
oldtime = threads[record._tid]["abstime"][-1]
cpudelta = (record._value - oldvalue) / 1000.0
timedelta = record._timestamp - oldtime
if timedelta == 0:
continue
util = cpudelta / timedelta * 100.0
if util > 100:
util = 100
threads[record._tid]["absvalue"].append(record._value)
threads[record._tid]["abstime"].append(record._timestamp)
threads[record._tid]["xaxis"].append(record._timestamp - starttime)
threads[record._tid]["yaxis"].append(util)
threads[record._tid]["labels"].append(self._get_progress_label(progress))
graphs = []
from plotly import graph_objs as go
for tid in threads.keys():
graphs.append(
go.Scatter(x=threads[tid]["xaxis"],
y=threads[tid]["yaxis"],
yaxis="y2",
name="VCPU %s: %s" % (tid, report._scenario._name),
mode="lines",
line={
"dash": "solid",
"color": self._next_color(),
"shape": "linear",
"width": 1
},
text=threads[tid]["labels"]))
return graphs
def _generate_chart_report(self, report):
graphs = []
starttime = self._find_start_time(report)
if self._total_guest_cpu:
graphs.append(self._get_total_guest_cpu_graph(report, starttime))
if self._split_guest_cpu:
graphs.extend(self._get_split_guest_cpu_graphs(report, starttime))
if self._qemu_cpu:
graphs.append(self._get_qemu_cpu_graph(report, starttime))
if self._vcpu_cpu:
graphs.extend(self._get_vcpu_cpu_graphs(report, starttime))
if self._migration_iters:
graphs.append(self._get_migration_iters_graph(report, starttime))
return graphs
def _generate_annotation(self, starttime, progress):
return {
"text": progress._status,
"x": progress._now - starttime,
"y": 10,
}
def _generate_annotations(self, report):
starttime = self._find_start_time(report)
annotations = {}
started = False
for progress in report._progress_history:
if progress._status == "setup":
continue
if progress._status not in annotations:
annotations[progress._status] = self._generate_annotation(starttime, progress)
return annotations.values()
def _generate_chart(self):
from plotly.offline import plot
from plotly import graph_objs as go
graphs = []
yaxismax = 0
yaxismax2 = 0
for report in self._reports:
graphs.extend(self._generate_chart_report(report))
maxvalue = self._get_guest_max_value(report)
if maxvalue > yaxismax:
yaxismax = maxvalue
maxvalue = self._get_qemu_max_value(report)
if maxvalue > yaxismax2:
yaxismax2 = maxvalue
yaxismax += 100
if not self._qemu_cpu:
yaxismax2 = 110
yaxismax2 += 10
annotations = []
if self._migration_iters:
for report in self._reports:
annotations.extend(self._generate_annotations(report))
layout = go.Layout(title="Migration comparison",
xaxis={
"title": "Wallclock time (secs)",
"showgrid": False,
},
yaxis={
"title": "Memory update speed (ms/GB)",
"showgrid": False,
"range": [0, yaxismax],
},
yaxis2={
"title": "Hostutilization (%)",
"overlaying": "y",
"side": "right",
"range": [0, yaxismax2],
"showgrid": False,
},
annotations=annotations)
figure = go.Figure(data=graphs, layout=layout)
return plot(figure,
show_link=False,
include_plotlyjs=False,
output_type="div")
def _generate_report(self):
pieces = []
for report in self._reports:
pieces.append("""
<h3>Report %s</h3>
<table>
""" % report._scenario._name)
pieces.append("""
<tr class="subhead">
<th colspan="2">Test config</th>
</tr>
<tr>
<th>Emulator:</th>
<td>%s</td>
</tr>
<tr>
<th>Kernel:</th>
<td>%s</td>
</tr>
<tr>
<th>Ramdisk:</th>
<td>%s</td>
</tr>
<tr>
<th>Transport:</th>
<td>%s</td>
</tr>
<tr>
<th>Host:</th>
<td>%s</td>
</tr>
""" % (report._binary, report._kernel,
report._initrd, report._transport, report._dst_host))
hardware = report._hardware
pieces.append("""
<tr class="subhead">
<th colspan="2">Hardware config</th>
</tr>
<tr>
<th>CPUs:</th>
<td>%d</td>
</tr>
<tr>
<th>RAM:</th>
<td>%d GB</td>
</tr>
<tr>
<th>Source CPU bind:</th>
<td>%s</td>
</tr>
<tr>
<th>Source RAM bind:</th>
<td>%s</td>
</tr>
<tr>
<th>Dest CPU bind:</th>
<td>%s</td>
</tr>
<tr>
<th>Dest RAM bind:</th>
<td>%s</td>
</tr>
<tr>
<th>Preallocate RAM:</th>
<td>%s</td>
</tr>
<tr>
<th>Locked RAM:</th>
<td>%s</td>
</tr>
<tr>
<th>Huge pages:</th>
<td>%s</td>
</tr>
""" % (hardware._cpus, hardware._mem,
",".join(hardware._src_cpu_bind),
",".join(hardware._src_mem_bind),
",".join(hardware._dst_cpu_bind),
",".join(hardware._dst_mem_bind),
"yes" if hardware._prealloc_pages else "no",
"yes" if hardware._locked_pages else "no",
"yes" if hardware._huge_pages else "no"))
scenario = report._scenario
pieces.append("""
<tr class="subhead">
<th colspan="2">Scenario config</th>
</tr>
<tr>
<th>Max downtime:</th>
<td>%d milli-sec</td>
</tr>
<tr>
<th>Max bandwidth:</th>
<td>%d MB/sec</td>
</tr>
<tr>
<th>Max iters:</th>
<td>%d</td>
</tr>
<tr>
<th>Max time:</th>
<td>%d secs</td>
</tr>
<tr>
<th>Pause:</th>
<td>%s</td>
</tr>
<tr>
<th>Pause iters:</th>
<td>%d</td>
</tr>
<tr>
<th>Post-copy:</th>
<td>%s</td>
</tr>
<tr>
<th>Post-copy iters:</th>
<td>%d</td>
</tr>
<tr>
<th>Auto-converge:</th>
<td>%s</td>
</tr>
<tr>
<th>Auto-converge iters:</th>
<td>%d</td>
</tr>
<tr>
<th>MT compression:</th>
<td>%s</td>
</tr>
<tr>
<th>MT compression threads:</th>
<td>%d</td>
</tr>
<tr>
<th>XBZRLE compression:</th>
<td>%s</td>
</tr>
<tr>
<th>XBZRLE compression cache:</th>
<td>%d%% of RAM</td>
</tr>
""" % (scenario._downtime, scenario._bandwidth,
scenario._max_iters, scenario._max_time,
"yes" if scenario._pause else "no", scenario._pause_iters,
"yes" if scenario._post_copy else "no", scenario._post_copy_iters,
"yes" if scenario._auto_converge else "no", scenario._auto_converge_step,
"yes" if scenario._compression_mt else "no", scenario._compression_mt_threads,
"yes" if scenario._compression_xbzrle else "no", scenario._compression_xbzrle_cache))
pieces.append("""
</table>
""")
return "\n".join(pieces)
def _generate_style(self):
return """
#report table tr th {
text-align: right;
}
#report table tr td {
text-align: left;
}
#report table tr.subhead th {
background: rgb(192, 192, 192);
text-align: center;
}
"""
def generate_html(self, fh):
print >>fh, """<html>
<head>
<script type="text/javascript" src="plotly.min.js">
</script>
<style type="text/css">
%s
</style>
<title>Migration report</title>
</head>
<body>
<h1>Migration report</h1>
<h2>Chart summary</h2>
<div id="chart">
""" % self._generate_style()
print >>fh, self._generate_chart()
print >>fh, """
</div>
<h2>Report details</h2>
<div id="report">
"""
print >>fh, self._generate_report()
print >>fh, """
</div>
</body>
</html>
"""
def generate(self, filename):
if filename is None:
self.generate_html(sys.stdout)
else:
with open(filename, "w") as fh:
self.generate_html(fh)
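# Illustrative driver (hypothetical, not part of this module): assuming the
# reporter class above is constructed with a list of loaded guestperf reports
# and the per-graph toggles used by _generate_chart_report(), a caller might
# do something like:
#
#     plot = Plot(reports, migration_iters=True, total_guest_cpu=True,
#                 split_guest_cpu=False, qemu_cpu=True, vcpu_cpu=True)
#     plot.generate("migration-report.html")
#
# The class name and constructor signature here are placeholders; the real
# ones are defined earlier in this file.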
| gpl-2.0 |
domenicosolazzo/practice-django | venv/lib/python2.7/site-packages/django/contrib/auth/tests/custom_user.py | 185 | 5668 | from django.db import models
from django.contrib.auth.models import (
BaseUserManager,
AbstractBaseUser,
AbstractUser,
UserManager,
PermissionsMixin,
Group,
Permission,
)
# The custom User uses email as the unique identifier, and requires
# that every user provide a date of birth. This lets us test
# changes in username datatype, and non-text required fields.
class CustomUserManager(BaseUserManager):
def create_user(self, email, date_of_birth, password=None):
"""
Creates and saves a User with the given email and password.
"""
if not email:
raise ValueError('Users must have an email address')
user = self.model(
email=self.normalize_email(email),
date_of_birth=date_of_birth,
)
user.set_password(password)
user.save(using=self._db)
return user
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_admin = True
u.save(using=self._db)
return u
class CustomUser(AbstractBaseUser):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
is_active = models.BooleanField(default=True)
is_admin = models.BooleanField(default=False)
date_of_birth = models.DateField()
custom_objects = CustomUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __unicode__(self):
return self.email
# Maybe required?
def get_group_permissions(self, obj=None):
return set()
def get_all_permissions(self, obj=None):
return set()
def has_perm(self, perm, obj=None):
return True
def has_perms(self, perm_list, obj=None):
return True
def has_module_perms(self, app_label):
return True
# Admin required fields
@property
def is_staff(self):
return self.is_admin
# At this point, temporarily remove the groups and user_permissions M2M
# fields from the AbstractUser class, so they don't clash with the
# related_name accessors that those fields set up.
old_au_local_m2m = AbstractUser._meta.local_many_to_many
old_pm_local_m2m = PermissionsMixin._meta.local_many_to_many
groups = models.ManyToManyField(Group, blank=True)
groups.contribute_to_class(PermissionsMixin, "groups")
user_permissions = models.ManyToManyField(Permission, blank=True)
user_permissions.contribute_to_class(PermissionsMixin, "user_permissions")
PermissionsMixin._meta.local_many_to_many = [groups, user_permissions]
AbstractUser._meta.local_many_to_many = [groups, user_permissions]
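# (The original M2M fields are restored at the bottom of this module; see the
# "Undo swap hack" comment below.)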
# The extension user is a simple extension of the built-in user class,
# adding a required date_of_birth field. This allows us to check for
# any hard references to the name "User" in forms/handlers etc.
class ExtensionUser(AbstractUser):
date_of_birth = models.DateField()
custom_objects = UserManager()
REQUIRED_FIELDS = AbstractUser.REQUIRED_FIELDS + ['date_of_birth']
class Meta:
app_label = 'auth'
# The CustomPermissionsUser users email as the identifier, but uses the normal
# Django permissions model. This allows us to check that the PermissionsMixin
# includes everything that is needed to interact with the ModelBackend.
class CustomPermissionsUserManager(CustomUserManager):
def create_superuser(self, email, password, date_of_birth):
u = self.create_user(email, password=password, date_of_birth=date_of_birth)
u.is_superuser = True
u.save(using=self._db)
return u
class CustomPermissionsUser(AbstractBaseUser, PermissionsMixin):
email = models.EmailField(verbose_name='email address', max_length=255, unique=True)
date_of_birth = models.DateField()
custom_objects = CustomPermissionsUserManager()
USERNAME_FIELD = 'email'
REQUIRED_FIELDS = ['date_of_birth']
class Meta:
app_label = 'auth'
def get_full_name(self):
return self.email
def get_short_name(self):
return self.email
def __unicode__(self):
return self.email
class IsActiveTestUser1(AbstractBaseUser):
"""
This test user class and derivatives test the default is_active behavior
"""
username = models.CharField(max_length=30, unique=True)
custom_objects = BaseUserManager()
USERNAME_FIELD = 'username'
class Meta:
app_label = 'auth'
# the is_active attr is provided by AbstractBaseUser
class CustomUserNonUniqueUsername(AbstractBaseUser):
"A user with a non-unique username"
username = models.CharField(max_length=30)
USERNAME_FIELD = 'username'
class Meta:
app_label = 'auth'
class CustomUserNonListRequiredFields(AbstractBaseUser):
"A user with a non-list REQUIRED_FIELDS"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = 'date_of_birth'
class Meta:
app_label = 'auth'
class CustomUserBadRequiredFields(AbstractBaseUser):
"A user with a non-unique username"
username = models.CharField(max_length=30, unique=True)
date_of_birth = models.DateField()
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = ['username', 'date_of_birth']
class Meta:
app_label = 'auth'
# Undo swap hack
AbstractUser._meta.local_many_to_many = old_au_local_m2m
PermissionsMixin._meta.local_many_to_many = old_pm_local_m2m
| mit |
haad/ansible | lib/ansible/modules/network/panos/panos_import.py | 13 | 5075 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: panos_import
short_description: import file on PAN-OS devices
description:
- Import file on PAN-OS device
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
- pan-python
- requests
- requests_toolbelt
options:
ip_address:
description:
- IP address (or hostname) of PAN-OS device.
required: true
password:
description:
- Password for device authentication.
required: true
username:
description:
- Username for device authentication.
required: false
default: "admin"
category:
description:
- Category of file uploaded. The default is software.
required: false
default: software
file:
description:
- Location of the file to import into device.
required: false
default: None
url:
description:
- URL of the file that will be imported to device.
required: false
default: None
'''
EXAMPLES = '''
# import software image PanOS_vm-6.1.1 on 192.168.1.1
- name: import software image into PAN-OS
panos_import:
ip_address: 192.168.1.1
username: admin
password: admin
file: /tmp/PanOS_vm-6.1.1
category: software
'''
RETURN = '''
# Default return values
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception
import os.path
import xml.etree.ElementTree
import tempfile
import shutil
import os
try:
import pan.xapi
import requests
import requests_toolbelt
HAS_LIB = True
except ImportError:
HAS_LIB = False
def import_file(xapi, module, ip_address, file_, category):
xapi.keygen()
params = {
'type': 'import',
'category': category,
'key': xapi.api_key
}
filename = os.path.basename(file_)
mef = requests_toolbelt.MultipartEncoder(
fields={
'file': (filename, open(file_, 'rb'), 'application/octet-stream')
}
)
r = requests.post(
'https://' + ip_address + '/api/',
verify=False,
params=params,
headers={'Content-Type': mef.content_type},
data=mef
)
# if something goes wrong just raise an exception
r.raise_for_status()
resp = xml.etree.ElementTree.fromstring(r.content)
if resp.attrib['status'] == 'error':
module.fail_json(msg=r.content)
return True, filename
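# For reference, the upload performed by import_file() above is roughly
# equivalent to the following (illustrative only, not executed by this module):
#   curl -k -F 'file=@<path>' \
#       'https://<ip_address>/api/?type=import&category=<category>&key=<api_key>'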
def download_file(url):
r = requests.get(url, stream=True)
fo = tempfile.NamedTemporaryFile(prefix='ai', delete=False)
shutil.copyfileobj(r.raw, fo)
fo.close()
return fo.name
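# NB: the NamedTemporaryFile above uses delete=False, so the caller is
# responsible for removing it; main() does this via delete_file() when the
# file was fetched from a URL.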
def delete_file(path):
os.remove(path)
def main():
argument_spec = dict(
ip_address=dict(required=True),
password=dict(required=True, no_log=True),
username=dict(default='admin'),
category=dict(default='software'),
file=dict(),
url=dict()
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, required_one_of=[['file', 'url']])
if not HAS_LIB:
module.fail_json(msg='pan-python, requests, and requests_toolbelt are required for this module')
ip_address = module.params["ip_address"]
password = module.params["password"]
username = module.params['username']
xapi = pan.xapi.PanXapi(
hostname=ip_address,
api_username=username,
api_password=password
)
file_ = module.params['file']
url = module.params['url']
category = module.params['category']
# we can get file from URL or local storage
if url is not None:
file_ = download_file(url)
try:
changed, filename = import_file(xapi, module, ip_address, file_, category)
except Exception:
exc = get_exception()
module.fail_json(msg=exc.message)
# cleanup and delete file if local
if url is not None:
delete_file(file_)
module.exit_json(changed=changed, filename=filename, msg="okey dokey")
if __name__ == '__main__':
main()
| gpl-3.0 |
jenix21/DarunGrim | Src/Scripts/Server/HTMLPages.py | 1 | 21721 | MainMenu = """
<P>[
<a href="/ShowProjects">Projects</a> /
<a href="/ShowFileImport">Files Import</a> /
<a href="/ShowFileList">Files List</a> /
<a href="/ShowFileSearch">File Search</a> /
<a href="/ShowMSPatchList">Microsoft Patches List</a> /
<a href="/">About</a>
]
<P>
"""
BannerText = """
<PRE>
___ ___ ___ ___ ___
/\ \ /\ \ /\ \ /\__\ /\__\
/::\ \ /::\ \ /::\ \ /:/ / /::| |
/:/\:\ \ /:/\:\ \ /:/\:\ \ /:/ / /:|:| |
/:/ \:\__\ /::\~\:\ \ /::\~\:\ \ /:/ / ___ /:/|:| |__
/:/__/ \:|__| /:/\:\ \:\__\ /:/\:\ \:\__\ /:/__/ /\__\ /:/ |:| /\__\
\:\ \ /:/ / \/__\:\/:/ / \/_|::\/:/ / \:\ \ /:/ / \/__|:|/:/ /
\:\ /:/ / \::/ / |:|::/ / \:\ /:/ / |:/:/ /
\:\/:/ / /:/ / |:|\/__/ \:\/:/ / |::/ /
\::/__/ /:/ / |:| | \::/ / /:/ /
~~ \/__/ \|__| \/__/ \/__/
___ ___ ___
/\ \ /\ \ ___ /\__\
/::\ \ /::\ \ /\ \ /::| |
/:/\:\ \ /:/\:\ \ \:\ \ /:|:| |
/:/ \:\ \ /::\~\:\ \ /::\__\ /:/|:|__|__
/:/__/_\:\__\ /:/\:\ \:\__\ __/:/\/__/ /:/ |::::\__\
\:\ /\ \/__/ \/_|::\/:/ / /\/:/ / \/__/~~/:/ /
\:\ \:\__\ |:|::/ / \::/__/ /:/ /
\:\/:/ / |:|\/__/ \:\__\ /:/ /
\::/ / |:| | \/__/ /:/ /
\/__/ \|__| \/__/
</PRE>
<P ALIGN="RIGHT">
Made by <a href="http://twitter.com/ohjeongwook" target="_new">Jeongwook "Matt" Oh<a>
</P>
<p ALIGN="RIGHT">
<a href="mailto:[email protected]">Bug Reporting & Feature Requests<a>
</P>
<p ALIGN="RIGHT">
<a href="http://darungrim.org" target="_new">DarunGrim Main Site<a>
</P>
"""
HeadText = """
<link rel="stylesheet" href="/data/themes/smoothness/jquery-ui-1.8.5.custom.css">
<link rel="stylesheet" href="/data/themes/basic/style.css"/>
<script src="/data/jquery/jquery-1.4.3.min.js"></script>
<script src="/data/jquery/ui/jquery.ui.core.js"></script>
<script src="/data/jquery/ui/jquery.ui.widget.js"></script>
<script src="/data/jquery/ui/jquery.ui.datepicker.js"></script>
<script type="text/javascript" src="/data/jquery/tablesorter/jquery.tablesorter.js"></script>
<script type="text/javascript">
$(document).ready(function()
{
$("#datepicker_from").datepicker();
$("#datepicker_to").datepicker();
$("#mainTable").tablesorter( {sortList:[[0,0],[2,1]], widgets: ['zebra']} );
}
);
function checkAll(){
for (var i=0;i<document.forms[0].elements.length;i++)
{
var e=document.forms[0].elements[i];
if ((e.name != 'allbox') && (e.type=='checkbox'))
{
e.checked=document.forms[0].allbox.checked;
}
}
}
</script>
"""
IndexTemplateText = """<%def name="layoutdata()">
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata args="col">\
</%self:layoutdata>
</div>
""" + BannerText + """
</body>
</html>"""
PatchesTemplateText = """<%def name="layoutdata(somedata)">
<table class="Table">
% for item in somedata:
<tr>
<td><a href="PatchInfo?id=${item.id}">${item.name}</a></td>
<td>${item.title}</td>
</tr>
% endfor
</table>
<a href="/ShowMSPatchList?operation=update">Check for MS Patches Updates</a>
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata somedata="${patches}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
PatchInfoTemplateText = """<%def name="layoutdata(somedata)">
<p><a href="/ShowMSPatchList">List</a>
<table class="Table">
% for item in somedata:
<tr>
<td><a href="DownloadInfo?patch_id=${id}&id=${item.id}">${item.label}</a></td>
<td>${item.filename}</td>
</tr>
% endfor
</table>
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata somedata="${downloads}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
DownloadInfoTemplateText = """<%def name="layoutdata(somedata)">
<p><a href="/ShowMSPatchList">List</a>
><a href="PatchInfo?id=${patch_id}">${patch_name}</a>
<table class="Table">
% for item in somedata:
<tr>
<td><a href="FileInfo?patch_id=${patch_id}&download_id=${id}&id=${item.id}">${item.filename}</a></td>
<td>${item.version_string}</td>
</tr>
% endfor
</table>
% if len( somedata ) == 0:
<p><a href="/DownloadInfo?patch_id=${patch_id}&id=${id}&operation=extract">Download and Extract Patches Automatically</a> <p>(In case this fails, you need to extract and upload files manually)
% endif
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata somedata="${files}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
FileInfoTemplateText = """<%def name="layoutdata(somedata)">
<p><a href="/ShowMSPatchList">List</a>
><a href="PatchInfo?id=${patch_id}">${patch_name}</a>
><a href="DownloadInfo?patch_id=${patch_id}&id=${download_id}">${download_label}</a>
<table class="Table">
<tr>
<td>Company Name</td>
<td>${somedata.company_name}</td>
</tr>
<tr>
<td>Operating System</td>
<td>${somedata.operating_system}</td>
</tr>
<tr>
<td>Service Pack</td>
<td>${somedata.service_pack}</td>
</tr>
<tr>
<td>Filename</td>
<td>${somedata.filename}</td>
</tr>
<tr>
<td>Unpatched Filename</td>
<td>${source_patch_name}: ${source_filename}</td>
</tr>
<tr>
<td>Patched Filename</td>
<td>${target_patch_name}: ${target_filename}</td>
</tr>
</table>
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata somedata="${file_index_entry}" args="col">\
</%self:layoutdata>
<form name="input" action="StartDiff" method="get">
<input type="hidden" name="patch_id" value="${patch_id}"/>
<input type="hidden" name="download_id" value="${download_id}"/>
<input type="hidden" name="file_id" value="${id}"/>
<input type="hidden" name="source_id" value="${source_id}"/>
<input type="hidden" name="target_id" value="${target_id}"/>
<input type="submit" value="Start Diffing" />
</form>
</div>
</body>
</html>"""
DiffInfoTemplateText = """<%def name="layoutdata(somedata)">
<META HTTP-EQUIV="Refresh" CONTENT="1; URL="ShowFunctionMatchInfo?source_id=${source_id}&target_id=${target_id}">
<p><a href="ShowFunctionMatchInfo?databasename=source_id=${source_id}&target_id=${target_id}">Show Function Match Table</a>
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata somedata="${file_index_entry}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
FileListCompanyNamesTemplateText = """<%def name="layoutdata( filenames )">
<title>Company Names</title>
<table class="Table">
<tr>
% for i, filename in enumerate(filenames):
<td><a href="/ShowFileList?company_name=${filename}">${filename}</a></td>
% if i % 5 == 4:
</tr><tr>
% endif
% endfor
</tr>
</table>
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata filenames="${filenames}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
FileListFileNamesTemplateText = """<%def name="layoutdata(company_name, filenames, numVersions)">
<title>File Names for ${company_name}</title>
Total <b>${len(filenames)}</b> files for <b>${company_name}</b><br>
Back to <a href="/ShowFileList">Company Names</a>
<table class="Table">
<tr>
<th>FILENAME</th>
<th># of versions</th>
</tr>
% for i, filename in enumerate(filenames):
<tr>
<td><a href="/ShowFileList?company_name=${company_name}&filename=${filename}">${filename}</a></td>
<td>${numVersions[i]}</td>
</tr>
% endfor
</table>
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata company_name="${company_name}" filenames="${filenames}" numVersions="${numVersions}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
ProjectSelectionListTemplate = """
<select name="project_id">
% for project in projects:
<option value=${project.id}>${project.name}</option>
% endfor
</select>
"""
ProjectSelectionTemplate = """<%def name="layoutdata()">
<form name="input" action="AddToProject">
""" + ProjectSelectionListTemplate + """
% for one_id in ids:
<input type="hidden" name="id" value="${one_id}"/>
% endfor
<input type="submit" value="Choose"/>
</form>
</%def>
"""
FileListTemplate = """<%def name="layoutdata(company_name, filename, version_string, file_information_list)">
<title>Version String for ${company_name}:${filename}</title>
<p><a href="/ShowFileList?company_name=${company_name}">${company_name}</a>
<form name="input" action="AddToProject">
<table id="mainTable" class="SortedTable">
<thead>
<tr>
<th></th>
<th>Filename</th>
<th>Version String</th>
<th>Creation</th>
<th>Modification</th>
<th>Addition Time</th>
<th>MD5</th>
<th>SHA1</th>
<th>Arch.</th>
<th>Operation</th>
</tr>
</thead>
<tbody>
% for (name,ctime_str,mtime_str,add_time_str,md5,sha1,id,version_str,project_member_id, arch_info) in file_information_list:
<tr>
<td>
<input type="checkbox" name="id" value="${id}" />
</td>
<td>${name}</td>
<td>${version_str}</td>
<td>${ctime_str}</td>
<td>${mtime_str}</td>
<td>${add_time_str}</td>
<td>${md5}</td>
<td>${sha1}</td>
<td>${arch_info}</td>
<td>
<a href=OpenInIDA?id=${id} target=_new>Open</a>
</td>
</tr>
% endfor
</tr>
</tbody>
</table>
<p><input type="checkbox" value="on" name="allbox" onclick="checkAll();"/>Check All Items
<p><input type="submit" value="Add Checked Files To "/> Existing Project:
""" + ProjectSelectionListTemplate + """
or New Project: <input type="text" name="new_project_name" value=""/>
</form>
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata company_name="${company_name}" filename="${filename}" version_string="${version_string}" file_information_list="${file_information_list}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
ProjectContentTemplate = """<%def name="layoutdata(company_name, filename, version_string, file_information_list)">
<title>Version String for ${company_name}:${filename}</title>
<p><a href="/ShowFileList?company_name=${company_name}">${company_name}</a>
<form name="input" action="ProcessProjectContent">
<table id="mainTable" class="SortedTable">
<thead>
<tr>
<th></th>
<th>Unpatched</th>
<th>Patched </th>
<th>Filename</th>
<th>Version String</th>
<th>Creation</th>
<th>Modification</th>
<th>Addition Time</th>
<th>MD5</th>
<th>SHA1</th>
<th>Operation</th>
</tr>
</thead>
<tbody>
% for (name,ctime_str,mtime_str,add_time_str,md5,sha1,id,version_str,project_member_id) in file_information_list:
<tr>
<td>
<input type="checkbox" name="project_member_id" value="${project_member_id}" />
</td>
<td>
<input type="radio" name="source_id" value="${id}" />
</td>
<td>
<input type="radio" name="target_id" value="${id}" />
</td>
<td>${name}</td>
<td>${version_str}</td>
<td>${ctime_str}</td>
<td>${mtime_str}</td>
<td>${add_time_str}</td>
<td>${md5}</td>
<td>${sha1}</td>
<td>
<a href=OpenInIDA?id=${id} target=_new>Open</a>
</td>
</tr>
% endfor
</tbody>
</table>
<input type="hidden" name="project_id" value="${project_id}"/>
<p>
<p><input type="checkbox" value="on" name="allbox" onclick="checkAll();"/>Check all
<input type="submit" name="operation" value="Remove From Project"/>
<input type="submit" name="operation" value="Start Diffing"/>
</form>
% if project_result_list and len( project_result_list ) > 0:
<hr>
<h2> Results </h2>
% for (source_id, target_id, source_file_name, source_file_version_string, target_file_name, target_file_version_string) in project_result_list:
<p><a href="/StartDiff?source_id=${source_id}&target_id=${target_id}">${source_file_name}: ${source_file_version_string} VS
% if source_file_name != target_file_name:
${target_file_name}:
% endif
${target_file_version_string}</a>
% endfor
% endif
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata company_name="${company_name}" filename="${filename}" version_string="${version_string}" file_information_list="${file_information_list}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
FileImportTemplateText = """<%def name="layoutdata( folder )">
<form name="input" action="ShowFileImport">
<input type="text" size="50" name="folder" value="" />
<p><input type="checkbox" name="move_file" value="yes" /> Move Files <B><font color="red">(WARNING: This will remove the source files)</font></B>
<p><input type="checkbox" name="overwrite_mode" value="yes" /> Overwrite old entry
<p><input type="submit" value="Import"/>
</form>
% if folder != None:
Import from ${folder}
% endif
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata folder = "${folder}" args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
FunctionmatchInfosTemplateText = """<%def name="layoutdata(source_file_name,
source_file_version_string,
target_file_name,
target_file_version_string,
show_detail, function_match_infos)">
%if patch_name:
<p><a href="/ShowMSPatchList">List</a>
><a href="PatchInfo?id=${patch_id}">${patch_name}</a>
%endif
%if download_label:
><a href="DownloadInfo?patch_id=${patch_id}&id=${download_id}">${download_label}</a>
%endif
%if file_name:
><a href="FileInfo?patch_id=${patch_id}&download_id=${download_id}&id=${file_id}">${file_name}</a>
%endif
[<a href="SyncIDA?source_id=${source_id}&target_id=${target_id}" target="sync_ida">Open IDA</a>]
[<a href="/StartDiff?source_id=${source_id}&target_id=${target_id}&reset=yes&project_id=${project_id}">Reanalyze</a>]
<title>${source_file_name}: ${source_file_version_string} vs
% if source_file_name != target_file_name:
${target_file_name}:
% endif
${target_file_version_string} Functions
</title>
<table id="mainTable" class="FunctionmatchInfo">
<thead>
<tr>
<th>Unpatched</th>
% if show_detail > 1:
<th>Address</th>
% endif
% if show_detail > 0:
<th>Unidentified</th>
% endif
<th>Patched</th>
% if show_detail > 1:
<th>Address</th>
% endif
% if show_detail > 0:
<th>Unidentified</th>
<th>Matched</th>
<th>Modifications</th>
% endif
<th>Security Implication Score</th>
</tr>
</thead>
<tbody>
% for function_match_info in function_match_infos:
<tr>
<td><a href="ShowBasicBlockMatchInfo?patch_id=${patch_id}&download_id=${download_id}&file_id=${file_id}&source_id=${source_id}&target_id=${target_id}&source_address=${function_match_info.source_address}&target_address=${function_match_info.target_address}" target="${source_id}+${target_id}+source_address=${function_match_info.source_address}+target_address=${function_match_info.target_address}">${function_match_info.source_function_name}</a></td>
% if show_detail > 1:
<td>${hex(function_match_info.source_address)[2:].upper()}</td>
% endif
% if show_detail > 0:
<td>${function_match_info.non_match_count_for_the_source}</td>
% endif
<td><a href="ShowBasicBlockMatchInfo?patch_id=${patch_id}&download_id=${download_id}&file_id=${file_id}&source_id=${source_id}&target_id=${target_id}&source_address=${function_match_info.source_address}&target_address=${function_match_info.target_address}" target="${source_id}+${target_id}+source_address=${function_match_info.source_address}+target_address=${function_match_info.target_address}">${function_match_info.target_function_name}</a></td>
% if show_detail > 1:
<td>${hex(function_match_info.target_address)[2:].upper()}</td>
% endif
% if show_detail > 0:
<td>${function_match_info.non_match_count_for_the_target}</td>
<td>${function_match_info.match_count_for_the_source}</td>
<td>${function_match_info.match_count_with_modificationfor_the_source}</td>
% endif
<td>${function_match_info.security_implications_score}</td>
</tr>
% endfor
</tbody>
</table>
</%def>
<html>
""" + HeadText + """
<body>
""" + MainMenu + """
<div id=Content>
<%self:layoutdata
source_file_name = "${source_file_name}"
source_file_version_string = "${source_file_version_string}"
target_file_name = "${target_file_name}"
target_file_version_string = "${target_file_version_string}"
show_detail="${show_detail}"
function_match_infos="${function_match_infos}"
args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
"""
str(function_match_info.block_type)
str(function_match_info.type)
str( function_match_info.match_rate )
"""
ComparisonTableTemplateText = """<%def name="layoutdata(source_file_name,
source_file_version_string,
target_file_name,
target_file_version_string,
source_function_name,
target_function_name, comparison_table,
source_address,
target_address)">
%if patch_name:
<p><a href="/ShowMSPatchList">List</a>
><a href="PatchInfo?id=${patch_id}">${patch_name}</a>
%endif
%if download_label:
><a href="DownloadInfo?patch_id=${patch_id}&id=${download_id}">${download_label}</a>
%endif
%if file_name:
><a href="FileInfo?patch_id=${patch_id}&download_id=${download_id}&id=${file_id}">${file_name}</a>
%endif
><a href="ShowFunctionMatchInfo?patch_id=${patch_id}&download_id=${download_id}&file_id=${file_id}&source_id=${source_id}&target_id=${target_id}">Functions</a>
<title>${source_file_name}: ${source_file_version_string}:${source_function_name} vs
% if source_file_name != target_file_name:
${target_file_name}:
% endif
${target_file_version_string}:${target_function_name} Blocks</title>
<p><a href="ShowBasicBlockMatchInfo?patch_id=${patch_id}&download_id=${download_id}&file_id=${file_id}&source_id=${source_id}&target_id=${target_id}&source_address=${source_address}&target_address=${target_address}">
${source_file_name}: ${source_file_version_string}: ${source_function_name} vs
% if source_file_name != target_file_name:
${target_file_name}:
% endif
${target_file_version_string}: ${target_function_name}
</a>
<table class="Block">
<tr>
% if source_function_name:
<td><b>Unpatched: ${source_function_name}<b></td>
% else:
<td><b>Unpatched</b></td>
% endif
% if target_function_name:
<td><b>Patched: ${target_function_name}<b></td>
% else:
<td><b>Patched</b></td>
% endif
</tr>
% for ( left_address, left_lines, right_address, right_lines, match_rate ) in comparison_table:
% if left_address != 0 or right_address != 0:
<tr>
% if right_address == 0:
<td class="UnidentifiedBlock">
% else:
% if match_rate == 100 or left_address == 0:
<td class="MatchedBlock">
% else:
<td class="ModifiedBlock">
% endif
% endif
% if left_address != 0:
<b>[${hex(left_address)[2:].upper()}]</b>
% endif
<p>${left_lines}</td>
% if left_address == 0:
<td class="UnidentifiedBlock">
% else:
% if match_rate == 100 or right_address == 0:
<td class="MatchedBlock">
% else:
<td class="ModifiedBlock">
% endif
% endif
% if right_address != 0:
<b>[${hex(right_address)[2:].upper()}]</b>
% endif
<p>${right_lines}</td>
</tr>
% endif
% endfor
</table>
</%def>
""" + HeadText + """
<div id=Content>
<%self:layoutdata
source_file_name = "${source_file_name}"
source_file_version_string = "${source_file_version_string}"
target_file_name = "${target_file_name}"
target_file_version_string = "${target_file_version_string}"
source_function_name="${source_function_name}"
target_function_name="${target_function_name}"
comparison_table="${comparison_table}"
source_address="${source_address}"
target_address="${target_address}"
args="col">\
</%self:layoutdata>
</div>
</div>
"""
MainBody = """
<div id=Content>
<%self:layoutdata args="col">\
</%self:layoutdata>
</div>
</body>
</html>"""
BodyHTML = """
<html>
""" + HeadText + """
<body>
""" + MainMenu + MainBody
CloseButtonHTML = """<form method="post">
<input type="button" value="Close Window"
onclick="window.close()">
</form>"""
SyncIDAHTML="<html><body> Check your IDA %s </body></html>"
OpenInIDAHTML="<html><body> Check your IDA <p> Running %s %s <p>%s</body></html>"
| bsd-3-clause |
resouer/kubernetes | cluster/juju/layers/kubernetes-worker/reactive/kubernetes_worker.py | 24 | 55854 | #!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import json
import os
import random
import shutil
import subprocess
import time
import yaml
from charms.leadership import leader_get, leader_set
from pathlib import Path
from shlex import split
from subprocess import check_call, check_output
from subprocess import CalledProcessError
from socket import gethostname, getfqdn
from charms import layer
from charms.layer import snap
from charms.reactive import hook
from charms.reactive import endpoint_from_flag
from charms.reactive import set_state, remove_state, is_state
from charms.reactive import when, when_any, when_not, when_none
from charms.kubernetes.common import get_version
from charms.reactive.helpers import data_changed
from charms.templating.jinja2 import render
from charmhelpers.core import hookenv, unitdata
from charmhelpers.core.host import service_stop, service_restart
from charmhelpers.contrib.charmsupport import nrpe
# Override the default nagios shortname regex to allow periods, which we
# need because our bin names contain them (e.g. 'snap.foo.daemon'). The
# default regex in charmhelpers doesn't allow periods, but nagios itself does.
nrpe.Check.shortname_re = '[\.A-Za-z0-9-_]+$'
kubeconfig_path = '/root/cdk/kubeconfig'
kubeproxyconfig_path = '/root/cdk/kubeproxyconfig'
kubeclientconfig_path = '/root/.kube/config'
gcp_creds_env_key = 'GOOGLE_APPLICATION_CREDENTIALS'
snap_resources = ['kubectl', 'kubelet', 'kube-proxy']
os.environ['PATH'] += os.pathsep + os.path.join(os.sep, 'snap', 'bin')
db = unitdata.kv()
@hook('upgrade-charm')
def upgrade_charm():
# migrate to new flags
if is_state('kubernetes-worker.restarted-for-cloud'):
remove_state('kubernetes-worker.restarted-for-cloud')
set_state('kubernetes-worker.cloud.ready')
if is_state('kubernetes-worker.cloud-request-sent'):
# minor change, just for consistency
remove_state('kubernetes-worker.cloud-request-sent')
set_state('kubernetes-worker.cloud.request-sent')
# Trigger removal of PPA docker installation if it was previously set.
set_state('config.changed.install_from_upstream')
hookenv.atexit(remove_state, 'config.changed.install_from_upstream')
cleanup_pre_snap_services()
migrate_resource_checksums()
check_resources_for_upgrade_needed()
# Remove the RC for nginx ingress if it exists
if hookenv.config().get('ingress'):
kubectl_success('delete', 'rc', 'nginx-ingress-controller')
# Remove gpu.enabled state so we can reconfigure gpu-related kubelet flags,
# since they can differ between k8s versions
if is_state('kubernetes-worker.gpu.enabled'):
remove_state('kubernetes-worker.gpu.enabled')
try:
disable_gpu()
except ApplyNodeLabelFailed:
# Removing node label failed. Probably the master is unavailable.
# Proceed with the upgrade in hope GPUs will still be there.
hookenv.log('Failed to remove GPU labels. Proceed with upgrade.')
remove_state('kubernetes-worker.cni-plugins.installed')
remove_state('kubernetes-worker.config.created')
remove_state('kubernetes-worker.ingress.available')
remove_state('worker.auth.bootstrapped')
set_state('kubernetes-worker.restart-needed')
def get_resource_checksum_db_key(resource):
''' Convert a resource name to a resource checksum database key. '''
return 'kubernetes-worker.resource-checksums.' + resource
def calculate_resource_checksum(resource):
''' Calculate a checksum for a resource '''
md5 = hashlib.md5()
path = hookenv.resource_get(resource)
if path:
with open(path, 'rb') as f:
data = f.read()
md5.update(data)
return md5.hexdigest()
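# NB: when no resource is attached, the md5 above is never updated, so
# calculate_resource_checksum() returns the digest of zero bytes
# (d41d8cd98f00b204e9800998ecf8427e), which matches the zero_checksum stored
# by migrate_resource_checksums() below.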
def migrate_resource_checksums():
''' Migrate resource checksums from the old schema to the new one '''
for resource in snap_resources:
new_key = get_resource_checksum_db_key(resource)
if not db.get(new_key):
path = hookenv.resource_get(resource)
if path:
# old key from charms.reactive.helpers.any_file_changed
old_key = 'reactive.files_changed.' + path
old_checksum = db.get(old_key)
db.set(new_key, old_checksum)
else:
# No resource is attached. Previously, this meant no checksum
# would be calculated and stored. But now we calculate it as if
# it is a 0-byte resource, so let's go ahead and do that.
zero_checksum = hashlib.md5().hexdigest()
db.set(new_key, zero_checksum)
def check_resources_for_upgrade_needed():
hookenv.status_set('maintenance', 'Checking resources')
for resource in snap_resources:
key = get_resource_checksum_db_key(resource)
old_checksum = db.get(key)
new_checksum = calculate_resource_checksum(resource)
if new_checksum != old_checksum:
set_upgrade_needed()
def calculate_and_store_resource_checksums():
for resource in snap_resources:
key = get_resource_checksum_db_key(resource)
checksum = calculate_resource_checksum(resource)
db.set(key, checksum)
def set_upgrade_needed():
set_state('kubernetes-worker.snaps.upgrade-needed')
config = hookenv.config()
previous_channel = config.previous('channel')
require_manual = config.get('require-manual-upgrade')
if previous_channel is None or not require_manual:
set_state('kubernetes-worker.snaps.upgrade-specified')
def cleanup_pre_snap_services():
# remove old states
remove_state('kubernetes-worker.components.installed')
# disable old services
services = ['kubelet', 'kube-proxy']
for service in services:
hookenv.log('Stopping {0} service.'.format(service))
service_stop(service)
# cleanup old files
files = [
"/lib/systemd/system/kubelet.service",
"/lib/systemd/system/kube-proxy.service",
"/etc/default/kube-default",
"/etc/default/kubelet",
"/etc/default/kube-proxy",
"/srv/kubernetes",
"/usr/local/bin/kubectl",
"/usr/local/bin/kubelet",
"/usr/local/bin/kube-proxy",
"/etc/kubernetes"
]
for file in files:
if os.path.isdir(file):
hookenv.log("Removing directory: " + file)
shutil.rmtree(file)
elif os.path.isfile(file):
hookenv.log("Removing file: " + file)
os.remove(file)
@when('config.changed.channel')
def channel_changed():
set_upgrade_needed()
@when('kubernetes-worker.snaps.upgrade-specified')
def install_snaps():
channel = hookenv.config('channel')
hookenv.status_set('maintenance', 'Installing kubectl snap')
snap.install('kubectl', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kubelet snap')
snap.install('kubelet', channel=channel, classic=True)
hookenv.status_set('maintenance', 'Installing kube-proxy snap')
snap.install('kube-proxy', channel=channel, classic=True)
calculate_and_store_resource_checksums()
set_state('kubernetes-worker.snaps.installed')
set_state('kubernetes-worker.restart-needed')
remove_state('kubernetes-worker.snaps.upgrade-needed')
remove_state('kubernetes-worker.snaps.upgrade-specified')
@hook('stop')
def shutdown():
''' When this unit is destroyed:
- delete the current node
- stop the worker services
'''
try:
if os.path.isfile(kubeconfig_path):
kubectl('delete', 'node', get_node_name())
except CalledProcessError:
hookenv.log('Failed to unregister node.')
service_stop('snap.kubelet.daemon')
service_stop('snap.kube-proxy.daemon')
@when('docker.available')
@when_not('kubernetes-worker.cni-plugins.installed')
def install_cni_plugins():
''' Unpack the cni-plugins resource '''
charm_dir = os.getenv('CHARM_DIR')
# Get the resource via resource_get
try:
resource_name = 'cni-{}'.format(arch())
archive = hookenv.resource_get(resource_name)
except Exception:
message = 'Error fetching the cni resource.'
hookenv.log(message)
hookenv.status_set('blocked', message)
return
if not archive:
hookenv.log('Missing cni resource.')
hookenv.status_set('blocked', 'Missing cni resource.')
return
# Handle null resource publication, we check if filesize < 1mb
filesize = os.stat(archive).st_size
if filesize < 1000000:
hookenv.status_set('blocked', 'Incomplete cni resource.')
return
hookenv.status_set('maintenance', 'Unpacking cni resource.')
unpack_path = '{}/files/cni'.format(charm_dir)
os.makedirs(unpack_path, exist_ok=True)
cmd = ['tar', 'xfvz', archive, '-C', unpack_path]
hookenv.log(cmd)
check_call(cmd)
apps = [
{'name': 'loopback', 'path': '/opt/cni/bin'}
]
for app in apps:
unpacked = '{}/{}'.format(unpack_path, app['name'])
app_path = os.path.join(app['path'], app['name'])
install = ['install', '-v', '-D', unpacked, app_path]
hookenv.log(install)
check_call(install)
# Used by the "registry" action. The action is run on a single worker, but
# the registry pod can end up on any worker, so we need this directory on
# all the workers.
os.makedirs('/srv/registry', exist_ok=True)
set_state('kubernetes-worker.cni-plugins.installed')
@when('kubernetes-worker.snaps.installed')
def set_app_version():
''' Declare the application version to juju '''
cmd = ['kubelet', '--version']
version = check_output(cmd)
hookenv.application_version_set(version.split(b' v')[-1].rstrip())
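    # e.g. output like b'Kubernetes v1.10.3\n' becomes b'1.10.3' here (an
    # illustrative sample of the `kubelet --version` format the split assumes).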
@when('kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.is_leader')
def process_snapd_timer():
''' Set the snapd refresh timer on the leader so all cluster members
(present and future) will refresh near the same time. '''
# Get the current snapd refresh timer; we know layer-snap has set this
# when the 'snap.refresh.set' flag is present.
timer = snap.get(snapname='core', key='refresh.timer').decode('utf-8')
# The first time through, data_changed will be true. Subsequent calls
# should only update leader data if something changed.
if data_changed('worker_snapd_refresh', timer):
hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
leader_set({'snapd_refresh': timer})
@when('kubernetes-worker.snaps.installed')
@when('snap.refresh.set')
@when('leadership.changed.snapd_refresh')
@when_not('leadership.is_leader')
def set_snapd_timer():
''' Set the snapd refresh.timer on non-leader cluster members. '''
# NB: This method should only be run when 'snap.refresh.set' is present.
# Layer-snap will always set a core refresh.timer, which may not be the
# same as our leader. Gating with 'snap.refresh.set' ensures layer-snap
# has finished and we are free to set our config to the leader's timer.
timer = leader_get('snapd_refresh')
hookenv.log('setting snapd_refresh timer to: {}'.format(timer))
snap.set_refresh_timer(timer)
@hookenv.atexit
def charm_status():
'''Update the status message with the current status of kubelet.'''
vsphere_joined = is_state('endpoint.vsphere.joined')
azure_joined = is_state('endpoint.azure.joined')
cloud_blocked = is_state('kubernetes-worker.cloud.blocked')
if vsphere_joined and cloud_blocked:
hookenv.status_set('blocked',
'vSphere integration requires K8s 1.12 or greater')
return
if azure_joined and cloud_blocked:
hookenv.status_set('blocked',
'Azure integration requires K8s 1.11 or greater')
return
if is_state('kubernetes-worker.cloud.pending'):
hookenv.status_set('waiting', 'Waiting for cloud integration')
return
if not is_state('kube-control.dns.available'):
        # During deployment the worker has to start kubelet without cluster dns
        # configured. If this is the first unit online in a service pool, it
        # waits to self-host the dns pod and then configures itself to query
        # the dns service declared in the kube-system namespace.
hookenv.status_set('waiting', 'Waiting for cluster DNS.')
return
if is_state('kubernetes-worker.snaps.upgrade-specified'):
hookenv.status_set('waiting', 'Upgrade pending')
return
if is_state('kubernetes-worker.snaps.upgrade-needed'):
hookenv.status_set('blocked',
'Needs manual upgrade, run the upgrade action')
return
if is_state('kubernetes-worker.snaps.installed'):
update_kubelet_status()
return
else:
pass # will have been set by snap layer or other handler
def update_kubelet_status():
''' There are different states that the kubelet can be in, where we are
waiting for dns, waiting for cluster turnup, or ready to serve
applications.'''
services = [
'kubelet',
'kube-proxy'
]
failing_services = []
for service in services:
daemon = 'snap.{}.daemon'.format(service)
if not _systemctl_is_active(daemon):
failing_services.append(service)
if len(failing_services) == 0:
hookenv.status_set('active', 'Kubernetes worker running.')
else:
msg = 'Waiting for {} to start.'.format(','.join(failing_services))
hookenv.status_set('waiting', msg)
def get_ingress_address(relation):
try:
network_info = hookenv.network_get(relation.relation_name)
except NotImplementedError:
network_info = []
if network_info and 'ingress-addresses' in network_info:
# just grab the first one for now, maybe be more robust here?
return network_info['ingress-addresses'][0]
else:
# if they don't have ingress-addresses they are running a juju that
# doesn't support spaces, so just return the private address
return hookenv.unit_get('private-address')
@when('certificates.available', 'kube-control.connected')
def send_data(tls, kube_control):
'''Send the data that is required to create a server certificate for
this server.'''
# Use the public ip of this unit as the Common Name for the certificate.
common_name = hookenv.unit_public_ip()
ingress_ip = get_ingress_address(kube_control)
# Create SANs that the tls layer will add to the server cert.
sans = [
hookenv.unit_public_ip(),
ingress_ip,
gethostname()
]
# Create a path safe name by removing path characters from the unit name.
certificate_name = hookenv.local_unit().replace('/', '_')
# Request a server cert with this information.
tls.request_server_cert(common_name, sans, certificate_name)
@when('kube-api-endpoint.available', 'kube-control.dns.available',
'cni.available')
def watch_for_changes(kube_api, kube_control, cni):
''' Watch for configuration changes and signal if we need to restart the
worker services '''
servers = get_kube_api_servers(kube_api)
dns = kube_control.get_dns()
cluster_cidr = cni.get_config()['cidr']
if (data_changed('kube-api-servers', servers) or
data_changed('kube-dns', dns) or
data_changed('cluster-cidr', cluster_cidr)):
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.snaps.installed', 'kube-api-endpoint.available',
'tls_client.ca.saved', 'tls_client.client.certificate.saved',
'tls_client.client.key.saved', 'tls_client.server.certificate.saved',
'tls_client.server.key.saved',
'kube-control.dns.available', 'kube-control.auth.available',
'cni.available', 'kubernetes-worker.restart-needed',
'worker.auth.bootstrapped')
@when_not('kubernetes-worker.cloud.pending',
'kubernetes-worker.cloud.blocked')
def start_worker(kube_api, kube_control, auth_control, cni):
''' Start kubelet using the provided API and DNS info.'''
servers = get_kube_api_servers(kube_api)
# Note that the DNS server doesn't necessarily exist at this point. We know
# what its IP will eventually be, though, so we can go ahead and configure
# kubelet with that info. This ensures that early pods are configured with
# the correct DNS even though the server isn't ready yet.
dns = kube_control.get_dns()
ingress_ip = get_ingress_address(kube_control)
cluster_cidr = cni.get_config()['cidr']
if cluster_cidr is None:
hookenv.log('Waiting for cluster cidr.')
return
creds = db.get('credentials')
data_changed('kube-control.creds', creds)
create_config(random.choice(servers), creds)
configure_kubelet(dns, ingress_ip)
configure_kube_proxy(servers, cluster_cidr)
set_state('kubernetes-worker.config.created')
restart_unit_services()
update_kubelet_status()
set_state('kubernetes-worker.label-config-required')
remove_state('kubernetes-worker.restart-needed')
@when('cni.connected')
@when_not('cni.configured')
def configure_cni(cni):
''' Set worker configuration on the CNI relation. This lets the CNI
subordinate know that we're the worker so it can respond accordingly. '''
cni.set_config(is_master=False, kubeconfig_path=kubeconfig_path)
@when('config.changed.ingress')
def toggle_ingress_state():
''' Ingress is a toggled state. Remove ingress.available if set when
toggled '''
remove_state('kubernetes-worker.ingress.available')
@when('docker.sdn.configured')
def sdn_changed():
'''The Software Defined Network changed on the container so restart the
kubernetes services.'''
restart_unit_services()
update_kubelet_status()
remove_state('docker.sdn.configured')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.ingress.available')
def render_and_launch_ingress():
''' If configuration has ingress daemon set enabled, launch the ingress
load balancer and default http backend. Otherwise attempt deletion. '''
config = hookenv.config()
# If ingress is enabled, launch the ingress controller
if config.get('ingress'):
launch_default_ingress_controller()
else:
hookenv.log('Deleting the http backend and ingress.')
kubectl_manifest('delete',
'/root/cdk/addons/default-http-backend.yaml')
kubectl_manifest('delete',
'/root/cdk/addons/ingress-daemon-set.yaml') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
@when('config.changed.labels')
def handle_labels_changed():
set_state('kubernetes-worker.label-config-required')
@when('kubernetes-worker.label-config-required',
'kubernetes-worker.config.created')
def apply_node_labels():
''' Parse the labels configuration option and apply the labels to the
node. '''
# Get the user's configured labels.
config = hookenv.config()
user_labels = {}
for item in config.get('labels').split(' '):
if '=' in item:
key, val = item.split('=')
user_labels[key] = val
else:
hookenv.log('Skipping malformed option: {}.'.format(item))
# Collect the current label state.
current_labels = db.get('current_labels') or {}
# Remove any labels that the user has removed from the config.
for key in list(current_labels.keys()):
if key not in user_labels:
try:
remove_label(key)
del current_labels[key]
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Add any new labels.
for key, val in user_labels.items():
try:
set_label(key, val)
current_labels[key] = val
db.set('current_labels', current_labels)
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Set the juju-application label.
try:
set_label('juju-application', hookenv.service_name())
except ApplyNodeLabelFailed as e:
hookenv.log(str(e))
return
# Label configuration complete.
remove_state('kubernetes-worker.label-config-required')
@when_any('config.changed.kubelet-extra-args',
'config.changed.proxy-extra-args',
'config.changed.kubelet-extra-config')
def config_changed_requires_restart():
set_state('kubernetes-worker.restart-needed')
@when('config.changed.docker-logins')
def docker_logins_changed():
"""Set a flag to handle new docker login options.
If docker daemon options have also changed, set a flag to ensure the
daemon is restarted prior to running docker login.
"""
config = hookenv.config()
if data_changed('docker-opts', config['docker-opts']):
hookenv.log('Found new docker daemon options. Requesting a restart.')
# State will be removed by layer-docker after restart
set_state('docker.restart')
set_state('kubernetes-worker.docker-login')
@when('kubernetes-worker.docker-login')
@when_not('docker.restart')
def run_docker_login():
"""Login to a docker registry with configured credentials."""
config = hookenv.config()
previous_logins = config.previous('docker-logins')
logins = config['docker-logins']
logins = json.loads(logins)
if previous_logins:
previous_logins = json.loads(previous_logins)
next_servers = {login['server'] for login in logins}
previous_servers = {login['server'] for login in previous_logins}
servers_to_logout = previous_servers - next_servers
for server in servers_to_logout:
cmd = ['docker', 'logout', server]
subprocess.check_call(cmd)
for login in logins:
server = login['server']
username = login['username']
password = login['password']
cmd = ['docker', 'login', server, '-u', username, '-p', password]
subprocess.check_call(cmd)
remove_state('kubernetes-worker.docker-login')
set_state('kubernetes-worker.restart-needed')
def arch():
'''Return the package architecture as a string. Raise an exception if the
architecture is not supported by kubernetes.'''
# Get the package architecture for this system.
architecture = check_output(['dpkg', '--print-architecture']).rstrip()
# Convert the binary result into a string.
architecture = architecture.decode('utf-8')
return architecture
def create_config(server, creds):
'''Create a kubernetes configuration for the worker unit.'''
# Get the options from the tls-client layer.
layer_options = layer.options('tls-client')
# Get all the paths to the tls information required for kubeconfig.
ca = layer_options.get('ca_certificate_path')
# Create kubernetes configuration in the default location for ubuntu.
create_kubeconfig('/home/ubuntu/.kube/config', server, ca,
token=creds['client_token'], user='ubuntu')
# Make the config dir readable by the ubuntu users so juju scp works.
cmd = ['chown', '-R', 'ubuntu:ubuntu', '/home/ubuntu/.kube']
check_call(cmd)
# Create kubernetes configuration in the default location for root.
create_kubeconfig(kubeclientconfig_path, server, ca,
token=creds['client_token'], user='root')
# Create kubernetes configuration for kubelet, and kube-proxy services.
create_kubeconfig(kubeconfig_path, server, ca,
token=creds['kubelet_token'], user='kubelet')
create_kubeconfig(kubeproxyconfig_path, server, ca,
token=creds['proxy_token'], user='kube-proxy')
def parse_extra_args(config_key):
elements = hookenv.config().get(config_key, '').split()
args = {}
for element in elements:
if '=' in element:
key, _, value = element.partition('=')
args[key] = value
else:
args[element] = 'true'
return args
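# e.g. parse_extra_args('kubelet-extra-args') on a config value of
# "v=4 feature-gates=DevicePlugins=true enable-debugging-handlers" yields
# {'v': '4', 'feature-gates': 'DevicePlugins=true',
#  'enable-debugging-handlers': 'true'}; partition('=') keeps everything after
# the first '=' as the value. (The flag names are illustrative only.)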
def configure_kubernetes_service(service, base_args, extra_args_key):
db = unitdata.kv()
prev_args_key = 'kubernetes-worker.prev_args.' + service
prev_args = db.get(prev_args_key) or {}
extra_args = parse_extra_args(extra_args_key)
args = {}
for arg in prev_args:
# remove previous args by setting to null
args[arg] = 'null'
for k, v in base_args.items():
args[k] = v
for k, v in extra_args.items():
args[k] = v
cmd = ['snap', 'set', service] + ['%s=%s' % item for item in args.items()]
check_call(cmd)
db.set(prev_args_key, args)
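# The resulting invocation looks roughly like (flag names illustrative):
#   snap set kubelet kubeconfig=/root/cdk/kubeconfig v=0 stale-flag=null ...
# where 'null' values clear flags that were set on a previous run.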
def merge_kubelet_extra_config(config, extra_config):
''' Updates config to include the contents of extra_config. This is done
recursively to allow deeply nested dictionaries to be merged.
This is destructive: it modifies the config dict that is passed in.
'''
for k, extra_config_value in extra_config.items():
if isinstance(extra_config_value, dict):
config_value = config.setdefault(k, {})
merge_kubelet_extra_config(config_value, extra_config_value)
else:
config[k] = extra_config_value
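# e.g. an extra_config of {'authentication': {'anonymous': {'enabled': True}}}
# merged into the base KubeletConfiguration below replaces only the nested
# 'enabled' key and leaves the sibling 'x509' settings intact.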
def configure_kubelet(dns, ingress_ip):
layer_options = layer.options('tls-client')
ca_cert_path = layer_options.get('ca_certificate_path')
server_cert_path = layer_options.get('server_certificate_path')
server_key_path = layer_options.get('server_key_path')
kubelet_opts = {}
kubelet_opts['require-kubeconfig'] = 'true'
kubelet_opts['kubeconfig'] = kubeconfig_path
kubelet_opts['network-plugin'] = 'cni'
kubelet_opts['v'] = '0'
kubelet_opts['logtostderr'] = 'true'
kubelet_opts['node-ip'] = ingress_ip
kubelet_opts['allow-privileged'] = set_privileged()
if is_state('endpoint.aws.ready'):
kubelet_opts['cloud-provider'] = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'gce'
kubelet_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.openstack.ready'):
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'openstack'
kubelet_opts['cloud-config'] = str(cloud_config_path)
elif is_state('endpoint.vsphere.joined'):
# vsphere just needs to be joined on the worker (vs 'ready')
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'vsphere'
# NB: vsphere maps node product-id to its uuid (no config file needed).
uuid_file = '/sys/class/dmi/id/product_uuid'
with open(uuid_file, 'r') as f:
uuid = f.read().strip()
kubelet_opts['provider-id'] = 'vsphere://{}'.format(uuid)
elif is_state('endpoint.azure.ready'):
azure = endpoint_from_flag('endpoint.azure.ready')
cloud_config_path = _cloud_config_path('kubelet')
kubelet_opts['cloud-provider'] = 'azure'
kubelet_opts['cloud-config'] = str(cloud_config_path)
kubelet_opts['provider-id'] = azure.vm_id
if get_version('kubelet') >= (1, 10):
# Put together the KubeletConfiguration data
kubelet_config = {
'apiVersion': 'kubelet.config.k8s.io/v1beta1',
'kind': 'KubeletConfiguration',
'address': '0.0.0.0',
'authentication': {
'anonymous': {
'enabled': False
},
'x509': {
'clientCAFile': ca_cert_path
}
},
'clusterDomain': dns['domain'],
'failSwapOn': False,
'port': 10250,
'tlsCertFile': server_cert_path,
'tlsPrivateKeyFile': server_key_path
}
if dns['enable-kube-dns']:
kubelet_config['clusterDNS'] = [dns['sdn-ip']]
if is_state('kubernetes-worker.gpu.enabled'):
kubelet_config['featureGates'] = {
'DevicePlugins': True
}
# Add kubelet-extra-config. This needs to happen last so that it
# overrides any config provided by the charm.
kubelet_extra_config = hookenv.config('kubelet-extra-config')
kubelet_extra_config = yaml.load(kubelet_extra_config)
merge_kubelet_extra_config(kubelet_config, kubelet_extra_config)
# Render the file and configure Kubelet to use it
os.makedirs('/root/cdk/kubelet', exist_ok=True)
with open('/root/cdk/kubelet/config.yaml', 'w') as f:
f.write('# Generated by kubernetes-worker charm, do not edit\n')
yaml.dump(kubelet_config, f)
kubelet_opts['config'] = '/root/cdk/kubelet/config.yaml'
else:
# NOTE: This is for 1.9. Once we've dropped 1.9 support, we can remove
# this whole block and the parent if statement.
kubelet_opts['address'] = '0.0.0.0'
kubelet_opts['anonymous-auth'] = 'false'
kubelet_opts['client-ca-file'] = ca_cert_path
kubelet_opts['cluster-domain'] = dns['domain']
kubelet_opts['fail-swap-on'] = 'false'
kubelet_opts['port'] = '10250'
kubelet_opts['tls-cert-file'] = server_cert_path
kubelet_opts['tls-private-key-file'] = server_key_path
if dns['enable-kube-dns']:
kubelet_opts['cluster-dns'] = dns['sdn-ip']
if is_state('kubernetes-worker.gpu.enabled'):
kubelet_opts['feature-gates'] = 'DevicePlugins=true'
if get_version('kubelet') >= (1, 11):
kubelet_opts['dynamic-config-dir'] = '/root/cdk/kubelet/dynamic-config'
configure_kubernetes_service('kubelet', kubelet_opts, 'kubelet-extra-args')
def configure_kube_proxy(api_servers, cluster_cidr):
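    '''Assemble and apply the kube-proxy snap options, pointing the proxy at
    a randomly chosen API server from the provided list.
    '''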
kube_proxy_opts = {}
kube_proxy_opts['cluster-cidr'] = cluster_cidr
kube_proxy_opts['kubeconfig'] = kubeproxyconfig_path
kube_proxy_opts['logtostderr'] = 'true'
kube_proxy_opts['v'] = '0'
kube_proxy_opts['master'] = random.choice(api_servers)
kube_proxy_opts['hostname-override'] = get_node_name()
if b'lxc' in check_output('virt-what', shell=True):
kube_proxy_opts['conntrack-max-per-core'] = '0'
configure_kubernetes_service('kube-proxy', kube_proxy_opts,
'proxy-extra-args')
def create_kubeconfig(kubeconfig, server, ca, key=None, certificate=None,
user='ubuntu', context='juju-context',
cluster='juju-cluster', password=None, token=None):
'''Create a configuration for Kubernetes based on path using the supplied
    arguments for values of the Kubernetes server, CA, key, certificate, user,
context and cluster.'''
if not key and not certificate and not password and not token:
raise ValueError('Missing authentication mechanism.')
# token and password are mutually exclusive. Error early if both are
# present. The developer has requested an impossible situation.
# see: kubectl config set-credentials --help
if token and password:
raise ValueError('Token and Password are mutually exclusive.')
# Create the config file with the address of the master server.
cmd = 'kubectl config --kubeconfig={0} set-cluster {1} ' \
'--server={2} --certificate-authority={3} --embed-certs=true'
check_call(split(cmd.format(kubeconfig, cluster, server, ca)))
# Delete old users
cmd = 'kubectl config --kubeconfig={0} unset users'
check_call(split(cmd.format(kubeconfig)))
# Create the credentials using the client flags.
cmd = 'kubectl config --kubeconfig={0} ' \
'set-credentials {1} '.format(kubeconfig, user)
if key and certificate:
cmd = '{0} --client-key={1} --client-certificate={2} '\
'--embed-certs=true'.format(cmd, key, certificate)
if password:
cmd = "{0} --username={1} --password={2}".format(cmd, user, password)
# This is mutually exclusive from password. They will not work together.
if token:
cmd = "{0} --token={1}".format(cmd, token)
check_call(split(cmd))
# Create a default context with the cluster.
cmd = 'kubectl config --kubeconfig={0} set-context {1} ' \
'--cluster={2} --user={3}'
check_call(split(cmd.format(kubeconfig, context, cluster, user)))
# Make the config use this new context.
cmd = 'kubectl config --kubeconfig={0} use-context {1}'
check_call(split(cmd.format(kubeconfig, context)))
@when_any('config.changed.default-backend-image',
'config.changed.ingress-ssl-chain-completion',
'config.changed.nginx-image')
@when('kubernetes-worker.config.created')
def launch_default_ingress_controller():
''' Launch the Kubernetes ingress controller & default backend (404) '''
config = hookenv.config()
# need to test this in case we get in
# here from a config change to the image
if not config.get('ingress'):
return
context = {}
context['arch'] = arch()
addon_path = '/root/cdk/addons/{}'
context['defaultbackend_image'] = config.get('default-backend-image')
if (context['defaultbackend_image'] == "" or
context['defaultbackend_image'] == "auto"):
if context['arch'] == 's390x':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-s390x:1.5"
elif context['arch'] == 'arm64':
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-arm64:1.5"
else:
context['defaultbackend_image'] = \
"k8s.gcr.io/defaultbackend-amd64:1.5"
# Render the default http backend (404) replicationcontroller manifest
manifest = addon_path.format('default-http-backend.yaml')
render('default-http-backend.yaml', manifest, context)
hookenv.log('Creating the default http backend.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create default-http-backend. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
# Render the ingress daemon set controller manifest
context['ssl_chain_completion'] = config.get(
'ingress-ssl-chain-completion')
context['ingress_image'] = config.get('nginx-image')
if context['ingress_image'] == "" or context['ingress_image'] == "auto":
images = {'amd64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.16.1', # noqa
'arm64': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-arm64:0.16.1', # noqa
's390x': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-s390x:0.16.1', # noqa
'ppc64el': 'quay.io/kubernetes-ingress-controller/nginx-ingress-controller-ppc64le:0.16.1', # noqa
}
context['ingress_image'] = images.get(context['arch'], images['amd64'])
if get_version('kubelet') < (1, 9):
context['daemonset_api_version'] = 'extensions/v1beta1'
else:
context['daemonset_api_version'] = 'apps/v1'
context['juju_application'] = hookenv.service_name()
manifest = addon_path.format('ingress-daemon-set.yaml')
render('ingress-daemon-set.yaml', manifest, context)
hookenv.log('Creating the ingress daemon set.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create ingress controller. Will attempt again next update.') # noqa
hookenv.close_port(80)
hookenv.close_port(443)
return
set_state('kubernetes-worker.ingress.available')
hookenv.open_port(80)
hookenv.open_port(443)
def restart_unit_services():
'''Restart worker services.'''
hookenv.log('Restarting kubelet and kube-proxy.')
services = ['kube-proxy', 'kubelet']
for service in services:
service_restart('snap.%s.daemon' % service)
def get_kube_api_servers(kube_api):
'''Return the kubernetes api server address and port for this
relationship.'''
hosts = []
# Iterate over every service from the relation object.
for service in kube_api.services():
for unit in service['hosts']:
hosts.append('https://{0}:{1}'.format(unit['hostname'],
unit['port']))
return hosts
def kubectl(*args):
''' Run a kubectl cli command with a config file. Returns stdout and throws
an error if the command fails. '''
command = ['kubectl', '--kubeconfig=' + kubeclientconfig_path] + list(args)
hookenv.log('Executing {}'.format(command))
return check_output(command)
def kubectl_success(*args):
''' Runs kubectl with the given args. Returns True if successful, False if
not. '''
try:
kubectl(*args)
return True
except CalledProcessError:
return False
def kubectl_manifest(operation, manifest):
''' Wrap the kubectl creation command when using filepath resources
:param operation - one of get, create, delete, replace
:param manifest - filepath to the manifest
'''
# Deletions are a special case
if operation == 'delete':
# Ensure we immediately remove requested resources with --now
return kubectl_success(operation, '-f', manifest, '--now')
else:
# Guard against an error re-creating the same manifest multiple times
if operation == 'create':
            # If we already have the definition, it's probably safe to assume
            # that creation succeeded.
if kubectl_success('get', '-f', manifest):
hookenv.log('Skipping definition for {}'.format(manifest))
return True
# Execute the requested command that did not match any of the special
# cases above
return kubectl_success(operation, '-f', manifest)
@when('nrpe-external-master.available')
@when_not('nrpe-external-master.initial-config')
def initial_nrpe_config(nagios=None):
set_state('nrpe-external-master.initial-config')
update_nrpe_config(nagios)
@when('kubernetes-worker.config.created')
@when('nrpe-external-master.available')
@when_any('config.changed.nagios_context',
'config.changed.nagios_servicegroups')
def update_nrpe_config(unused=None):
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
hostname = nrpe.get_nagios_hostname()
current_unit = nrpe.get_nagios_unit_name()
nrpe_setup = nrpe.NRPE(hostname=hostname)
nrpe.add_init_service_checks(nrpe_setup, services, current_unit)
nrpe_setup.write()
@when_not('nrpe-external-master.available')
@when('nrpe-external-master.initial-config')
def remove_nrpe_config(nagios=None):
remove_state('nrpe-external-master.initial-config')
# List of systemd services for which the checks will be removed
services = ('snap.kubelet.daemon', 'snap.kube-proxy.daemon')
# The current nrpe-external-master interface doesn't handle a lot of logic,
# use the charm-helpers code for now.
hostname = nrpe.get_nagios_hostname()
nrpe_setup = nrpe.NRPE(hostname=hostname)
for service in services:
nrpe_setup.remove_check(shortname=service)
def set_privileged():
"""Return 'true' if privileged containers are needed.
    This is the case when a) the user explicitly requested them, or
    b) the user does not care (auto) and GPUs are available on a
    pre-1.9 cluster.
"""
privileged = hookenv.config('allow-privileged').lower()
gpu_needs_privileged = (is_state('kubernetes-worker.gpu.enabled') and
get_version('kubelet') < (1, 9))
if privileged == 'auto':
privileged = 'true' if gpu_needs_privileged else 'false'
if privileged == 'false' and gpu_needs_privileged:
disable_gpu()
remove_state('kubernetes-worker.gpu.enabled')
# No need to restart kubernetes (set the restart-needed state)
# because set-privileged is already in the restart path
return privileged
@when('config.changed.allow-privileged')
@when('kubernetes-worker.config.created')
def on_config_allow_privileged_change():
"""React to changed 'allow-privileged' config value.
"""
set_state('kubernetes-worker.restart-needed')
remove_state('config.changed.allow-privileged')
@when('nvidia-docker.installed')
@when('kubernetes-worker.config.created')
@when_not('kubernetes-worker.gpu.enabled')
def enable_gpu():
"""Enable GPU usage on this node.
"""
if get_version('kubelet') < (1, 9):
hookenv.status_set(
'active',
'Upgrade to snap channel >= 1.9/stable to enable GPU support.'
)
return
hookenv.log('Enabling gpu mode')
try:
# Not sure why this is necessary, but if you don't run this, k8s will
# think that the node has 0 gpus (as shown by the output of
        # `kubectl get nodes -o yaml`)
check_call(['nvidia-smi'])
except CalledProcessError as cpe:
hookenv.log('Unable to communicate with the NVIDIA driver.')
hookenv.log(cpe)
return
set_label('gpu', 'true')
set_label('cuda', 'true')
set_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
@when('kubernetes-worker.gpu.enabled')
@when_not('nvidia-docker.installed')
@when_not('kubernetes-worker.restart-needed')
def nvidia_departed():
    """CUDA departed, probably due to the docker layer switching to a
    runtime other than nvidia-docker."""
disable_gpu()
remove_state('kubernetes-worker.gpu.enabled')
set_state('kubernetes-worker.restart-needed')
def disable_gpu():
"""Disable GPU usage on this node.
"""
hookenv.log('Disabling gpu mode')
# Remove node labels
remove_label('gpu')
remove_label('cuda')
@when('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_enabled(kube_control):
"""Notify kubernetes-master that we're gpu-enabled.
"""
kube_control.set_gpu(True)
@when_not('kubernetes-worker.gpu.enabled')
@when('kube-control.connected')
def notify_master_gpu_not_enabled(kube_control):
"""Notify kubernetes-master that we're not gpu-enabled.
"""
kube_control.set_gpu(False)
@when('kube-control.connected')
def request_kubelet_and_proxy_credentials(kube_control):
""" Request kubelet node authorization with a well formed kubelet user.
This also implies that we are requesting kube-proxy auth. """
    # The kube-control interface is created to support RBAC.
# At this point we might as well do the right thing and return the hostname
# even if it will only be used when we enable RBAC
nodeuser = 'system:node:{}'.format(get_node_name().lower())
kube_control.set_auth_request(nodeuser)
@when('kube-control.connected')
def catch_change_in_creds(kube_control):
"""Request a service restart in case credential updates were detected."""
nodeuser = 'system:node:{}'.format(get_node_name().lower())
creds = kube_control.get_auth_credentials(nodeuser)
if creds and creds['user'] == nodeuser:
# We need to cache the credentials here because if the
        # master changes (master leader dies and is replaced by a new one)
# the new master will have no recollection of our certs.
db.set('credentials', creds)
set_state('worker.auth.bootstrapped')
if data_changed('kube-control.creds', creds):
set_state('kubernetes-worker.restart-needed')
@when_not('kube-control.connected')
def missing_kube_control():
"""Inform the operator they need to add the kube-control relation.
    If deploying via bundle this won't happen, but if the operator is upgrading
    a charm in a deployment that pre-dates the kube-control relation, it'll be
missing.
"""
try:
goal_state = hookenv.goal_state()
except NotImplementedError:
goal_state = {}
if 'kube-control' in goal_state.get('relations', {}):
hookenv.status_set(
'waiting',
'Waiting for kubernetes-master to become ready')
else:
hookenv.status_set(
'blocked',
'Relate {}:kube-control kubernetes-master:kube-control'.format(
hookenv.service_name()))
@when('docker.ready')
def fix_iptables_for_docker_1_13():
""" Fix iptables FORWARD policy for Docker >=1.13
https://github.com/kubernetes/kubernetes/issues/40182
https://github.com/kubernetes/kubernetes/issues/39823
"""
cmd = ['iptables', '-w', '300', '-P', 'FORWARD', 'ACCEPT']
check_call(cmd)
def _systemctl_is_active(application):
''' Poll systemctl to determine if the application is running '''
cmd = ['systemctl', 'is-active', application]
try:
raw = check_output(cmd)
return b'active' in raw
except Exception:
return False
def get_node_name():
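    '''Return the node name this unit registers with: the FQDN when the AWS
    cloud provider is in use, otherwise the plain hostname (lowercased).
    '''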
kubelet_extra_args = parse_extra_args('kubelet-extra-args')
cloud_provider = kubelet_extra_args.get('cloud-provider', '')
if is_state('endpoint.aws.ready'):
cloud_provider = 'aws'
elif is_state('endpoint.gcp.ready'):
cloud_provider = 'gce'
elif is_state('endpoint.openstack.ready'):
cloud_provider = 'openstack'
elif is_state('endpoint.vsphere.ready'):
cloud_provider = 'vsphere'
elif is_state('endpoint.azure.ready'):
cloud_provider = 'azure'
if cloud_provider == 'aws':
return getfqdn().lower()
else:
return gethostname().lower()
class ApplyNodeLabelFailed(Exception):
pass
def persistent_call(cmd, retry_message):
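    '''Run cmd repeatedly (roughly once per second for up to 180 seconds)
    until it exits successfully. Logs retry_message between attempts and
    returns True on success, False if the deadline is reached.
    '''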
deadline = time.time() + 180
while time.time() < deadline:
code = subprocess.call(cmd)
if code == 0:
return True
hookenv.log(retry_message)
time.sleep(1)
else:
return False
def set_label(label, value):
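    '''Apply (or overwrite) a node label via kubectl, retrying until it
    succeeds or raising ApplyNodeLabelFailed.
    '''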
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}={3} --overwrite'
cmd = cmd.format(kubeconfig_path, nodename, label, value)
cmd = cmd.split()
retry = 'Failed to apply label %s=%s. Will retry.' % (label, value)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
def remove_label(label):
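    '''Remove a node label via kubectl, retrying until it succeeds or raising
    ApplyNodeLabelFailed.
    '''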
nodename = get_node_name()
cmd = 'kubectl --kubeconfig={0} label node {1} {2}-'
cmd = cmd.format(kubeconfig_path, nodename, label)
cmd = cmd.split()
retry = 'Failed to remove label {0}. Will retry.'.format(label)
if not persistent_call(cmd, retry):
raise ApplyNodeLabelFailed(retry)
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.openstack.joined',
'endpoint.vsphere.joined',
'endpoint.azure.joined')
@when_not('kubernetes-worker.cloud.ready')
def set_cloud_pending():
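    '''Mark cloud integration as pending. Additionally mark it blocked when
    the running Kubernetes version is too old for the joined cloud (vSphere
    needs 1.12+, Azure needs 1.11+).
    '''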
k8s_version = get_version('kubelet')
k8s_1_11 = k8s_version >= (1, 11)
k8s_1_12 = k8s_version >= (1, 12)
vsphere_joined = is_state('endpoint.vsphere.joined')
azure_joined = is_state('endpoint.azure.joined')
if (vsphere_joined and not k8s_1_12) or (azure_joined and not k8s_1_11):
set_state('kubernetes-worker.cloud.blocked')
else:
remove_state('kubernetes-worker.cloud.blocked')
set_state('kubernetes-worker.cloud.pending')
@when_any('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.azure.joined')
@when('kube-control.cluster_tag.available')
@when_not('kubernetes-worker.cloud.request-sent')
def request_integration():
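    '''Ask the joined cloud integrator (AWS, GCP or Azure) to tag or label
    this instance with the cluster tag and to enable the cloud features the
    worker needs.
    '''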
hookenv.status_set('maintenance', 'requesting cloud integration')
kube_control = endpoint_from_flag('kube-control.cluster_tag.available')
cluster_tag = kube_control.get_cluster_tag()
if is_state('endpoint.aws.joined'):
cloud = endpoint_from_flag('endpoint.aws.joined')
cloud.tag_instance({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_security_group({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.tag_instance_subnet({
'kubernetes.io/cluster/{}'.format(cluster_tag): 'owned',
})
cloud.enable_object_storage_management(['kubernetes-*'])
elif is_state('endpoint.gcp.joined'):
cloud = endpoint_from_flag('endpoint.gcp.joined')
cloud.label_instance({
'k8s-io-cluster-name': cluster_tag,
})
cloud.enable_object_storage_management()
elif is_state('endpoint.azure.joined'):
cloud = endpoint_from_flag('endpoint.azure.joined')
cloud.tag_instance({
'k8s-io-cluster-name': cluster_tag,
})
cloud.enable_object_storage_management()
cloud.enable_instance_inspection()
cloud.enable_dns_management()
set_state('kubernetes-worker.cloud.request-sent')
hookenv.status_set('waiting', 'Waiting for cloud integration')
@when_none('endpoint.aws.joined',
'endpoint.gcp.joined',
'endpoint.openstack.joined',
'endpoint.vsphere.joined',
'endpoint.azure.joined')
def clear_cloud_flags():
remove_state('kubernetes-worker.cloud.pending')
remove_state('kubernetes-worker.cloud.request-sent')
remove_state('kubernetes-worker.cloud.blocked')
remove_state('kubernetes-worker.cloud.ready')
@when_any('endpoint.aws.ready',
'endpoint.gcp.ready',
'endpoint.openstack.ready',
'endpoint.vsphere.ready',
'endpoint.azure.ready')
@when_not('kubernetes-worker.cloud.blocked',
'kubernetes-worker.cloud.ready')
def cloud_ready():
remove_state('kubernetes-worker.cloud.pending')
if is_state('endpoint.gcp.ready'):
_write_gcp_snap_config('kubelet')
elif is_state('endpoint.openstack.ready'):
_write_openstack_snap_config('kubelet')
elif is_state('endpoint.azure.ready'):
_write_azure_snap_config('kubelet')
set_state('kubernetes-worker.cloud.ready')
set_state('kubernetes-worker.restart-needed') # force restart
def _snap_common_path(component):
return Path('/var/snap/{}/common'.format(component))
def _cloud_config_path(component):
return _snap_common_path(component) / 'cloud-config.conf'
def _gcp_creds_path(component):
return _snap_common_path(component) / 'gcp-creds.json'
def _daemon_env_path(component):
return _snap_common_path(component) / 'environment'
def _write_gcp_snap_config(component):
# gcp requires additional credentials setup
gcp = endpoint_from_flag('endpoint.gcp.ready')
creds_path = _gcp_creds_path(component)
with creds_path.open('w') as fp:
os.fchmod(fp.fileno(), 0o600)
fp.write(gcp.credentials)
# create a cloud-config file that sets token-url to nil to make the
# services use the creds env var instead of the metadata server, as
# well as making the cluster multizone
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('[Global]\n'
'token-url = nil\n'
'multizone = true\n')
daemon_env_path = _daemon_env_path(component)
if daemon_env_path.exists():
daemon_env = daemon_env_path.read_text()
if not daemon_env.endswith('\n'):
daemon_env += '\n'
else:
daemon_env = ''
if gcp_creds_env_key not in daemon_env:
daemon_env += '{}={}\n'.format(gcp_creds_env_key, creds_path)
daemon_env_path.parent.mkdir(parents=True, exist_ok=True)
daemon_env_path.write_text(daemon_env)
def _write_openstack_snap_config(component):
# openstack requires additional credentials setup
openstack = endpoint_from_flag('endpoint.openstack.ready')
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text('\n'.join([
'[Global]',
'auth-url = {}'.format(openstack.auth_url),
'username = {}'.format(openstack.username),
'password = {}'.format(openstack.password),
'tenant-name = {}'.format(openstack.project_name),
'domain-name = {}'.format(openstack.user_domain_name),
]))
def _write_azure_snap_config(component):
azure = endpoint_from_flag('endpoint.azure.ready')
cloud_config_path = _cloud_config_path(component)
cloud_config_path.write_text(json.dumps({
'useInstanceMetadata': True,
'useManagedIdentityExtension': True,
'subscriptionId': azure.subscription_id,
'resourceGroup': azure.resource_group,
'location': azure.resource_group_location,
'vnetName': azure.vnet_name,
'vnetResourceGroup': azure.vnet_resource_group,
'subnetName': azure.subnet_name,
'securityGroupName': azure.security_group_name,
}))
def get_first_mount(mount_relation):
mount_relation_list = mount_relation.mounts()
if mount_relation_list and len(mount_relation_list) > 0:
# mount relation list is a list of the mount layer relations
# for now we just use the first one that is nfs
for mount in mount_relation_list:
# for now we just check the first mount and use that.
# the nfs charm only supports one for now.
if ('mounts' in mount and
mount['mounts'][0]['fstype'] == 'nfs'):
return mount['mounts'][0]
return None
@when('nfs.available')
def nfs_state_control(mount):
    ''' Determine if we should remove the state that controls the re-render
    and execution of the nfs-relation-changed event. If the relationship data
    has changed, the state is removed so that any configs are re-rendered. '''
mount_data = get_first_mount(mount)
if mount_data:
nfs_relation_data = {
'options': mount_data['options'],
'host': mount_data['hostname'],
'mountpoint': mount_data['mountpoint'],
'fstype': mount_data['fstype']
}
# Re-execute the rendering if the data has changed.
if data_changed('nfs-config', nfs_relation_data):
hookenv.log('reconfiguring nfs')
remove_state('nfs.configured')
@when('nfs.available')
@when_not('nfs.configured')
def nfs_storage(mount):
'''NFS on kubernetes requires nfs config rendered into a deployment of
the nfs client provisioner. That will handle the persistent volume claims
with no persistent volume to back them.'''
mount_data = get_first_mount(mount)
if not mount_data:
return
addon_path = '/root/cdk/addons/{}'
# Render the NFS deployment
manifest = addon_path.format('nfs-provisioner.yaml')
render('nfs-provisioner.yaml', manifest, mount_data)
hookenv.log('Creating the nfs provisioner.')
try:
kubectl('apply', '-f', manifest)
except CalledProcessError as e:
hookenv.log(e)
hookenv.log('Failed to create nfs provisioner. Will attempt again next update.') # noqa
return
set_state('nfs.configured')
| apache-2.0 |
chafique-delli/OpenUpgrade | addons/website/tests/test_views.py | 221 | 8517 | # -*- coding: utf-8 -*-
import itertools
import unittest2
from lxml import etree as ET, html
from lxml.html import builder as h
from openerp.tests import common
def attrs(**kwargs):
return dict(('data-oe-%s' % key, str(value)) for key, value in kwargs.iteritems())
class TestViewSaving(common.TransactionCase):
def eq(self, a, b):
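        """Recursively assert that two lxml nodes share the same tag,
        attributes, text, tail and children."""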
self.assertEqual(a.tag, b.tag)
self.assertEqual(a.attrib, b.attrib)
self.assertEqual((a.text or '').strip(), (b.text or '').strip())
self.assertEqual((a.tail or '').strip(), (b.tail or '').strip())
for ca, cb in itertools.izip_longest(a, b):
self.eq(ca, cb)
def setUp(self):
super(TestViewSaving, self).setUp()
self.arch = h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')))
))
)
self.view_id = self.registry('ir.ui.view').create(self.cr, self.uid, {
'name': "Test View",
'type': 'qweb',
'arch': ET.tostring(self.arch, encoding='utf-8').decode('utf-8')
})
def test_embedded_extraction(self):
fields = self.registry('ir.ui.view').extract_embedded_fields(
self.cr, self.uid, self.arch, context=None)
expect = [
h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char')),
h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')),
]
for actual, expected in itertools.izip_longest(fields, expect):
self.eq(actual, expected)
def test_embedded_save(self):
embedded = h.SPAN("+00 00 000 00 0 000", attrs(
model='res.company', id=1, field='phone', type='char'))
self.registry('ir.ui.view').save_embedded_field(self.cr, self.uid, embedded)
company = self.registry('res.company').browse(self.cr, self.uid, 1)
self.assertEqual(company.phone, "+00 00 000 00 0 000")
@unittest2.skip("save conflict for embedded (saved by third party or previous version in page) not implemented")
def test_embedded_conflict(self):
e1 = h.SPAN("My Company", attrs(model='res.company', id=1, field='name'))
e2 = h.SPAN("Leeroy Jenkins", attrs(model='res.company', id=1, field='name'))
View = self.registry('ir.ui.view')
View.save_embedded_field(self.cr, self.uid, e1)
# FIXME: more precise exception
with self.assertRaises(Exception):
View.save_embedded_field(self.cr, self.uid, e2)
def test_embedded_to_field_ref(self):
View = self.registry('ir.ui.view')
embedded = h.SPAN("My Company", attrs(expression="bob"))
self.eq(
View.to_field_ref(self.cr, self.uid, embedded, context=None),
h.SPAN({'t-field': 'bob'})
)
def test_to_field_ref_keep_attributes(self):
View = self.registry('ir.ui.view')
att = attrs(expression="bob", model="res.company", id=1, field="name")
att['id'] = "whop"
att['class'] = "foo bar"
embedded = h.SPAN("My Company", att)
self.eq(View.to_field_ref(self.cr, self.uid, embedded, context=None),
h.SPAN({'t-field': 'bob', 'class': 'foo bar', 'id': 'whop'}))
def test_replace_arch(self):
replacement = h.P("Wheee")
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, None, replacement)
self.eq(result, h.DIV("Wheee"))
def test_replace_arch_2(self):
replacement = h.DIV(h.P("Wheee"))
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, None, replacement)
self.eq(result, replacement)
def test_fixup_arch(self):
replacement = h.H1("I am the greatest title alive!")
result = self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, '/div/div[1]/h3',
replacement)
self.eq(result, h.DIV(
h.DIV(
h.H3("I am the greatest title alive!"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN("+00 00 000 00 0 000", attrs(model='res.company', id=1, field='phone', type='char')))
))
))
def test_multiple_xpath_matches(self):
with self.assertRaises(ValueError):
self.registry('ir.ui.view').replace_arch_section(
self.cr, self.uid, self.view_id, '/div/div/h3',
h.H6("Lol nope"))
def test_save(self):
Company = self.registry('res.company')
View = self.registry('ir.ui.view')
replacement = ET.tostring(h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("wob wob wob"),
h.LI(h.SPAN("Acme Corporation", attrs(model='res.company', id=1, field='name', expression="bob", type='char'))),
h.LI(h.SPAN("+12 3456789", attrs(model='res.company', id=1, field='phone', expression="edmund", type='char'))),
)
), encoding='utf-8')
View.save(self.cr, self.uid, res_id=self.view_id, value=replacement,
xpath='/div/div[2]')
company = Company.browse(self.cr, self.uid, 1)
self.assertEqual(company.name, "Acme Corporation")
self.assertEqual(company.phone, "+12 3456789")
self.eq(
ET.fromstring(View.browse(self.cr, self.uid, self.view_id).arch.encode('utf-8')),
h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("wob wob wob"),
h.LI(h.SPAN({'t-field': "bob"})),
h.LI(h.SPAN({'t-field': "edmund"}))
))
)
)
def test_save_only_embedded(self):
Company = self.registry('res.company')
company_id = 1
Company.write(self.cr, self.uid, company_id, {'name': "Foo Corporation"})
node = html.tostring(h.SPAN(
"Acme Corporation",
attrs(model='res.company', id=company_id, field="name", expression='bob', type='char')))
        self.registry('ir.ui.view').save(self.cr, self.uid, res_id=company_id, value=node)
company = Company.browse(self.cr, self.uid, company_id)
self.assertEqual(company.name, "Acme Corporation")
def test_field_tail(self):
View = self.registry('ir.ui.view')
replacement = ET.tostring(
h.LI(h.SPAN("+12 3456789", attrs(
model='res.company', id=1, type='char',
field='phone', expression="edmund")),
"whop whop"
), encoding="utf-8")
        View.save(self.cr, self.uid, res_id=self.view_id, value=replacement,
xpath='/div/div[2]/ul/li[3]')
self.eq(
ET.fromstring(View.browse(self.cr, self.uid, self.view_id).arch.encode('utf-8')),
h.DIV(
h.DIV(
h.H3("Column 1"),
h.UL(
h.LI("Item 1"),
h.LI("Item 2"),
h.LI("Item 3"))),
h.DIV(
h.H3("Column 2"),
h.UL(
h.LI("Item 1"),
h.LI(h.SPAN("My Company", attrs(model='res.company', id=1, field='name', type='char'))),
h.LI(h.SPAN({'t-field': "edmund"}), "whop whop"),
))
)
)
| agpl-3.0 |
mvaled/OpenUpgrade | addons/event/__openerp__.py | 261 | 2296 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Events Organisation',
'version': '0.1',
'website' : 'https://www.odoo.com/page/events',
'category': 'Tools',
'summary': 'Trainings, Conferences, Meetings, Exhibitions, Registrations',
'description': """
Organization and management of Events.
======================================
The event module allows you to efficiently organise events and all related tasks: planning, registration tracking,
attendances, etc.
Key Features
------------
* Manage your Events and Registrations
* Use emails to automatically confirm and send acknowledgements for any event registration
""",
'author': 'OpenERP SA',
'depends': ['base_setup', 'board', 'email_template', 'marketing'],
'data': [
'security/event_security.xml',
'security/ir.model.access.csv',
'wizard/event_confirm_view.xml',
'event_view.xml',
'event_data.xml',
'report/report_event_registration_view.xml',
'res_partner_view.xml',
'email_template.xml',
'views/event.xml',
],
'demo': [
'event_demo.xml',
],
'test': [
'test/ui/event_users.yml',
'test/process/event_draft2done.yml'
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gangadharkadam/office_erp | erpnext/hr/doctype/appraisal/appraisal.py | 35 | 2346 | # Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate
from frappe import _
from frappe.model.mapper import get_mapped_doc
from frappe.model.document import Document
from erpnext.hr.utils import set_employee_name
class Appraisal(Document):
def validate(self):
if not self.status:
self.status = "Draft"
set_employee_name(self)
self.validate_dates()
self.validate_existing_appraisal()
self.calculate_total()
def get_employee_name(self):
self.employee_name = frappe.db.get_value("Employee", self.employee, "employee_name")
return self.employee_name
def validate_dates(self):
if getdate(self.start_date) > getdate(self.end_date):
frappe.throw(_("End Date can not be less than Start Date"))
def validate_existing_appraisal(self):
chk = frappe.db.sql("""select name from `tabAppraisal` where employee=%s
and (status='Submitted' or status='Completed')
and ((start_date>=%s and start_date<=%s)
or (end_date>=%s and end_date<=%s))""",
(self.employee,self.start_date,self.end_date,self.start_date,self.end_date))
if chk:
frappe.throw(_("Appraisal {0} created for Employee {1} in the given date range").format(chk[0][0], self.employee_name))
def calculate_total(self):
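		"""Compute score_earned for every appraisal goal, check that the
		weightages add up to 100% and store the overall total_score."""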
total, total_w = 0, 0
for d in self.get('appraisal_details'):
if d.score:
d.score_earned = flt(d.score) * flt(d.per_weightage) / 100
total = total + d.score_earned
total_w += flt(d.per_weightage)
if int(total_w) != 100:
frappe.throw(_("Total weightage assigned should be 100%. It is {0}").format(str(total_w) + "%"))
if frappe.db.get_value("Employee", self.employee, "user_id") != \
frappe.session.user and total == 0:
frappe.throw(_("Total cannot be zero"))
self.total_score = total
def on_submit(self):
frappe.db.set(self, 'status', 'Submitted')
def on_cancel(self):
frappe.db.set(self, 'status', 'Cancelled')
@frappe.whitelist()
def fetch_appraisal_template(source_name, target_doc=None):
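	"""Map an Appraisal Template and its goals onto a new Appraisal document."""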
target_doc = get_mapped_doc("Appraisal Template", source_name, {
"Appraisal Template": {
"doctype": "Appraisal",
},
"Appraisal Template Goal": {
"doctype": "Appraisal Goal",
}
}, target_doc)
return target_doc
| agpl-3.0 |
dparshin/phantomjs | src/breakpad/src/third_party/protobuf/protobuf/python/stubout.py | 671 | 4940 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
import inspect
class StubOutForTesting:
"""Sample Usage:
You want os.path.exists() to always return true during testing.
stubs = StubOutForTesting()
stubs.Set(os.path, 'exists', lambda x: 1)
...
stubs.UnsetAll()
The above changes os.path.exists into a lambda that returns 1. Once
the ... part of the code finishes, the UnsetAll() looks up the old value
of os.path.exists and restores it.
"""
def __init__(self):
self.cache = []
self.stubs = []
def __del__(self):
self.SmartUnsetAll()
self.UnsetAll()
def SmartSet(self, obj, attr_name, new_attr):
"""Replace obj.attr_name with new_attr. This method is smart and works
at the module, class, and instance level while preserving proper
inheritance. It will not stub out C types however unless that has been
explicitly allowed by the type.
This method supports the case where attr_name is a staticmethod or a
classmethod of obj.
Notes:
- If obj is an instance, then it is its class that will actually be
stubbed. Note that the method Set() does not do that: if obj is
an instance, it (and not its class) will be stubbed.
- The stubbing is using the builtin getattr and setattr. So, the __get__
and __set__ will be called when stubbing (TODO: A better idea would
probably be to manipulate obj.__dict__ instead of getattr() and
setattr()).
Raises AttributeError if the attribute cannot be found.
"""
if (inspect.ismodule(obj) or
(not inspect.isclass(obj) and obj.__dict__.has_key(attr_name))):
orig_obj = obj
orig_attr = getattr(obj, attr_name)
else:
if not inspect.isclass(obj):
mro = list(inspect.getmro(obj.__class__))
else:
mro = list(inspect.getmro(obj))
mro.reverse()
orig_attr = None
for cls in mro:
try:
orig_obj = cls
orig_attr = getattr(obj, attr_name)
except AttributeError:
continue
if orig_attr is None:
raise AttributeError("Attribute not found.")
# Calling getattr() on a staticmethod transforms it to a 'normal' function.
# We need to ensure that we put it back as a staticmethod.
old_attribute = obj.__dict__.get(attr_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
orig_attr = staticmethod(orig_attr)
self.stubs.append((orig_obj, attr_name, orig_attr))
setattr(orig_obj, attr_name, new_attr)
def SmartUnsetAll(self):
"""Reverses all the SmartSet() calls, restoring things to their original
    definition. It's okay to call SmartUnsetAll() repeatedly, as later calls
have no effect if no SmartSet() calls have been made.
"""
self.stubs.reverse()
for args in self.stubs:
setattr(*args)
self.stubs = []
def Set(self, parent, child_name, new_child):
"""Replace child_name's old definition with new_child, in the context
of the given parent. The parent could be a module when the child is a
function at module scope. Or the parent could be a class when a class'
method is being replaced. The named child is set to new_child, while
the prior definition is saved away for later, when UnsetAll() is called.
This method supports the case where child_name is a staticmethod or a
classmethod of parent.
"""
old_child = getattr(parent, child_name)
old_attribute = parent.__dict__.get(child_name)
if old_attribute is not None and isinstance(old_attribute, staticmethod):
old_child = staticmethod(old_child)
self.cache.append((parent, old_child, child_name))
setattr(parent, child_name, new_child)
def UnsetAll(self):
"""Reverses all the Set() calls, restoring things to their original
    definition. It's okay to call UnsetAll() repeatedly, as later calls have
no effect if no Set() calls have been made.
"""
# Undo calls to Set() in reverse order, in case Set() was called on the
# same arguments repeatedly (want the original call to be last one undone)
self.cache.reverse()
for (parent, old_child, child_name) in self.cache:
setattr(parent, child_name, old_child)
self.cache = []
| bsd-3-clause |
blazek/QGIS | python/plugins/processing/algs/qgis/KNearestConcaveHull.py | 4 | 21488 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
KNearestConcaveHull.py
----------------------
Date : November 2014
Copyright : (C) 2014 by Detlev Neumann
Dr. Neumann Consulting - Geospatial Services
Email : [email protected]
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Detlev Neumann'
__date__ = 'November 2014'
__copyright__ = '(C) 2014, Detlev Neumann'
import os.path
import math
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsApplication,
QgsExpression,
QgsFeature,
QgsFeatureRequest,
QgsFeatureSink,
QgsField,
QgsFields,
QgsGeometry,
QgsProcessing,
QgsProcessingException,
QgsProcessingParameterFeatureSink,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterField,
QgsProcessingParameterNumber,
QgsPoint,
QgsPointXY,
QgsWkbTypes)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class KNearestConcaveHull(QgisAlgorithm):
KNEIGHBORS = 'KNEIGHBORS'
INPUT = 'INPUT'
OUTPUT = 'OUTPUT'
FIELD = 'FIELD'
def name(self):
return 'knearestconcavehull'
def displayName(self):
return self.tr('Concave hull (k-nearest neighbor)')
def shortDescription(self):
return self.tr('Creates a concave hull using the k-nearest neighbor algorithm.')
def icon(self):
return QgsApplication.getThemeIcon("/algorithms/mAlgorithmConcaveHull.svg")
def svgIconPath(self):
return QgsApplication.iconPath("/algorithms/mAlgorithmConcaveHull.svg")
def group(self):
return self.tr('Vector geometry')
def groupId(self):
return 'vectorgeometry'
def __init__(self):
super().__init__()
def initAlgorithm(self, config=None):
self.addParameter(QgsProcessingParameterFeatureSource(self.INPUT,
self.tr('Input layer')))
self.addParameter(QgsProcessingParameterNumber(self.KNEIGHBORS,
self.tr('Number of neighboring points to consider (a lower number is more concave, a higher number is smoother)'),
QgsProcessingParameterNumber.Integer,
defaultValue=3, minValue=3))
self.addParameter(QgsProcessingParameterField(self.FIELD,
self.tr('Field (set if creating concave hulls by class)'),
parentLayerParameterName=self.INPUT, optional=True))
self.addParameter(QgsProcessingParameterFeatureSink(self.OUTPUT, self.tr('Concave hull'),
QgsProcessing.TypeVectorPolygon))
def processAlgorithm(self, parameters, context, feedback):
# Get variables from dialog
source = self.parameterAsSource(parameters, self.INPUT, context)
if source is None:
raise QgsProcessingException(self.invalidSourceError(parameters, self.INPUT))
field_name = self.parameterAsString(parameters, self.FIELD, context)
kneighbors = self.parameterAsInt(parameters, self.KNEIGHBORS, context)
use_field = bool(field_name)
field_index = -1
fields = QgsFields()
fields.append(QgsField('id', QVariant.Int, '', 20))
current = 0
# Get properties of the field the grouping is based on
if use_field:
field_index = source.fields().lookupField(field_name)
if field_index >= 0:
fields.append(source.fields()[field_index]) # Add a field with the name of the grouping field
# Initialize writer
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, QgsWkbTypes.Polygon, source.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
success = False
fid = 0
# Get unique values of grouping field
unique_values = source.uniqueValues(field_index)
total = 100.0 / float(source.featureCount() * len(unique_values))
for unique in unique_values:
points = []
filter = QgsExpression.createFieldEqualityExpression(field_name, unique)
request = QgsFeatureRequest().setFilterExpression(filter)
request.setSubsetOfAttributes([])
# Get features with the grouping attribute equal to the current grouping value
features = source.getFeatures(request)
for in_feature in features:
if feedback.isCanceled():
break
# Add points or vertices of more complex geometry
points.extend(extract_points(in_feature.geometry()))
current += 1
feedback.setProgress(int(current * total))
# A minimum of 3 points is necessary to proceed
if len(points) >= 3:
out_feature = QgsFeature()
the_hull = concave_hull(points, kneighbors)
if the_hull:
vertex = [QgsPointXY(point[0], point[1]) for point in the_hull]
poly = QgsGeometry().fromPolygonXY([vertex])
out_feature.setGeometry(poly)
# Give the polygon the same attribute as the point grouping attribute
out_feature.setAttributes([fid, unique])
sink.addFeature(out_feature, QgsFeatureSink.FastInsert)
success = True # at least one polygon created
fid += 1
if not success:
raise QgsProcessingException('No hulls could be created. Most likely there were not at least three unique points in any of the groups.')
else:
# Field parameter provided but can't read from it
raise QgsProcessingException('Unable to find grouping field')
else:
# Not grouped by field
# Initialize writer
(sink, dest_id) = self.parameterAsSink(parameters, self.OUTPUT, context,
fields, QgsWkbTypes.Polygon, source.sourceCrs())
if sink is None:
raise QgsProcessingException(self.invalidSinkError(parameters, self.OUTPUT))
points = []
request = QgsFeatureRequest()
request.setSubsetOfAttributes([])
features = source.getFeatures(request) # Get all features
total = 100.0 / source.featureCount() if source.featureCount() else 0
for in_feature in features:
if feedback.isCanceled():
break
# Add points or vertices of more complex geometry
points.extend(extract_points(in_feature.geometry()))
current += 1
feedback.setProgress(int(current * total))
# A minimum of 3 points is necessary to proceed
if len(points) >= 3:
out_feature = QgsFeature()
the_hull = concave_hull(points, kneighbors)
if the_hull:
vertex = [QgsPointXY(point[0], point[1]) for point in the_hull]
poly = QgsGeometry().fromPolygonXY([vertex])
out_feature.setGeometry(poly)
out_feature.setAttributes([0])
sink.addFeature(out_feature, QgsFeatureSink.FastInsert)
else:
# the_hull returns None only when there are less than three points after cleaning
raise QgsProcessingException('At least three unique points are required to create a concave hull.')
else:
raise QgsProcessingException('At least three points are required to create a concave hull.')
return {self.OUTPUT: dest_id}
def clean_list(list_of_points):
"""
    Returns list_of_points with duplicate points removed (the original order is not preserved)
"""
return list(set(list_of_points))
def find_min_y_point(list_of_points):
"""
Returns that point of *list_of_points* having minimal y-coordinate
:param list_of_points: list of tuples
:return: tuple (x, y)
"""
min_y_pt = list_of_points[0]
for point in list_of_points[1:]:
if point[1] < min_y_pt[1] or (point[1] == min_y_pt[1] and point[0] < min_y_pt[0]):
min_y_pt = point
return min_y_pt
def add_point(vector, element):
"""
    Returns vector with the given element appended to the right
"""
vector.append(element)
return vector
def remove_point(vector, element):
"""
    Removes the given element from vector (in place) and returns the vector
"""
vector.pop(vector.index(element))
return vector
def euclidian_distance(point1, point2):
"""
    Returns the Euclidean distance between the two given points.
:param point1: tuple (x, y)
:param point2: tuple (x, y)
:return: float
"""
return math.sqrt(math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2))
def nearest_points(list_of_points, point, k):
"""
    Returns a list of the k points from list_of_points that are closest to the specified point. The measure
    of proximity is the Euclidean distance. Internally, k becomes the minimum between the given value for k and the
    number of points in list_of_points.
:param list_of_points: list of tuples
:param point: tuple (x, y)
:param k: integer
:return: list of k tuples
"""
# build a list of tuples of distances between point *point* and every point in *list_of_points*, and
# their respective index of list *list_of_distances*
list_of_distances = []
for index in range(len(list_of_points)):
list_of_distances.append((euclidian_distance(list_of_points[index], point), index))
# sort distances in ascending order
list_of_distances.sort()
# get the k nearest neighbors of point
nearest_list = []
for index in range(min(k, len(list_of_points))):
nearest_list.append((list_of_points[list_of_distances[index][1]]))
return nearest_list
def angle(from_point, to_point):
"""
Returns the angle of the directed line segment, going from *from_point* to *to_point*, in radians. The angle is
    positive for segments with upward direction (north), otherwise negative (south). Values range from 0 at the
right (east) to pi at the left side (west).
:param from_point: tuple (x, y)
:param to_point: tuple (x, y)
:return: float
"""
return math.atan2(to_point[1] - from_point[1], to_point[0] - from_point[0])
def angle_difference(angle1, angle2):
"""
Calculates the difference between the given angles in clockwise direction as radians.
:param angle1: float
:param angle2: float
:return: float; between 0 and 2*Pi
"""
if (angle1 > 0 and angle2 >= 0) and angle1 > angle2:
return abs(angle1 - angle2)
elif (angle1 >= 0 and angle2 > 0) and angle1 < angle2:
return 2 * math.pi + angle1 - angle2
elif (angle1 < 0 and angle2 <= 0) and angle1 < angle2:
return 2 * math.pi + angle1 + abs(angle2)
elif (angle1 <= 0 and angle2 < 0) and angle1 > angle2:
return abs(angle1 - angle2)
elif angle1 <= 0 < angle2:
return 2 * math.pi + angle1 - angle2
elif angle1 >= 0 >= angle2:
return angle1 + abs(angle2)
else:
return 0
def intersect(line1, line2):
"""
Returns True if the two given line segments intersect each other, and False otherwise.
:param line1: 2-tuple of tuple (x, y)
:param line2: 2-tuple of tuple (x, y)
:return: boolean
"""
a1 = line1[1][1] - line1[0][1]
b1 = line1[0][0] - line1[1][0]
c1 = a1 * line1[0][0] + b1 * line1[0][1]
a2 = line2[1][1] - line2[0][1]
b2 = line2[0][0] - line2[1][0]
c2 = a2 * line2[0][0] + b2 * line2[0][1]
tmp = (a1 * b2 - a2 * b1)
if tmp == 0:
return False
sx = (c1 * b2 - c2 * b1) / tmp
if (sx > line1[0][0] and sx > line1[1][0]) or (sx > line2[0][0] and sx > line2[1][0]) or\
(sx < line1[0][0] and sx < line1[1][0]) or (sx < line2[0][0] and sx < line2[1][0]):
return False
sy = (a1 * c2 - a2 * c1) / tmp
if (sy > line1[0][1] and sy > line1[1][1]) or (sy > line2[0][1] and sy > line2[1][1]) or\
(sy < line1[0][1] and sy < line1[1][1]) or (sy < line2[0][1] and sy < line2[1][1]):
return False
return True
def point_in_polygon_q(point, list_of_points):
"""
    Return True if the given point *point* lies in the polygon described by the vertices *list_of_points*,
otherwise False
Based on the "Ray Casting Method" described by Joel Lawhead in this blog article:
http://geospatialpython.com/2011/01/point-in-polygon.html
"""
x = point[0]
y = point[1]
poly = [(pt[0], pt[1]) for pt in list_of_points]
n = len(poly)
inside = False
p1x, p1y = poly[0]
for i in range(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x, p1y = p2x, p2y
return inside
def extract_points(geom):
"""
Generate list of QgsPoints from QgsGeometry *geom* ( can be point, line, or polygon )
Code taken from fTools plugin
:param geom: an arbitrary geometry feature
:return: list of points
"""
multi_geom = QgsGeometry()
temp_geom = []
# point geometry
if geom.type() == 0:
if geom.isMultipart():
temp_geom = geom.asMultiPoint()
else:
temp_geom.append(geom.asPoint())
# line geometry
if geom.type() == 1:
# if multipart feature explode to single part
if geom.isMultipart():
multi_geom = geom.asMultiPolyline()
for i in multi_geom:
temp_geom.extend(i)
else:
temp_geom = geom.asPolyline()
# polygon geometry
elif geom.type() == 2:
# if multipart feature explode to single part
if geom.isMultipart():
multi_geom = geom.asMultiPolygon()
# now single part polygons
for i in multi_geom:
# explode to line segments
for j in i:
temp_geom.extend(j)
else:
multi_geom = geom.asPolygon()
# explode to line segments
for i in multi_geom:
temp_geom.extend(i)
return temp_geom
def sort_by_angle(list_of_points, last_point, last_angle):
"""
returns the points in list_of_points in descending order of angle to the last segment of the envelope, measured
in a clockwise direction. Thus, the rightmost of the neighboring points is always selected. The first point of
this list will be the next point of the envelope.
"""
def getkey(item):
return angle_difference(last_angle, angle(last_point, item))
vertex_list = sorted(list_of_points, key=getkey, reverse=True)
return vertex_list
def concave_hull(points_list, k):
"""
Calculates a valid concave hull polygon containing all given points. The algorithm searches for that
point in the neighborhood of k nearest neighbors which maximizes the rotation angle in clockwise direction
without intersecting any previous line segments.
This is an implementation of the algorithm described by Adriano Moreira and Maribel Yasmina Santos:
    CONCAVE HULL: A K-NEAREST NEIGHBOURS APPROACH FOR THE COMPUTATION OF THE REGION OCCUPIED BY A SET OF POINTS.
GRAPP 2007 - International Conference on Computer Graphics Theory and Applications; pp 61-68.
:param points_list: list of tuples (x, y)
:param k: integer
:return: list of tuples (x, y)
"""
    # the number of neighbors to consider cannot exceed the number of given points
if k > len(points_list):
k = len(points_list)
# the number of nearest neighbors k must be greater than or equal to 3
kk = max(k, 3)
# delete duplicate points
point_set = clean_list(points_list)
# if point_set has less then 3 points no polygon can be created and an empty list will be returned
if len(point_set) < 3:
return None
# if point_set has 3 points then these are already vertices of the hull. Append the first point to
# close the hull polygon
if len(point_set) == 3:
return add_point(point_set, point_set[0])
# make sure that k neighbors can be found
kk = min(kk, len(point_set))
# start with the point having the smallest y-coordinate (most southern point)
first_point = find_min_y_point(point_set)
# add this points as the first vertex of the hull
hull = [first_point]
# make the first vertex of the hull to the current point
current_point = first_point
# remove the point from the point_set, to prevent him being among the nearest points
point_set = remove_point(point_set, first_point)
previous_angle = math.pi
# step counts the number of segments
step = 2
# as long as point_set is not empty or search is returning to the starting point
while (current_point != first_point) or (step == 2) and (len(point_set) > 0):
# after 3 iterations add the first point to point_set again, otherwise a hull cannot be closed
if step == 5:
point_set = add_point(point_set, first_point)
# search the k nearest neighbors of the current point
k_nearest_points = nearest_points(point_set, current_point, kk)
# sort the candidates (neighbors) in descending order of right-hand turn. This way the algorithm progresses
# in clockwise direction through as many points as possible
c_points = sort_by_angle(k_nearest_points, current_point, previous_angle)
its = True
i = -1
# search for the nearest point to which the connecting line does not intersect any existing segment
while its is True and (i < len(c_points) - 1):
i += 1
if c_points[i] == first_point:
last_point = 1
else:
last_point = 0
j = 2
its = False
while its is False and (j < len(hull) - last_point):
its = intersect((hull[step - 2], c_points[i]), (hull[step - 2 - j], hull[step - 1 - j]))
j += 1
        # there is no candidate to which the connecting line does not intersect any existing segment, so the
        # search for the next candidate fails. The algorithm starts again with an increased number of neighbors
if its is True:
return concave_hull(points_list, kk + 1)
# the first point which complies with the requirements is added to the hull and gets the current point
current_point = c_points[i]
hull = add_point(hull, current_point)
# calculate the angle between the last vertex and his precursor, that is the last segment of the hull
# in reversed direction
previous_angle = angle(hull[step - 1], hull[step - 2])
# remove current_point from point_set
point_set = remove_point(point_set, current_point)
# increment counter
step += 1
all_inside = True
i = len(point_set) - 1
# check if all points are within the created polygon
while (all_inside is True) and (i >= 0):
all_inside = point_in_polygon_q(point_set[i], hull)
i -= 1
# since at least one point is out of the computed polygon, try again with a higher number of neighbors
if all_inside is False:
return concave_hull(points_list, kk + 1)
# a valid hull has been constructed
return hull
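# Minimal usage sketch of concave_hull() outside of the processing framework
# (illustrative coordinates only, assuming plain (x, y) tuples):
#
#     pts = [(0, 0), (4, 0), (4, 4), (0, 4), (2, 1), (1, 3), (3, 3)]
#     ring = concave_hull(pts, 3)
#     # 'ring' is a closed list of vertices: its first and last point coincide.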
| gpl-2.0 |
gaddman/ansible | test/units/modules/network/f5/test_bigip_monitor_gateway_icmp.py | 21 | 4431 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_monitor_gateway_icmp import ApiParameters
from library.modules.bigip_monitor_gateway_icmp import ModuleParameters
from library.modules.bigip_monitor_gateway_icmp import ModuleManager
from library.modules.bigip_monitor_gateway_icmp import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_monitor_gateway_icmp import ApiParameters
from ansible.modules.network.f5.bigip_monitor_gateway_icmp import ModuleParameters
from ansible.modules.network.f5.bigip_monitor_gateway_icmp import ModuleManager
from ansible.modules.network.f5.bigip_monitor_gateway_icmp import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
parent='/Common/gateway-icmp',
interval=10,
time_until_up=0,
timeout=30,
)
p = ModuleParameters(params=args)
assert p.parent == '/Common/gateway-icmp'
assert p.interval == 10
assert p.time_until_up == 0
assert p.timeout == 30
def test_api_parameters(self):
args = dict(
defaultsFrom='/Common/gateway-icmp',
interval=10,
timeUntilUp=0,
timeout=30,
)
p = ApiParameters(params=args)
assert p.parent == '/Common/gateway-icmp'
assert p.interval == 10
assert p.time_until_up == 0
assert p.timeout == 30
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create(self, *args):
set_module_args(dict(
name='foo',
parent='/Common/gateway-icmp',
interval=20,
timeout=30,
time_until_up=60,
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
def test_create_with_description(self, *args):
set_module_args(dict(
name='foo',
parent='/Common/gateway-icmp',
interval=20,
timeout=30,
time_until_up=60,
description='Important Description',
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods in the specific type of manager
mm = ModuleManager(module=module)
mm.exists = Mock(side_effect=[False, True])
mm.create_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
aisipos/django | django/utils/http.py | 47 | 12708 | from __future__ import unicode_literals
import base64
import calendar
import datetime
import re
import sys
import unicodedata
from binascii import Error as BinasciiError
from email.utils import formatdate
from django.core.exceptions import TooManyFieldsSent
from django.utils import six
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import force_bytes, force_str, force_text
from django.utils.functional import keep_lazy_text
from django.utils.six.moves.urllib.parse import (
quote, quote_plus, unquote, unquote_plus, urlencode as original_urlencode,
urlparse,
)
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
RFC3986_GENDELIMS = str(":/?#[]@")
RFC3986_SUBDELIMS = str("!$&'()*+,;=")
FIELDS_MATCH = re.compile('[&;]')
@keep_lazy_text
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_text(quote(force_str(url), force_str(safe)))
@keep_lazy_text
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_text(quote_plus(force_str(url), force_str(safe)))
@keep_lazy_text
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_text(unquote(force_str(quoted_url)))
@keep_lazy_text
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_text(unquote_plus(force_str(quoted_url)))
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return original_urlencode(
[(force_str(k),
[force_str(i) for i in v] if isinstance(v, (list, tuple)) else force_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC7231 section 7.1.1.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
return formatdate(epoch_seconds, usegmt=True)
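# Illustrative example (added; not in the original source). The timestamp is the
# classic RFC 7231 example date:
#   >>> http_date(784111777)
#   'Sun, 06 Nov 1994 08:49:37 GMT'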
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC7231 section 7.1.1.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
Returns an integer expressed in seconds since the epoch, in UTC.
"""
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC7231 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
six.reraise(ValueError, ValueError("%r is not a valid date" % date), sys.exc_info()[2])
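# Illustrative example (added; not in the original source):
#   >>> parse_http_date('Sun, 06 Nov 1994 08:49:37 GMT')
#   784111777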
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int to avoid
# returning a long (#15067). The long type was removed in Python 3.
if six.PY2 and value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
char_set = '0123456789abcdefghijklmnopqrstuvwxyz'
if i < 0:
raise ValueError("Negative base36 conversion input.")
if six.PY2:
if not isinstance(i, six.integer_types):
raise TypeError("Non-integer base36 conversion input.")
if i > sys.maxint:
raise ValueError("Base36 conversion input too large.")
if i < 36:
return char_set[i]
b36 = ''
while i != 0:
i, n = divmod(i, 36)
b36 = char_set[n] + b36
return b36
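# Illustrative round-trip example (added; not in the original source):
#   >>> int_to_base36(36)
#   '10'
#   >>> base36_to_int('10')
#   36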
def urlsafe_base64_encode(s):
"""
Encodes a bytestring in base64 for use in URLs, stripping any trailing
equal signs.
"""
return base64.urlsafe_b64encode(s).rstrip(b'\n=')
def urlsafe_base64_decode(s):
"""
Decodes a base64 encoded string, adding back any trailing equal signs that
might have been stripped.
"""
s = force_bytes(s)
try:
return base64.urlsafe_b64decode(s.ljust(len(s) + len(s) % 4, b'='))
except (LookupError, BinasciiError) as e:
raise ValueError(e)
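# Illustrative example (added; not in the original source; return types shown are
# the Python 3 ones):
#   >>> urlsafe_base64_encode(b'abc')
#   b'YWJj'
#   >>> urlsafe_base64_decode('YWJj')
#   b'abc'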
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.encode('ascii').decode('unicode_escape') for e in etags]
return etags
def quote_etag(etag):
"""
Wraps a string in double quotes escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
def unquote_etag(etag):
"""
Unquote an ETag string; i.e. revert quote_etag().
"""
return etag.strip('"').replace('\\"', '"').replace('\\\\', '\\') if etag else etag
def is_same_domain(host, pattern):
"""
Return ``True`` if the host is either an exact match or a match
to the wildcard pattern.
Any pattern beginning with a period matches a domain and all of its
subdomains. (e.g. ``.example.com`` matches ``example.com`` and
``foo.example.com``). Anything else is an exact string match.
"""
if not pattern:
return False
pattern = pattern.lower()
return (
pattern[0] == '.' and (host.endswith(pattern) or host == pattern[1:]) or
pattern == host
)
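# Illustrative examples (added; not in the original source):
#   >>> is_same_domain('foo.example.com', '.example.com')
#   True
#   >>> is_same_domain('example.com', '.example.com')
#   True
#   >>> is_same_domain('evil.com', '.example.com')
#   False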
def is_safe_url(url, host=None):
"""
Return ``True`` if the url is a safe redirection (i.e. it doesn't point to
a different host and uses a safe scheme).
Always returns ``False`` on an empty url.
"""
if url is not None:
url = url.strip()
if not url:
return False
if six.PY2:
try:
url = force_text(url)
except UnicodeDecodeError:
return False
# Chrome treats \ completely as / in paths but it could be part of some
# basic auth credentials so we need to check both URLs.
return _is_safe_url(url, host) and _is_safe_url(url.replace('\\', '/'), host)
def _is_safe_url(url, host):
# Chrome considers any URL with more than two slashes to be absolute, but
# urlparse is not so flexible. Treat any url with three slashes as unsafe.
if url.startswith('///'):
return False
url_info = urlparse(url)
# Forbid URLs like http:///example.com - with a scheme, but without a hostname.
    # In that URL, example.com is not the hostname but a path component. However,
# Chrome will still consider example.com to be the hostname, so we must not
# allow this syntax.
if not url_info.netloc and url_info.scheme:
return False
# Forbid URLs that start with control characters. Some browsers (like
# Chrome) ignore quite a few control characters at the start of a
# URL and might consider the URL as scheme relative.
if unicodedata.category(url[0])[0] == 'C':
return False
return ((not url_info.netloc or url_info.netloc == host) and
(not url_info.scheme or url_info.scheme in ['http', 'https']))
def limited_parse_qsl(qs, keep_blank_values=False, encoding='utf-8',
errors='replace', fields_limit=None):
"""
Return a list of key/value tuples parsed from query string.
Copied from urlparse with an additional "fields_limit" argument.
Copyright (C) 2013 Python Software Foundation (see LICENSE.python).
Arguments:
qs: percent-encoded query string to be parsed
keep_blank_values: flag indicating whether blank values in
percent-encoded queries should be treated as blank strings. A
true value indicates that blanks should be retained as blank
strings. The default false value indicates that blank values
are to be ignored and treated as if they were not included.
encoding and errors: specify how to decode percent-encoded sequences
into Unicode characters, as accepted by the bytes.decode() method.
fields_limit: maximum number of fields parsed or an exception
is raised. None means no limit and is the default.
"""
if fields_limit:
pairs = FIELDS_MATCH.split(qs, fields_limit)
if len(pairs) > fields_limit:
raise TooManyFieldsSent(
'The number of GET/POST parameters exceeded '
'settings.DATA_UPLOAD_MAX_NUMBER_FIELDS.'
)
else:
pairs = FIELDS_MATCH.split(qs)
r = []
for name_value in pairs:
if not name_value:
continue
nv = name_value.split(str('='), 1)
if len(nv) != 2:
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
if six.PY3:
name = nv[0].replace('+', ' ')
name = unquote(name, encoding=encoding, errors=errors)
value = nv[1].replace('+', ' ')
value = unquote(value, encoding=encoding, errors=errors)
else:
name = unquote(nv[0].replace(b'+', b' '))
value = unquote(nv[1].replace(b'+', b' '))
r.append((name, value))
return r
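# Illustrative example (added; not in the original source):
#   >>> limited_parse_qsl('a=1&b=2&b=3')
#   [('a', '1'), ('b', '2'), ('b', '3')]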
| bsd-3-clause |
cjhak/b2share | invenio/legacy/bibclassify/templates.py | 14 | 18916 | # This file is part of Invenio.
# Copyright (C) 2010, 2011, 2013, 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
"""Template for the bibclassify -
this module is NOT standalone safe - it is not expected to be
used in a standalone mode ever.
Some template variables are coming directly from the config
module, those starting with CFG_BIBCLASSIFY_WEB....
"""
import cgi
from invenio import config
from invenio.base.i18n import gettext_set_language
from urllib import quote
from invenio.utils.html import escape_html
import config as bconfig
log = bconfig.get_logger("bibclassify.template")
class Template:
def tmpl_page(self,
keywords=None,
top='',
middle='',
bottom='',
navbar=None,
req=None,
ln=None,
generate=None,
sorting=None,
type=None,
numbering=None,
showall=None):
"""This function generates the final output for every bibclassify page - it is called
from the other templating functions to finalize the output. This way, all the logic
        about routing (which page to display) will rest with the web interface, and templates
        care only about output.
@keyword keywords: keywords to display
@keyword top: string, what to put at top
@keyword middle: string
@keyword bottom: string
@keyword navbar: if supplied, we will not add the generic navigation bar
@keyword req: wsgi req object
        -- all the remaining keyword parameters are common with the tmpl_page_... calls
@return: html string
"""
if navbar is None:
navbar = self.tmpl_snippet_sorting_options(keywords,
ln=ln,
generate=generate,
sorting=sorting,
type=type,
numbering=numbering,
showall=showall)
        # well, integration with other modules needs to get better (but for now this will do)
bottom += self.call_external_modules(keywords=keywords,
req=req,
ln=ln,
generate=generate,
sorting=sorting,
type=type,
numbering=numbering,
showall=showall)
#thread_id, cache = reader.test_cache()
#bottom += 'This is thread id: %s, cache id: %s, main cache: %s' % (thread_id, id(cache), id(reader._CACHE))
top = top and '<div class="bibclassify-top"> %s </div>' % top or ''
return '''
<div class="bibclassify">
<div class="bibclassify-nav"> %s </div>
%s
%s
<div class="bibclassify-bottom"> %s </div>
</div>''' % (navbar, top, middle, bottom)
def tmpl_page_msg(self, req=None, ln=None, msg=None):
return self.tmpl_page(middle=msg)
def tmpl_page_tagcloud(self, keywords,
req=None,
ln=None,
generate=None,
sorting=None,
type=None,
numbering=None,
showall=None):
"""Writes the html of the tag cloud
@var keywords: dictionary of KeywordToken objects
key is a KeywordToken object
value is a list: [[(pos1,pos1), (pos2,pos2)..], font-level]
@return: str, html page
"""
# Define the range of fonts.
f_min = 12
f_increment = 3
f_number = 8
fonts = [f_min + i * f_increment for i in range(f_number)]
# compute font levels
_get_font_levels(keywords, no_steps=f_number)
_ = gettext_set_language(ln)
msg = _("Automatically generated <span class=\"keyword single\">single</span>,\
<span class=\"keyword composite\">composite</span>, <span class=\"keyword author-kw\">author</span>,\
and <span class=\"keyword other-kw\">other keywords</span>.")
cloud = []
cloud.append('<div class="tagcloud" levels="%s">' % (' '.join(map(lambda x: '%spx' % x, fonts))))
format_link = self.tmpl_href
max = config.CFG_BIBCLASSIFY_WEB_MAXKW or 1000
i = 0
if numbering == 'on':
for kw, info in keywords.items()[0:max]:
cloud.append('<span style="font-size: %spx;">%s (%s)</span>' %
(fonts[info[-1]],
format_link(kw, ln),
len(info[0])))
else:
for kw, info in keywords.items()[0:max]:
cloud.append('<span style="font-size: %spx;">%s </span>' %
(fonts[info[-1]],
format_link(kw, ln)))
cloud.append('</div>')
cloud = '''
<div class="cloud">
%s
</div>''' % ('\n'.join(cloud))
return self.tmpl_page(keywords=keywords, bottom=msg, middle=cloud,
req=req,
ln=ln,
generate=generate,
sorting=sorting,
type=type,
numbering=numbering,
showall=showall)
def tmpl_page_list(self, keywords,
req=None,
ln=None,
generate=None,
sorting=None,
type=None,
numbering=None,
showall=None):
"""Page with keywords as a list"""
_ = gettext_set_language(ln)
kw = self.tmpl_list_of_keywords(keywords,
ln=ln,
generate=generate,
sorting=sorting,
type=type,
numbering=numbering,
showall=showall)
        msg = _("Automatically generated <span class=\"keyword single\">single</span>,\
            <span class=\"keyword composite\">composite</span>, <span class=\"keyword author-kw\">author</span>,\
            and <span class=\"keyword other-kw\">other keywords</span>.")
return self.tmpl_page(keywords=keywords, middle=kw, bottom=msg,
req=req,
ln=ln,
generate=generate,
sorting=sorting,
type=type,
numbering=numbering,
showall=showall)
def tmpl_page_xml_output(self, keywords, xml=None,
req=None,
ln=None,
generate=None,
sorting=None,
type=None,
numbering=None,
showall=None):
kw = '<pre class="bibclassify-marcxml"><code>%s</code></pre>' % escape_html(xml)
return self.tmpl_page(keywords, middle=kw,
ln=ln,
generate=generate,
sorting=sorting,
type=type,
numbering=numbering,
showall=showall)
def tmpl_page_generate_keywords(self,
req=None,
ln=None,
generate=None,
sorting=None,
type=None,
numbering=None,
showall=None):
""" Text to return when no keywords are found"""
_ = gettext_set_language(ln)
msg = '''
<form action="" method="get">
%s
<input type="hidden" name="generate" value="yes">
<input type="submit" value="%s">
</form>''' % (_('Automated keyword extraction wasn\'t run for this document yet.'), _('Generate keywords') )
return self.tmpl_page(top=msg,
ln=ln,
generate=generate,
sorting=sorting,
type=type,
numbering=numbering,
showall=showall)
def tmpl_page_no_keywords(self, ln=None, generate=None, sorting=None, type=None, numbering=None, showall=None):
_ = gettext_set_language(ln)
return self.tmpl_page(top=_('There are no suitable keywords for display in this record.'),
navbar='',
ln=ln,
generate=generate,
sorting=sorting,
type=type,
numbering=numbering,
showall=showall)
def tmpl_list_of_keywords(self, keywords,
ln=None,
generate=None,
sorting=None,
type=None,
numbering=None,
showall=None):
"""Formats the list of keywords - no distinction is made
between weighted or not """
_ = gettext_set_language(ln)
format_link = self.tmpl_href
s_keywords = map(lambda x: (x[0], 1000 - len(x[1][0]), len(x[1][0])), keywords.items())
        # need to sort by highest weight (reverse) and then alphabetically
        # that's why the subtraction above
s_keywords.sort(key=lambda x: (x[1], str(x[0])), reverse=False)
if showall != 'on':
s_keywords = s_keywords[0:config.CFG_BIBCLASSIFY_WEB_MAXKW]
out = []
if numbering == 'on':
for kw, weight, real_weight in s_keywords[0:config.CFG_BIBCLASSIFY_WEB_MAXKW]:
out.append('%s (%s)' % (format_link(kw, ln), real_weight))
else:
for kw, weight, real_weight in s_keywords[0:config.CFG_BIBCLASSIFY_WEB_MAXKW]:
out.append(format_link(kw, ln))
if len(keywords) > len(s_keywords):
out.append('<a href="%s" class="moreinfo %s">%s</a>' %
('?ln=%s&type=list&sorting=%s&showall=on' % (ln, sorting),
'show-more',
_("Show more...")))
half = int(len(out) / 2)
out = '<div class="kw-list">%s</div><div class="kw-list">%s</div>' % (
'<br/>'.join(out[0:half]), '<br/>'.join(out[half:]))
return '''
<div class="bibclassify-kwlist">
%s
<hr />
</div>''' % (out)
def tmpl_format_list_of_keywords(self, keywords,
ln=None,
generate=None,
sorting=None,
type=None,
numbering=None,
showall=None):
"""Formats the list of keywords"""
_ = gettext_set_language(ln)
format_link = self.tmpl_href
sorted_keywords = _get_sorted_keywords(keywords)
        _numbering = numbering == 'on'
out = []
for type in ('composite', 'single'):
if sorted_keywords['unweighted'][type]:
out.append('<b>%s</b>' % _('Unweighted %(x_name)s keywords:', x_name=type))
for keyword, info in sorted_keywords['unweighted'][type]:
out.append(format_link(keyword, ln))
for type in ('composite', 'single'):
if sorted_keywords['weighted'][type]:
out.append('<b>%s</b>' % _('Weighted %(x_name)s keywords:', x_name=type))
for keyword, info in sorted_keywords['weighted'][type]:
if _numbering:
out.append("%s (%d)" % (format_link(keyword, ln), len(info[0])))
else:
out.append(format_link(keyword, ln))
return '''
<div class="cloud">
%s
</div>''' % ('<br/>'.join(out))
def tmpl_search_link(self, keyword, ln):
"""Returns a link that searches for a keyword."""
return """%s/search?f=keyword&p=%s&ln=%s""" % (
config.CFG_SITE_URL,
quote('"%s"' % keyword),
ln)
def tmpl_href(self, keyword, ln):
return '<a href="%s" class="keyword %s %s">%s</a>' % (
self.tmpl_search_link(keyword, ln), keyword.getType(), keyword.isComposite() and 'composite' or 'single',
cgi.escape(str(keyword)))
def tmpl_snippet_sorting_options(self, keywords,
ln=None,
generate=None,
sorting=None,
type=None,
numbering=None,
showall=None
):
"""Returns the HTML view of the sorting options. Takes care of
enabling only some options based on the page shown."""
if not keywords:
return ''
_ = gettext_set_language(ln)
out = '<b>%s:</b>\n' % _('Keywords')
for (_type, label) in ( ('tagcloud', _('tag cloud')),
('list', _('list')),
('xml', _('XML')) ):
k = {'langlink': ln, 'type': _type, 'sorting': sorting, 'label': _(label)}
if _type not in type:
out += '[ <a href="?ln=%(langlink)s&type=%(type)s&sorting=%(sorting)s">%(label)s</a> ]' % k
else:
out += '[ %(label)s ]' % k
out += '\n<br/>\n'
"""
out += '<b>Sort keywords:</b>\n'
for (sort_type, label) in ( ('occurences', 'by occurences'),
('related', 'by related documents'),):
k = {'langlink' : ln, 'type': type_arg, 'sort' : sort_type, 'label' : _(label)}
if sort_type not in sort_arg:
out += '[ <a href="?ln=%(langlink)s&type=%(type)s&sort=%(sort)s">%(label)s</a> ]' % k
else:
out += '[ %(label)s ]' % k
"""
return ('''<div class="nav-links">
%s
</div>''' % out)
def call_external_modules(self, **kwargs):
"""Give external modules chance to change bibclassify output
- so far, there is no clear way how to discover modules etc.
It is hardcoded now."""
_modules = bconfig.CFG_EXTERNAL_MODULES
out = ''
for m, v in _modules.items():
try:
if not callable(v):
x = __import__(m, globals=globals(), locals={})
if hasattr(x, v):
v = getattr(x, v)
_modules[m] = v
else:
raise Exception("The registered call %s does not exist in the module %s" % (v, m))
result = v('bibclassify', **kwargs)
if result and isinstance(result, str):
out += result
else:
log.error("Module %s returned wrong results? %s" % (m, str(result)[:50]))
except Exception as msg:
log.error("Error importing module: %s" % (m))
log.error(msg)
del (_modules[m])
return out
def _get_sorted_keywords(keywords):
"""Returns a list of keywords."""
# Separate keywords with and without weight, and single and
# composite keywords.
sorted_keywords = {
'unweighted': {'single': [], 'composite': []},
'weighted': {'single': [], 'composite': []}
}
for k, info in keywords.items():
if len(info[0]) > 0:
state = 'weighted'
else:
state = 'unweighted'
if k.isComposite():
sorted_keywords[state]['composite'].append([k, info])
else:
sorted_keywords[state]['single'].append([k, info])
for type in ('single', 'composite'):
sorted_keywords['unweighted'][type].sort(key=lambda x: str(x[0]).lower()) #keyword label
sorted_keywords['weighted'][type].sort(key=lambda x: len(x[1][0])) # number of spans
return sorted_keywords
def _get_font_levels(keywords, no_steps=8):
"""Takes keywords dictionary {keyw1: [[], ]....}
computes the fontlevel and adds it to the dictionary
@return: nothing, it changes keywords dictionary directly"""
if not keywords:
return keywords
# Extract the weights from the keywords.
try:
weights = map(lambda x: len(x[0]), keywords.values())
except IndexError:
return keywords
# Define the range of fonts.
f_number = no_steps
# Get some necessary values.
w_min = float(min(weights))
w_max = float(max(weights))
# Compute the distribution function.
if w_max == w_min:
level = lambda weight: 1
else:
slope = f_number / (w_max - w_min)
y_intercept = - w_min * slope
level = lambda weight: int(slope * weight + y_intercept)
# Compute the font level for each weight.
for keyword, info in keywords.items():
w = level(len(info[0]))
if w >= f_number:
w = f_number - 1
info.append(w)
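# Worked example of the level computation above (added for clarity; not part of the
# original module): with keyword weights ranging from 1 to 9 and no_steps=8,
# slope = 8 / (9 - 1) = 1.0 and y_intercept = -1.0, so a keyword that occurs 5 times
# is assigned font level int(1.0 * 5 - 1.0) = 4.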
| gpl-2.0 |
reingart/web2conf | models/db_activity.py | 1 | 10359 | # -*- coding: utf-8 -*-
######################################
### MANAGE ACTIVITIES ("TALK" PROPOSALS)
######################################
autotranslate = lambda x: T(x) if not x in ("", None) else x
db.define_table('activity',
db.Field('authors',label=T("Authors"),default=('%s %s' %(auth.user.first_name, auth.user.last_name)) if auth.user else None),
db.Field('title',label=T("Title")),
db.Field('type','text',label=T("Type")),
db.Field('code', readable=False, writable=False,),
db.Field('duration','integer',label=T("Duration in minutes")), # era 45 min
db.Field('request_time_extension', 'boolean', readable=False, writable=False, default=False, label=T("Time extension"), comment=T("(explain why)")),
db.Field('cc',label=T("cc"), length=512, default="", readable=False, writable=False),
db.Field('abstract','text',label=T("Abstract")),
db.Field('description','text',label=T("Description"),widget=wysiwyg),
db.Field('categories','list:string',label=T("Categories")),
db.Field('level','string',label=T("Level"),represent=autotranslate),
db.Field('track','string',label=T("Track"),represent=autotranslate),
db.Field('logo','upload', comment=T("only used for sprints)")),
db.Field('scheduled_datetime','datetime',label=T("Scheduled Datetime"),writable=False,readable=False),
db.Field('scheduled_room',label=T("Scheduled Room"),requires=IS_EMPTY_OR(IS_IN_SET(sorted(ACTIVITY_ROOMS.items()))), writable=False,readable=False),
db.Field('status',default='pending',label=T("Status"),writable=False,readable=False),
db.Field('confirmed','boolean',default=False,writable=False,readable=False),
db.Field('video',length=128,label=T('Video'),default='',writable=False,readable=False),
db.Field('score','double',label=T("Score"),default=None,readable=False,writable=False),
db.Field('created_by',db.auth_user,label=T("Created By"),readable=False,writable=False,default=auth.user.id if auth.user else 0),
db.Field('created_on','datetime',label=T("Created On"),readable=False,writable=False,default=request.now),
db.Field('created_signature',label=T("Created Signature"),readable=False,writable=False,
default=('%s %s' % (auth.user.first_name,auth.user.last_name)) if auth.user else ''),
db.Field('modified_by','integer',label=T("Modified By"),readable=False,writable=False,default=auth.user.id if auth.user else 0),
db.Field('modified_on','datetime',label=T("Modified On"),readable=False,writable=False,default=request.now,update=request.now),
db.Field('notes', 'text', comment=T("Additional remarks"), label=T("Notes")),
db.Field('license', 'string', default="CC BY-SA, Atribución - Compartir derivadas de la misma forma.", label=T("License")),
format='%(title)s',
migrate=migrate, fake_migrate=fake_migrate)
db.define_table("partaker", Field("activity", db.activity),
Field("user_id", db.auth_user),
Field("add_me", "boolean", default=True, comment=T("Confirm my assistance")),
Field("comment", "text", comment=T("Write a comment for the project's owner")),
migrate=migrate, fake_migrate=fake_migrate)
if request.controller != 'appadmin':
db.activity.description.represent=lambda value: XML(value)
db.activity.title.requires=[IS_NOT_EMPTY(), IS_NOT_IN_DB(db,'activity.title')]
db.activity.authors.requires=IS_NOT_EMPTY()
db.activity.status.requires=IS_IN_SET(['pending','accepted','rejected', 'declined'])
db.activity.type.requires=IS_IN_SET([(k, T(k)) for k in ACTIVITY_TYPES])
db.activity.type.default=None
db.activity.level.requires=IS_IN_SET([(k, T(k)) for k in ACTIVITY_LEVELS])
db.activity.level.default=ACTIVITY_LEVELS[0]
db.activity.track.requires=IS_IN_SET([(k, T(k)) for k in ACTIVITY_TRACKS])
db.activity.track.default=ACTIVITY_TRACKS[0]
db.activity.abstract.requires=IS_NOT_EMPTY()
db.activity.abstract.represent=lambda x: MARKMIN(x, sep="br")
db.activity.abstract.comment= SPAN(T("WIKI format: "), A('MARKMIN', _target='_blank',
_href="http://web2py.com/examples/static/markmin.html",))
db.activity.description.requires=IS_NOT_EMPTY()
db.activity.categories.requires=IS_IN_SET(ACTIVITY_CATEGORIES,multiple=True)
##db.activity.displays=db.proposal.fields
db.activity.status.writable=db.activity.status.readable=auth.has_membership('manager')
db.activity.scheduled_datetime.writable=db.activity.scheduled_datetime.readable=auth.has_membership('manager')
db.activity.video.writable=db.activity.video.readable=auth.has_membership('reviewer')
db.activity.scheduled_datetime.writable=db.activity.scheduled_datetime.readable=auth.has_membership('manager')
db.activity.scheduled_room.writable=db.activity.scheduled_room.readable=auth.has_membership('manager')
db.activity.created_by.writable=db.activity.created_by.readable=auth.has_membership('manager')
db.activity.scheduled_room.represent = lambda x: x and ACTIVITY_ROOMS[int(x)] or ''
db.activity.represent=lambda activity: \
A('[%s] %s' % (activity.status,activity.title),
_href=URL(r=request,c='activity',f='display',args=[activity.id]))
db.activity.type.represent=lambda activity_type: T(activity_type and activity_type.replace("_", " ") or '')
db.activity.duration.represent=lambda activity_duration: activity_duration and ("%s min" % activity_duration) or 'n/a'
db.activity.notes.default = "Tipo de público: \nConocimientos previos: \nRequisitos Especiales: (hardware, materiales, ayuda financiera)"
db.define_table('activity_archived',db.activity,db.Field('activity_proposal',db.activity),
migrate=migrate, fake_migrate=fake_migrate)
db.define_table('attachment',
db.Field('activity_id',db.activity,label=T('ACTIVITY'),writable=False),
db.Field('name','string',label=T('Name')),
db.Field('description','text',label=T('Description')),
db.Field('file','upload',label=T('File')),
db.Field('file_data','blob',default=''),
db.Field('filename'),
db.Field('created_by','integer',label=T("Created By"),readable=False,writable=False,default=auth.user.id if auth.user else 0),
db.Field('created_on','datetime',label=T("Created On"),readable=False,writable=False,default=request.now),
migrate=migrate, fake_migrate=fake_migrate)
db.attachment.name.requires=IS_NOT_EMPTY()
db.attachment.file.requires=IS_NOT_EMPTY()
db.attachment.filename.requires=IS_NOT_EMPTY()
db.attachment.filename.comment=T("(new filename for downloads)")
db.define_table('comment',
db.Field('activity_id',db.activity,label=T('ACTIVITY'),writable=False),
db.Field('body','text',label=T('Body')),
db.Field('created_signature',label=T("Created Signature"),readable=False,writable=False,
default=('%s %s' % (auth.user.first_name,auth.user.last_name)) if auth.user else ''),
db.Field('created_by','integer',label=T("Created By"),readable=False,writable=False,default=auth.user.id if auth.user else 0),
db.Field('created_on','datetime',label=T("Created On"),readable=False,writable=False,default=request.now),
migrate=migrate, fake_migrate=fake_migrate)
db.comment.body.requires=IS_NOT_EMPTY()
db.define_table('review',
db.Field('activity_id',db.activity,label=T('ACTIVITY'),writable=False),
db.Field('rating','integer',label=T('Rating'),default=0),
db.Field('body','text',label=T('Body'),comment="Mensaje opcional para el autor / organizadores"),
db.Field('created_by','integer',label=T("Created By"),readable=False,writable=False,default=auth.user.id if auth.user else 0),
db.Field('created_signature',label=T("Created Signature"),readable=False,writable=False,
default=('%s %s' % (auth.user.first_name,auth.user.last_name)) if auth.user else ''),
db.Field('created_on','datetime',label=T("Created On"),readable=False,writable=False,default=request.now),
migrate=migrate, fake_migrate=fake_migrate)
#db.review.body.requires=IS_NOT_EMPTY()
db.review.rating.requires=IS_IN_SET([x for x in range(0,6)])
db.define_table('author',
db.Field('user_id', db.auth_user),
db.Field('activity_id', db.activity),
db.Field('created_by','integer',label=T("Created By"),readable=False,writable=False,default=auth.user.id if auth.user else 0),
db.Field('created_on','datetime',label=T("Created On"),readable=False,writable=False,default=request.now),
migrate=migrate, fake_migrate=fake_migrate)
def user_is_author(activity_id=None):
if not auth.is_logged_in() or (not request.args and activity_id is None) or not request.args[0].isdigit():
return False
if activity_id is None:
activity_id = request.args[0]
if db((db.author.user_id==auth.user_id)&(db.author.activity_id==activity_id)).count():
return True
def user_is_author_or_manager(activity_id=None):
allowed = False
if activity_id is not None:
project = db.activity[activity_id]
if project is not None:
if project.created_by == auth.user_id:
allowed = True
elif auth.has_membership(role="manager"):
allowed = True
return allowed
def activity_is_accepted():
if not request.args or not request.args[0].isdigit():
return False
if db((db.activity.id==request.args[0])&(db.activity.status=='accepted')).count():
return True
# TODO: enhance with proper tables...
TUTORIALS_LIST= cache.ram(request.env.path_info + ".tutorials",
lambda: [row.title for row in db(db.activity.status=='accepted').select(db.activity.title, orderby=db.activity.title)],
time_expire=60*5)
class IS_IN_SET_NOT_EMPTY(IS_IN_SET):
def __call__(self, value):
(values, error) = IS_IN_SET.__call__(self,value)
if not values:
return (values, self.error_message)
else:
return (values, error)
db.auth_user.tutorials.requires=IS_IN_SET(TUTORIALS_LIST,multiple=True)
db.auth_user.tutorials.comment=SPAN(T('(seleccione su preferencia de charlas para la organización del evento; '),
A('más información',_target='_blank',_href='/2011/activity/accepted'),T(", la disponibilidad y horarios pueden variar sin previo aviso)"))
ACTIVITY_LEVEL_HINT = {}
for i, level in enumerate(ACTIVITY_LEVELS):
ACTIVITY_LEVEL_HINT[level] = XML("◊"* (i+1),)
db.activity.code.requires = IS_EMPTY_OR(IS_NOT_IN_DB(db, db.activity.code))
| bsd-3-clause |
EmreAtes/spack | var/spack/repos/builtin/packages/r-lsei/package.py | 5 | 2004 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class RLsei(RPackage):
"""It contains functions that solve least squares linear regression
problems under linear equality/inequality constraints. Functions for
solving quadratic programming problems are also available, which
transform such problems into least squares ones first. It is developed
based on the 'Fortran' program of Lawson and Hanson (1974, 1995), which
is public domain and available at
<http://www.netlib.org/lawson-hanson>."""
homepage = "https://cran.r-project.org/package=lsei"
url = "https://cran.rstudio.com/src/contrib/lsei_1.2-0.tar.gz"
list_url = "https://cran.r-project.org/src/contrib/Archive/lsei"
version('1.2-0', '18a9322d7a79ecb86b8788645c4b7e3c')
| lgpl-2.1 |
GauravBh1010tt/DeepLearn | _deeplearn_utils/dl_text/lex_sem_ft.py | 1 | 7746 | """
** deeplean-ai.com **
** dl-lab **
created by :: GauravBh1010tt
"""
import pandas as pd
import numpy as np
import re
#from tqdm import tqdm
from nltk.corpus import wordnet
from nltk import bigrams, trigrams
from collections import Counter, defaultdict
from gensim.models import Word2Vec
from scipy.spatial.distance import cosine as cos
from stop_words import get_stop_words
from gensim import corpora, models
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
def tokenize(sent):
    return [x.strip() for x in re.split(r'(\W+)?', sent) if x.strip()]
#Number Of Words In A String(Returns Integer):
def length(val):
return len(val.split())
#Whether A String Is Subset Of Other(Returns 1 and 0):
def substringCheck(sen_A, sen_B):
if sen_A in sen_B or sen_B in sen_A:
return 1
else:
return 0
#Number Of Same Words In Two Sentences(Returns Float):
def overlap(sen_A, sen_B):
a = sen_A.split()
b = sen_B.split()
count = 0
for word_a in a:
for word_b in b:
if(word_a == word_b):
count += 1
return count
#Number Of Synonyms In Two Sentences(Returns Float):
def overlapSyn(sen_A, sen_B):
a = sen_A.split()
b = sen_B.split()
word_synonyms = []
for word in a:
for synset in wordnet.synsets(word):
for lemma in synset.lemma_names():
if lemma in b and lemma != word:
word_synonyms.append(lemma)
return len(word_synonyms)
#Forming Bag Of Words[BOW][Returns BOW Dictionary]:
def train_BOW(lst):
temp = []
for sent in lst:
temp.extend(sent.split())
counts = Counter(temp)
total_count = len(set(temp))
for word in counts:
counts[word] /= float(total_count)
return counts
#Sum Of BOW Values For A Sent[Returns Float]:
def Sum_BOW(sent, dic):
tot = 0.0
for word in sent.split():
try:
tot += dic[word]
except:
continue
return tot
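# Illustrative example (added; not in the original source):
#   >>> bow = train_BOW(['a b a'])
#   >>> Sum_BOW('a b', bow)   # 2/2 + 1/2
#   1.5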
#Training Bigram Model[Returns Dictionary of Dictionaries]:
def train_bigram(lst):
model = defaultdict(lambda: defaultdict(lambda: 0))
for sent in lst:
sent = sent.split()
for w1, w2 in bigrams(sent, pad_right=True, pad_left=True):
model[w1][w2] += 1
total_count = 0
for w1 in model:
total_count = float(sum(model[w1].values()))
for w2 in model[w1]:
model[w1][w2] /= total_count
return model
#Total Sum Of Bigram Probability Of A Sentence[Returns Float]:
def sum_bigram(sent, model):
sent = sent.split()
first = True
tot = 0
for i in range(len(sent)):
try:
if first:
tot += model[None][sent[i]]
first = False
else:
tot += model[sent[i-1]][sent[i]]
except:
continue
return tot
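# Illustrative usage sketch for the bigram helpers above (added; not part of the
# original module; the sentences are made up):
#   >>> bigram_model = train_bigram(['the cat sat', 'the cat ran'])
#   >>> sum_bigram('the cat sat', bigram_model)   # P(the|None) + P(cat|the) + P(sat|cat)
#   2.5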
#Training Trigram Model[Returns Dictionary of Dictionaries]:
def train_trigram(lst):
model = defaultdict(lambda: defaultdict(lambda: 0))
for sent in lst:
sent = sent.split()
for w1, w2, w3 in trigrams(sent, pad_right=True, pad_left=True):
            model[(w1, w2)][w3] += 1
total_count = 0
for w1,w2 in model:
total_count = float(sum(model[(w1, w2)].values()))
for w3 in model[(w1,w2)]:
            model[(w1, w2)][w3] /= total_count
    return model
#Total Sum Of Trigram Probability Of A Sentence[Returns Float]:
def sum_trigram(sent, model):
sent = sent.split()
first = True
second = True
tot = 0
for i in range(len(sent)):
try:
if first:
tot += model[None, None][sent[i]]
first = False
elif second:
tot += model[None, sent[i-1]][sent[i]]
second = False
else:
tot += model[sent[i-2], sent[i-1]][sent[i]]
except:
continue
return tot
#Word2Vec Training(Returns Vector):
def W2V_train(lst1, lst2):
vocab = []
for i in range(len(lst1)):
w1 = lst1[i]
w2 = lst2[i]
vocab.append(w1.split())
vocab.append(w2.split())
for temp in vocab:
for j in range(len(temp)):
temp[j] = temp[j].lower()
return Word2Vec(vocab)
#Returns The Cosine Distance Between The Word2Vec Sums Of All The Words In Two Sentences(Returns Float):
def W2V_Vec(sent_A, sent_B, vec):
if len(sent_A) <= 1:
sent_A += 'none'
elif len(sent_B) <= 1:
sent_B += 'none'
vec1 = 0
vec2 = 0
sent_A = tokenize(sent_A)
sent_B = tokenize(sent_B)
for word in sent_A:
if word not in ", . ? ! # $ % ^ & * ( ) { } [ ]".split():
try:
vec1 += vec[word]
except:
continue
for word in sent_B:
if word not in ", . ? ! # $ % ^ & * ( ) { } [ ]".split():
try:
vec2 += vec[word]
except:
continue
try:
result = cos(vec1, vec2)
except:
result = 0.0
if np.isnan(result):
return 0.0
else:
return result
#Trains LDA Model (Returns Model):
def LDA_train(doc):
red = []
en_stop = get_stop_words('en')
for d in doc:
try:
raw = d.lower()
tokens = tokenizer.tokenize(raw)
stopped_tokens = [i for i in tokens if not i in en_stop]
red.append(stopped_tokens)
except:
continue
print("Forming Dictionary.....")
dictionary = corpora.Dictionary(red)
print("Forming Corpus.....")
corpus = [dictionary.doc2bow(text) for text in red]
print("Training Model.....")
lda = models.ldamodel.LdaModel(corpus, num_topics=10, id2word = dictionary, passes=1)
return lda
#Returns Average Of Probability Of Word Present In LDA Model For Input Document(Returns Float):
def LDA(doc1, doc2, lda):
word = pd.DataFrame()
weight = pd.DataFrame()
vec1 = []
vec2 = []
for i in range(10):
vec1.append(0)
vec2.append(0)
for i in range(10):
a = []
wrd = []
wgt = []
for x in lda.print_topic(i).split():
if x != '+':
a.append(x)
for w in a:
t = w.split("*")
wrd.append(t[1][1:-1])
wgt.append(float(t[0]))
word[i] = wrd
weight[i] = wgt
num = 0
wrd1 = []
wrd2 = []
# print 'Vector Formation for doc1.....'
for d in doc1.split():
for i in range(10):
for j in range(10):
if d.lower() == word[i][j]:
vec1[j] += float(weight[i][j])
wrd1.append(word[i][j])
# print 'Vector Formation for doc2.....'
for d in doc2.split():
for i in range(10):
for j in range(10):
if d.lower() == word[i][j]:
vec2[i] += float(weight[i][j])
wrd2.append(word[i][j])
v1 = 0.0
v2 = 0.0
for i in range(10):
if vec1[i] >= v1:
t1 = i
v1 = vec1[i]
if vec2[i] >= v2:
t2 = i
v2 = vec2[i]
wrd1_list = list(set(wrd1))
wrd2_list = list(set(wrd2))
w1_len = len(wrd1_list)
w2_len = len(wrd2_list)
w1_new = []
w2_new = []
for i in range(w1_len):
d = wrd1_list[i]
for i in range(10):
if d != word[t2][i]:
w1_new.append(d)
for i in range(w2_len):
d = wrd2_list[i]
for i in range(10):
if d != word[t1][i]:
w2_new.append(d)
num = len(list(set(w1_new))) + len(set(w2_new))
try:
return num
except:
return 0.0
| mit |
philipn/sycamore | Sycamore/support/Captcha/Visual/Base.py | 2 | 2266 | """ Captcha.Visual.BAse
Base classes for visual CAPTCHAs. We use the Python Imaging Library
to manipulate these images.
"""
#
# PyCAPTCHA Package
# Copyright (C) 2004 Micah Dowty <[email protected]>
#
from Sycamore.support import Captcha
from PIL import Image
__all__ = ['ImageCaptcha', 'Layer']
class ImageCaptcha(Captcha.BaseCaptcha):
"""Base class for image-based CAPTCHA tests.
The render() function generates the CAPTCHA image at the given size by
combining Layer instances from self.layers, which should be created by
the subclass-defined getLayers().
"""
defaultSize = (256,96)
def __init__(self, *args, **kwargs):
Captcha.BaseCaptcha.__init__(self)
self._layers = self.getLayers(*args, **kwargs)
def getImage(self):
"""Get a PIL image representing this CAPTCHA test, creating it if necessary"""
if not self._image:
self._image = self.render()
return self._image
def getLayers(self):
"""Subclasses must override this to return a list of Layer instances to render.
Lists within the list of layers are recursively rendered.
"""
return []
def render(self, size=None):
"""Render this CAPTCHA, returning a PIL image"""
if size is None:
size = self.defaultSize
img = Image.new("RGB", size)
return self._renderList(self._layers, Image.new("RGB", size))
def _renderList(self, l, img):
for i in l:
if type(i) == tuple or type(i) == list:
img = self._renderList(i, img)
else:
img = i.render(img) or img
return img
class Layer(object):
"""A renderable object representing part of a CAPTCHA.
The render() function should return approximately the same result, regardless
of the image size. This means any randomization must occur in the constructor.
If the render() function returns something non-None, it is taken as an image to
replace the current image with. This can be used to implement transformations
that result in a separate image without having to copy the results back to the first.
"""
def render(self, img):
pass
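
# Illustrative sketch of a concrete Layer (added for clarity; not part of the
# original package): it fills the image with a solid colour chosen in the
# constructor, so all randomisation happens before render(), as required by the
# docstring above.
class _SolidColorLayer(Layer):
    def __init__(self, color=(128, 128, 128)):
        self.color = color

    def render(self, img):
        img.paste(self.color, (0, 0) + img.size)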
### The End ###
| gpl-2.0 |
m-kuhn/QGIS | python/plugins/processing/algs/grass7/ext/v_sample.py | 4 | 1496 | # -*- coding: utf-8 -*-
"""
***************************************************************************
v_sample.py
-----------
Date : February 2016
Copyright : (C) 2016 by Médéric Ribreux
Email : medspx at medspx dot fr
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Médéric Ribreux'
__date__ = 'February 2016'
__copyright__ = '(C) 2016, Médéric Ribreux'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
def processInputs(alg, parameters, context, feedback):
if 'input' in alg.exportedLayers:
return
# We need to import the vector with v.in.ogr
# and we can use r.external for the raster
alg.loadVectorLayerFromParameter('input', parameters, context, feedback, False)
alg.loadRasterLayerFromParameter('raster', parameters, context, True)
alg.postInputs(context)
| gpl-2.0 |
Volvagia356/Hotot | hotot/utils.py | 4 | 7507 | # -*- coding: UTF-8 -*-
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
import subprocess
import os
import sys
from webbrowser import _iscommand as is_command
import gtk
import mimetypes, mimetools
import urllib
import config
import locale
try: import i18n
except: from gettext import gettext as _
supported_locate = {
'en_US': 'en'
, 'zh_CN': 'zh_CN'
, 'ja_JP': 'ja'
, 'fr_FR': 'fr'
, 'es_ES': 'es'
, 'pt_BR': 'pt_BR'
}
_browser = ''
def open_webbrowser(uri):
'''open a URI in the registered default application
'''
## for proxychains
os.environ['LD_PRELOAD'] = ' '.join(
[ ld for ld in os.environ.get('LD_PRELOAD', '').split(' ') if 'libproxychains.so' not in ld ]
)
browser = 'xdg-open'
if sys.platform[:3] == "win":
browser = 'start'
subprocess.Popen([browser, uri])
def webkit_set_proxy_uri(scheme = None, host = None, port = None, user = None, passwd = None):
from ctypes import CDLL, c_void_p, c_char_p, c_int
try:
if os.name == 'nt':
libgobject = CDLL('libgobject-2.0-0.dll')
libsoup = CDLL('libsoup-2.4-1.dll')
libwebkit = CDLL('libwebkit-1.0-2.dll')
else:
libgobject = CDLL('libgobject-2.0.so.0')
libsoup = CDLL('libsoup-2.4.so.1')
try:
libwebkit = CDLL('libwebkitgtk-1.0.so.0')
except:
libwebkit = CDLL('libwebkit-1.0.so.2')
pass
get_session = libwebkit.webkit_get_default_session
get_session.restype = c_void_p
session = get_session()
g_object_set = libgobject.g_object_set
if session == 0:
return 1
g_object_set.argtypes = [ c_void_p, c_char_p, c_int, c_void_p ]
g_object_set(session, "max-conns", 20, None)
g_object_set(session, "max-conns-per-host", 5, None)
if not scheme:
return 1
elif ":" in scheme:
soup_uri_new = libsoup.soup_uri_new
soup_uri_new.restype = c_void_p
soup_uri_new.argtypes = [ c_char_p ]
proxy_uri = soup_uri_new(str(scheme))
g_object_set.argtypes = [ c_void_p, c_char_p, c_void_p, c_void_p ]
g_object_set(session, "proxy-uri", proxy_uri, None)
elif host:
soup_uri_new = libsoup.soup_uri_new
soup_uri_new.restype = c_void_p
soup_uri_new.argtypes = [ c_char_p ]
proxy_uri = soup_uri_new("http://127.0.0.1")
if proxy_uri == 0:
return 1
soup_uri_set_scheme = libsoup.soup_uri_set_scheme
soup_uri_set_scheme.argtypes = [ c_void_p, c_char_p ]
soup_uri_set_scheme(proxy_uri, str(scheme))
soup_uri_set_host = libsoup.soup_uri_set_host
soup_uri_set_host.argtypes = [ c_void_p, c_char_p ]
soup_uri_set_host(proxy_uri, str(host))
if port:
soup_uri_set_port = libsoup.soup_uri_set_port
soup_uri_set_port.argtypes = [ c_void_p, c_int ]
soup_uri_set_port(proxy_uri, int(port))
if user:
soup_uri_set_user = libsoup.soup_uri_set_user
soup_uri_set_user.argtypes = [ c_void_p, c_char_p ]
soup_uri_set_user(proxy_uri, str(user))
if passwd:
soup_uri_set_password = libsoup.soup_uri_set_password
soup_uri_set_password.argtypes = [ c_void_p, c_char_p ]
soup_uri_set_password(proxy_uri, str(passwd))
g_object_set.argtypes = [ c_void_p, c_char_p, c_void_p, c_void_p ]
g_object_set(session, "proxy-uri", proxy_uri, None)
return 0
except:
exctype, value = sys.exc_info()[:2]
print 'error: webkit_set_proxy_uri: (%s, %s)' % (exctype,value)
return 1
def open_file_chooser_dialog():
sel_file = None
fc_dlg = gtk.FileChooserDialog(title='Open ... '
, parent=None
, action=gtk.FILE_CHOOSER_ACTION_OPEN
, buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL, gtk.STOCK_OPEN,gtk.RESPONSE_OK))
fc_dlg.set_default_response(gtk.RESPONSE_OK)
resp = fc_dlg.run()
if resp == gtk.RESPONSE_OK:
sel_file = fc_dlg.get_filename()
fc_dlg.destroy()
gtk.gdk.threads_leave()
return sel_file
def encode_multipart_formdata(fields, files):
BOUNDARY = mimetools.choose_boundary()
CRLF = '\r\n'
    L = []
    total_size = 0
for key, value in fields.items():
key, value = str(key).encode('utf8'), str(value).encode('utf8')
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for pair in files:
key, filename = pair[0].encode('utf8'), pair[1].encode('utf8')
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
(key, 'hotot.png'));
L.append('Content-Type: %s' % get_content_type(filename))
L.append('')
L.append(file(filename).read())
total_size += os.path.getsize(filename)
L.append('--' + BOUNDARY + '--')
L.append('')
body = CRLF.join(L)
headers = {'content-type':'multipart/form-data; boundary=%s' % BOUNDARY
, 'content-length': str(len(body))};
return headers, body
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def get_ui_object(name):
for base in config.DATA_DIRS:
fullpath = os.path.join(base, name)
if os.path.exists(fullpath):
return fullpath
def get_extra_exts():
import glob
exts = []
files = glob.glob(os.path.join(config.CONF_DIR, config.EXT_DIR_NAME) + '/*')
ext_dirs = filter(lambda x: os.path.isdir(x), files)
for dir in ext_dirs:
ext_js = os.path.join(dir, 'entry.js')
if os.path.exists(ext_js):
exts.append('file://%s' % ext_js)
return exts
def get_extra_themes():
import glob
themes = []
files = glob.glob(os.path.join(config.CONF_DIR, config.THEME_DIR_NAME) + '/*')
theme_dirs = filter(lambda x: os.path.isdir(x), files)
for dir in theme_dirs:
info_file = os.path.join(dir, 'info.json')
style_file = os.path.join(dir, 'style.css')
if os.path.exists(info_file) and os.path.exists(style_file):
themes.append('file://%s' % dir)
return themes
def get_extra_fonts():
font_list = [ff.get_name() for ff in
gtk.gdk.pango_context_get().list_families()]
font_list.sort()
for font in font_list:
try:
font.decode('ascii')
except:
font_list.remove(font)
font_list.insert(0, font)
return font_list
def get_locale():
try:
lang, encode = locale.getdefaultlocale()
except:
lang = 'en'
if lang in supported_locate:
return supported_locate[lang]
return 'en'
def get_file_path_from_dnd_dropped_uri(uri):
path = ""
if uri.startswith('file:\\\\\\'): # windows
path = uri[8:] # 8 is len('file:///')
elif uri.startswith('file://'): # nautilus, rox
path = uri[7:] # 7 is len('file://')
elif uri.startswith('file:'): # xffm
path = uri[5:] # 5 is len('file:')
path = urllib.url2pathname(path) # escape special chars
path = path.strip('\r\n\x00') # remove \r\n and NULL
return path
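# Illustrative example (added; not in the original source; POSIX path handling):
#   >>> get_file_path_from_dnd_dropped_uri('file:///home/user/pic.png')
#   '/home/user/pic.png'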
| lgpl-3.0 |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/testing/gtest/scripts/upload_gtest.py | 1963 | 2851 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review.
This simple wrapper passes all command line flags and
[email protected] to upload.py.
USAGE: upload_gtest.py [options for upload.py]
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
CC_FLAG = '--cc='
GTEST_GROUP = '[email protected]'
def main():
# Finds the path to upload.py, assuming it is in the same directory
# as this file.
my_dir = os.path.dirname(os.path.abspath(__file__))
upload_py_path = os.path.join(my_dir, 'upload.py')
# Adds Google Test discussion group to the cc line if it's not there
# already.
upload_py_argv = [upload_py_path]
found_cc_flag = False
for arg in sys.argv[1:]:
if arg.startswith(CC_FLAG):
found_cc_flag = True
cc_line = arg[len(CC_FLAG):]
cc_list = [addr for addr in cc_line.split(',') if addr]
if GTEST_GROUP not in cc_list:
cc_list.append(GTEST_GROUP)
upload_py_argv.append(CC_FLAG + ','.join(cc_list))
else:
upload_py_argv.append(arg)
if not found_cc_flag:
upload_py_argv.append(CC_FLAG + GTEST_GROUP)
# Invokes upload.py with the modified command line flags.
os.execv(upload_py_path, upload_py_argv)
if __name__ == '__main__':
main()
| apache-2.0 |
mazafrav/JdeRobot | src/drivers/MAVLinkServer/modules/lib/wxgrapheditor.py | 8 | 2841 | '''
Graphical editing of graph definition
'''
from wx_loader import wx
from graphdefinition import GraphDefinition
class GraphDialog(wx.Dialog):
def __init__(self, title, graphdef, callback):
wx.Dialog.__init__(self, None, -1, title, size=(900, 400))
self.callback = callback
self.graphdef = graphdef
self.panel = wx.Panel(self, -1)
vbox = wx.BoxSizer(wx.VERTICAL)
# name entry
hbox_name = wx.BoxSizer(wx.HORIZONTAL)
st_name = wx.StaticText(self.panel, -1, 'Name: ')
self.tc_name = wx.TextCtrl(self.panel, -1, size=(400, -1))
self.tc_name.Value = self.graphdef.name
hbox_name.Add(st_name, 0, wx.LEFT, 10)
hbox_name.Add(self.tc_name, 0, wx.LEFT, 35)
vbox.Add(hbox_name, 0, wx.TOP, 10)
# expression entry
st = wx.StaticText(self.panel, -1, 'Expressions: ')
vbox.Add(st, 0, wx.LEFT, 10)
hbox_expressions = wx.BoxSizer(wx.HORIZONTAL)
self.tc_expressions = wx.TextCtrl(self.panel, -1, style=wx.TE_MULTILINE|wx.HSCROLL, size=(800, 80))
elist = []
for e in self.graphdef.expressions:
e = ' '.join(e.split())
elist.append(e)
self.tc_expressions.Value = '\n'.join(elist)
vbox.Add(self.tc_expressions, 0, wx.LEFT, 15)
# description entry
st = wx.StaticText(self.panel, -1, 'Description: ')
vbox.Add(st, 0, wx.LEFT, 10)
self.tc_description = wx.TextCtrl(self.panel, -1, style=wx.TE_MULTILINE)
vbox.Add(self.tc_description, 1, wx.EXPAND | wx.TOP | wx.RIGHT | wx.LEFT, 15)
self.tc_description.Value = self.graphdef.description
# buttons
button_save = wx.Button(self.panel, 1, 'Save')
button_cancel = wx.Button(self.panel, 2, 'Cancel')
button_test = wx.Button(self.panel, 3, 'Test')
hbox_buttons = wx.BoxSizer(wx.HORIZONTAL)
hbox_buttons.Add(button_save, 0, wx.LEFT, 10)
hbox_buttons.Add(button_cancel, 0, wx.LEFT, 10)
hbox_buttons.Add(button_test, 0, wx.LEFT, 10)
vbox.Add(hbox_buttons, 0, wx.TOP, 10)
self.Bind(wx.EVT_BUTTON, self.OnSave, id=1)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=2)
self.Bind(wx.EVT_BUTTON, self.OnTest, id=3)
self.panel.SetSizer(vbox)
self.Centre()
def update_values(self):
self.graphdef.name = self.tc_name.Value.strip()
self.graphdef.expressions = self.tc_expressions.Value.split('\n')
self.graphdef.description = self.tc_description.Value
def OnCancel(self, event):
self.Close()
def OnTest(self, event):
self.update_values()
self.callback('test', self.graphdef)
def OnSave(self, event):
self.update_values()
self.callback('save', self.graphdef)
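# Minimal usage sketch (hypothetical; `graphdef` is assumed to be an existing
# GraphDefinition and `on_graph_action` is supplied by the caller):
#
#   def on_graph_action(action, graphdef):
#       # `action` is either 'test' or 'save', as passed by the buttons above
#       print(action, graphdef.name)
#
#   dlg = GraphDialog('Graph Editor', graphdef, on_graph_action)
#   dlg.ShowModal()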
| gpl-3.0 |
moiseslorap/RIT | Computer Science 1/Labs/lab3/arrows.py | 1 | 8837 | """
file: arrows.py
language: python3
author: [email protected] Moisés Lora Pérez
class: CSCI 141-03
description: This program draws colored triangles both iteratively and recursively. It uses randomly generated
parameters such as the length, angle, and distance, but also has fixed values such as the maximum number of triangles
that may be drawn. The figures drawn should always start inside the bounding box and never outside it.
"""
from math import *
from random import *
from turtle import *
def areaEquilateralTriangle(length):
"""
This function calculates the area of any given equilateral triangle.
:param length: This parameter is calculated randomly in the recursive and iterative functions
:return: The area calculation is returned
pre-conditions: The turtle is not manipulated on this function.
post-conditions: The turtle is not manipulated on this function.
"""
a = length
b = a/2
h = ((sqrt(3)*(a))/2)
return 2*((b*h)/2)
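# Note (added for clarity): for an equilateral triangle the expression above
# reduces to the closed form sqrt(3)/4 * length**2, e.g.
# areaEquilateralTriangle(2) == sqrt(3), roughly 1.732.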
def BOUNDING_BOX():
"""
This function returns the bounding box's half-width of 200 pixels (the box drawn is 400 x 400, centered at the origin).
:return: The half-width, 200.
pre-conditions: The turtle is not manipulated on this function.
post-conditions: The turtle is not manipulated on this function.
"""
return 200
def MAX_FIGURES():
"""
This function defines the maximum number of triangles drawn.
pre-conditions: The turtle is not manipulated on this function.
post-conditions: The turtle is not manipulated on this function.
:return: Maximum figures drawn is 500
"""
return 500
def MAX_DISTANCE():
"""
This function defines the maximum distance the turtle moves between triangles.
pre-conditions: The turtle is not manipulated on this function.
post-conditions: The turtle is not manipulated on this function.
:return: Maximum distance is 30
"""
return 30
def MAX_ANGLE():
"""
This function defines the maximum turn angle applied between triangles.
pre-conditions: The turtle is not manipulated on this function.
post-conditions: The turtle is not manipulated on this function.
:return: Maximum angle is 30
"""
return 30
def MAX_SIZE():
"""
This function defines the maximum length of the triangles drawn.
pre-conditions: The turtle is not manipulated on this function.
post-conditions: The turtle is not manipulated on this function.
:return: Maximum length is 30
"""
return 30
def boundingBox():
"""
This function draws the bounding box (square) and then re-centers the turtle at the middle of the canvas.
pre-conditions: The turtle starts at the -200(x-axis)position of the bounding box on the bottom left facing east.
post-conditions: The turtle ends on the middle of the bounding box facing east.
"""
up()
backward(BOUNDING_BOX())
left(90)
backward(BOUNDING_BOX())
right(90)
down()
forward(400)
left(90)
forward(400)
left(90)
forward(400)
left(90)
forward(400)
left(90)
up()
left(90)
forward(BOUNDING_BOX())
right(90)
forward(BOUNDING_BOX())
down()
def triangle(length):
"""
This function correctly draws a colored triangle with a random length and random color fill.
pre-conditions: The first triangle drawn starts at the middle of the bounding box facing east.
post-conditions: The triangle then ends at the same point it started and continues at the position which the
recursive or iterative function randomly generates.
"""
color(random(), random(), random())
begin_fill()
forward(length)
left(120)
forward(length)
left(120)
forward(length)
left(120)
end_fill()
def drawFiguresRec(depth):
"""
This function draws the figures recursively, makes sure the figures fit within the bounding box and calculates
the sum of the triangles areas.
pre-conditions: The first triangle drawn starts at the middle of the bounding box facing east.
post-conditions: The triangle then ends at the same point it started and continues to draw more at an angle, length,
and distance which the recursive function randomly generates until the depth reaches a value lower than zero.
:param depth: The depth is inputed by the user which defines the number of triangles that will be drawn.
:return: The sum of the triangles areas.
"""
if depth <= 0:
return 0
else:
x, y = position()
length = randint(1, MAX_SIZE())
angle = randint(-MAX_ANGLE(), MAX_ANGLE())
dist = random() * MAX_DISTANCE()
up()
left(angle)
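# Keep the next figure inside the bounding box: if moving `dist` would cross
# an edge, point the turtle back toward the interior before moving.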
if y + dist > BOUNDING_BOX():
setheading(270)
elif y - dist < -BOUNDING_BOX():
setheading(90)
elif x+dist > BOUNDING_BOX():
setheading(180)
elif x-dist < -BOUNDING_BOX():
setheading(0)
forward(dist)
triangle(length)
down()
return areaEquilateralTriangle(length) + drawFiguresRec(depth-1)
def drawFiguresIter(depth):
"""
This function draws the figures iteratively, makes sure the figures fit within the bounding box and calculates
the sum of the triangles areas.
pre-conditions: The first triangle drawn starts at the middle of the bounding box facing east.
post-conditions: The triangle then ends at the same point it started and continues at angle, length, and distance
which the iterative function randomly generates until the depth reaches a value lower than zero.
:param depth: The depth is inputed by the user which defines the number of triangles that will be drawn.
:return: The sum of the triangles areas.
"""
sum = 0
while True:
if depth <= 0:
break
else:
x, y = position()
length = randint(1, MAX_SIZE())
dist = random() * MAX_DISTANCE()
left(randint(-MAX_ANGLE(), MAX_ANGLE()))
if y + dist > BOUNDING_BOX():
setheading(270)
elif y - dist < -BOUNDING_BOX():
setheading(90)
elif x+dist > BOUNDING_BOX():
setheading(180)
elif x-dist < -BOUNDING_BOX():
setheading(0)
up()
forward(dist)
down()
depth-=1
length-=1
triangle(length)
up()
sum += areaEquilateralTriangle(length)
return sum
def main():
"""
This function validates the input (depth) and checks whether it falls within the allowed range; if not, an error
message is displayed. Otherwise the bounding box is drawn, the recursive function is called, and the sum of the
triangle areas is printed. The canvas is then reset and the same is done for the iterative function, after which
the program terminates.
pre-conditions: The turtle starts at the -200(x-axis)position of the bounding box on the bottom left facing east.
post-conditions: The turtle then starts drawing figures from the middle of the bounding box facing east, then
proceeds to draw figures by calling the recursive function. Then after completion canvas is reseted and then the
same happens for the iterative function.
"""
#I convert the input into a integer using the following method.
depth = int(input("Enter the number of figures to be drawn: "))
#This makes sure the value inputed is within the range of (0,500)
if 0 <= depth <= MAX_FIGURES():
#This draws the Bounding box
boundingBox()
#This calls the recursive function and also assigns the value of its return to a variable called sum.
sum = drawFiguresRec(depth)
#This prints the sum.
print("The sum of the areas is", sum)
#this asks the user for continuation
input("Press enter to continue")
#this resets the canvas
reset()
#This draws the bounding box again
boundingBox()
#This calls the iterative function and also assigns the value of its return to a variable called sum.
sum = drawFiguresIter(depth)
#This prints the sum.
print("The sum of the areas is", sum)
#this asks the user for termination
input("Press enter to terminate")
else:
#Error message for input out of range.
print("The number of triangles should be within the range of 0 to 500. Invalid Input!")
#this executes the program
main()
| mit |
hryamzik/ansible | lib/ansible/modules/network/aci/aci_epg_to_domain.py | 1 | 12442 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: aci_epg_to_domain
short_description: Bind EPGs to Domains (fv:RsDomAtt)
description:
- Bind EPGs to Physical and Virtual Domains on Cisco ACI fabrics.
notes:
- The C(tenant), C(ap), C(epg), and C(domain) used must exist before using this module in your playbook.
The M(aci_tenant), M(aci_ap), M(aci_epg) and M(aci_domain) modules can be used for this.
- OpenStack VMM domains must not be created using this module. The OpenStack VMM domain is created directly
by the Cisco APIC Neutron plugin as part of the installation and configuration.
This module can be used to query status of an OpenStack VMM domain.
- More information about the internal APIC class B(fv:RsDomAtt) from
L(the APIC Management Information Model reference,https://developer.cisco.com/docs/apic-mim-ref/).
author:
- Jacob McGill (@jmcgill298)
version_added: '2.4'
options:
allow_useg:
description:
- Allows micro-segmentation.
- The APIC defaults to C(encap) when unset during creation.
choices: [ encap, useg ]
ap:
description:
- Name of an existing application network profile, that will contain the EPGs.
aliases: [ app_profile, app_profile_name ]
deploy_immediacy:
description:
- Determines when the policy is pushed to hardware Policy CAM.
- The APIC defaults to C(lazy) when unset during creation.
choices: [ immediate, lazy ]
domain:
description:
- Name of the physical or virtual domain being associated with the EPG.
aliases: [ domain_name, domain_profile ]
domain_type:
description:
- Determines if the Domain is physical (phys) or virtual (vmm).
choices: [ phys, vmm ]
aliases: [ type ]
encap:
description:
- The VLAN encapsulation for the EPG when binding a VMM Domain with static encap_mode.
- This acts as the secondary encap when using useg.
- Accepted values range between C(1) and C(4096).
type: int
encap_mode:
description:
- The encapsulation method to be used.
- The APIC defaults to C(auto) when unset during creation.
choices: [ auto, vlan, vxlan ]
epg:
description:
- Name of the end point group.
aliases: [ epg_name, name ]
netflow:
description:
- Determines if netflow should be enabled.
- The APIC defaults to C(no) when unset during creation.
type: bool
primary_encap:
description:
- Determines the primary VLAN ID when using useg.
- Accepted values range between C(1) and C(4096).
type: int
resolution_immediacy:
description:
- Determines when the policies should be resolved and available.
- The APIC defaults to C(lazy) when unset during creation.
choices: [ immediate, lazy, pre-provision ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
choices: [ absent, present, query ]
default: present
tenant:
description:
- Name of an existing tenant.
aliases: [ tenant_name ]
vm_provider:
description:
- The VM platform for VMM Domains.
- Support for Kubernetes was added in ACI v3.0.
- Support for CloudFoundry, OpenShift and Red Hat was added in ACI v3.1.
choices: [ cloudfoundry, kubernetes, microsoft, openshift, openstack, redhat, vmware ]
extends_documentation_fragment: aci
'''
EXAMPLES = r'''
- name: Add a new physical domain to EPG binding
aci_epg_to_domain:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
domain: anstest
domain_type: phys
state: present
- name: Remove an existing physical domain to EPG binding
aci_epg_to_domain:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
domain: anstest
domain_type: phys
state: absent
- name: Query a specific physical domain to EPG binding
aci_epg_to_domain:
host: apic
username: admin
password: SomeSecretPassword
tenant: anstest
ap: anstest
epg: anstest
domain: anstest
domain_type: phys
state: query
- name: Query all domain to EPG bindings
aci_epg_to_domain:
host: apic
username: admin
password: SomeSecretPassword
state: query
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: string
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: string
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: string
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: string
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: string
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
from ansible.module_utils.basic import AnsibleModule
VM_PROVIDER_MAPPING = dict(
cloudfoundry='CloudFoundry',
kubernetes='Kubernetes',
microsoft='Microsoft',
openshift='OpenShift',
openstack='OpenStack',
redhat='Redhat',
vmware='VMware',
)
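# For example (illustrative values): domain_type='vmm', vm_provider='vmware' and
# domain='lab' yield the target DN 'uni/vmmp-VMware/dom-lab', which is built in
# the URL construction below.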
def main():
argument_spec = aci_argument_spec()
argument_spec.update(
allow_useg=dict(type='str', choices=['encap', 'useg']),
ap=dict(type='str', aliases=['app_profile', 'app_profile_name']), # Not required for querying all objects
deploy_immediacy=dict(type='str', choices=['immediate', 'lazy']),
domain=dict(type='str', aliases=['domain_name', 'domain_profile']), # Not required for querying all objects
domain_type=dict(type='str', choices=['phys', 'vmm'], aliases=['type']), # Not required for querying all objects
encap=dict(type='int'),
encap_mode=dict(type='str', choices=['auto', 'vlan', 'vxlan']),
epg=dict(type='str', aliases=['name', 'epg_name']), # Not required for querying all objects
netflow=dict(type='raw'), # Turn into a boolean in v2.9
primary_encap=dict(type='int'),
resolution_immediacy=dict(type='str', choices=['immediate', 'lazy', 'pre-provision']),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
tenant=dict(type='str', aliases=['tenant_name']), # Not required for querying all objects
vm_provider=dict(type='str', choices=['cloudfoundry', 'kubernetes', 'microsoft', 'openshift', 'openstack', 'redhat', 'vmware']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['domain_type', 'vmm', ['vm_provider']],
['state', 'absent', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
['state', 'present', ['ap', 'domain', 'domain_type', 'epg', 'tenant']],
],
)
aci = ACIModule(module)
allow_useg = module.params['allow_useg']
ap = module.params['ap']
deploy_immediacy = module.params['deploy_immediacy']
domain = module.params['domain']
domain_type = module.params['domain_type']
vm_provider = module.params['vm_provider']
encap = module.params['encap']
if encap is not None:
if encap in range(1, 4097):
encap = 'vlan-{0}'.format(encap)
else:
module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
encap_mode = module.params['encap_mode']
epg = module.params['epg']
netflow = aci.boolean(module.params['netflow'], 'enabled', 'disabled')
primary_encap = module.params['primary_encap']
if primary_encap is not None:
if primary_encap in range(1, 4097):
primary_encap = 'vlan-{0}'.format(primary_encap)
else:
module.fail_json(msg='Valid VLAN assignments are from 1 to 4096')
resolution_immediacy = module.params['resolution_immediacy']
state = module.params['state']
tenant = module.params['tenant']
if domain_type == 'phys' and vm_provider is not None:
module.fail_json(msg="Domain type 'phys' cannot have a 'vm_provider'")
# Compile the full domain for URL building
if domain_type == 'vmm':
epg_domain = 'uni/vmmp-{0}/dom-{1}'.format(VM_PROVIDER_MAPPING[vm_provider], domain)
elif domain_type is not None:
epg_domain = 'uni/phys-{0}'.format(domain)
else:
epg_domain = None
aci.construct_url(
root_class=dict(
aci_class='fvTenant',
aci_rn='tn-{0}'.format(tenant),
filter_target='eq(fvTenant.name, "{0}")'.format(tenant),
module_object=tenant,
),
subclass_1=dict(
aci_class='fvAp',
aci_rn='ap-{0}'.format(ap),
filter_target='eq(fvAp.name, "{0}")'.format(ap),
module_object=ap,
),
subclass_2=dict(
aci_class='fvAEPg',
aci_rn='epg-{0}'.format(epg),
filter_target='eq(fvAEPg.name, "{0}")'.format(epg),
module_object=epg,
),
subclass_3=dict(
aci_class='fvRsDomAtt',
aci_rn='rsdomAtt-[{0}]'.format(epg_domain),
filter_target='eq(fvRsDomAtt.tDn, "{0}")'.format(epg_domain),
module_object=epg_domain,
),
)
aci.get_existing()
if state == 'present':
aci.payload(
aci_class='fvRsDomAtt',
class_config=dict(
classPref=allow_useg,
encap=encap,
encapMode=encap_mode,
instrImedcy=deploy_immediacy,
netflowPref=netflow,
primaryEncap=primary_encap,
resImedcy=resolution_immediacy,
),
)
aci.get_diff(aci_class='fvRsDomAtt')
aci.post_config()
elif state == 'absent':
aci.delete_config()
aci.exit_json()
if __name__ == "__main__":
main()
| gpl-3.0 |
windelbouwman/ppci-mirror | tools/fuzzer.py | 1 | 1546 | #!/usr/bin/python
"""
Tool that randomly generates code and feeds it through the code generator.
"""
import random
import io
from ppci.api import ir_to_object
from ppci import ir
from ppci.irutils import Builder, Writer, Verifier
class Generator:
def __init__(self):
self.builder = Builder()
self.verifier = Verifier()
def gen_module(self):
module = ir.Module('fuzz')
self.builder.module = module
# Generate some functions
for i in range(random.randrange(6, 20)):
self.gen_function('fzfnc{}'.format(i))
f = io.StringIO()
self.verifier.verify(module)
writer = Writer(f)
writer.write(module)
print(f.getvalue())
return module
def gen_function(self, name):
function = self.builder.new_procedure(name)
self.builder.set_function(function)
first_block = self.builder.new_block()
function.entry = first_block
self.builder.set_block(first_block)
for i in range(random.randrange(10, 80)):
self.gen_ins(i)
self.builder.emit(ir.Exit())
def gen_ins(self, i):
c1 = self.builder.emit(ir.Const(i, 'cnsta{}'.format(i), ir.i32))
c2 = self.builder.emit(ir.Const(i+2, 'cnstb{}'.format(i), ir.i32))
self.builder.emit(ir.Binop(c1, '+', c2, 'op{}'.format(i), ir.i32))
def go():
generator = Generator()
module = generator.gen_module()
obj = ir_to_object([module], 'arm')
print(obj)
if __name__ == '__main__':
go()
| bsd-2-clause |
Valloric/ycmd | ycmd/tests/shutdown_test.py | 5 | 3784 | # Copyright (C) 2016 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# Not installing aliases from python-future; it's unreliable and slow.
from builtins import * # noqa
from hamcrest import assert_that, equal_to
from threading import Event
import time
import requests
from ycmd.tests.client_test import Client_test
from ycmd.utils import StartThread
# Time to wait for all the servers to shutdown. Tweak for the CI environment.
#
# NOTE: The timeout is 2 minutes. That is a long time, but the java sub-server
# (jdt.ls) takes a _long time_ to finally actually shut down. This is because it
# is based on eclipse, which must do whatever eclipse must do when it shuts down
# its workspace.
SUBSERVER_SHUTDOWN_TIMEOUT = 120
class Shutdown_test( Client_test ):
@Client_test.CaptureLogfiles
def FromHandlerWithoutSubserver_test( self ):
self.Start()
self.AssertServersAreRunning()
response = self.PostRequest( 'shutdown' )
self.AssertResponse( response )
assert_that( response.json(), equal_to( True ) )
self.AssertServersShutDown( timeout = SUBSERVER_SHUTDOWN_TIMEOUT )
self.AssertLogfilesAreRemoved()
@Client_test.CaptureLogfiles
def FromHandlerWithSubservers_test( self ):
self.Start()
filetypes = [ 'cs',
'go',
'java',
'javascript',
'typescript',
'rust' ]
for filetype in filetypes:
self.StartSubserverForFiletype( filetype )
self.AssertServersAreRunning()
response = self.PostRequest( 'shutdown' )
self.AssertResponse( response )
assert_that( response.json(), equal_to( True ) )
self.AssertServersShutDown( timeout = SUBSERVER_SHUTDOWN_TIMEOUT )
self.AssertLogfilesAreRemoved()
@Client_test.CaptureLogfiles
def FromWatchdogWithoutSubserver_test( self ):
self.Start( idle_suicide_seconds = 2, check_interval_seconds = 1 )
self.AssertServersAreRunning()
self.AssertServersShutDown( timeout = SUBSERVER_SHUTDOWN_TIMEOUT )
self.AssertLogfilesAreRemoved()
@Client_test.CaptureLogfiles
def FromWatchdogWithSubservers_test( self ):
all_servers_are_running = Event()
def KeepServerAliveInAnotherThread():
while not all_servers_are_running.is_set():
try:
self.GetRequest( 'ready' )
except requests.exceptions.ConnectionError:
pass
finally:
time.sleep( 0.1 )
self.Start( idle_suicide_seconds = 2, check_interval_seconds = 1 )
StartThread( KeepServerAliveInAnotherThread )
try:
filetypes = [ 'cs',
'go',
'java',
'javascript',
'typescript',
'rust' ]
for filetype in filetypes:
self.StartSubserverForFiletype( filetype )
self.AssertServersAreRunning()
finally:
all_servers_are_running.set()
self.AssertServersShutDown( timeout = SUBSERVER_SHUTDOWN_TIMEOUT + 10 )
self.AssertLogfilesAreRemoved()
| gpl-3.0 |
angr/angr | angr/knowledge_plugins/patches.py | 1 | 3920 | from typing import Optional, List, Dict
from cle.address_translator import AddressTranslator
from sortedcontainers import SortedDict
from .plugin import KnowledgeBasePlugin
# TODO: Serializable
class Patch:
def __init__(self, addr, new_bytes, comment: Optional[str]=None):
self.addr = addr
self.new_bytes = new_bytes
self.comment = comment
def __len__(self):
return len(self.new_bytes)
class PatchManager(KnowledgeBasePlugin):
"""
A placeholder-style implementation for a binary patch manager. This class should be significantly changed in the
future when all data about loaded binary objects are loaded into angr knowledge base from CLE. As of now, it only
stores byte-level replacements. Other angr components may choose to use or not use information provided by this
manager. In other words, it is not transparent.
Patches should not overlap, but it's user's responsibility to check for and avoid overlapping patches.
"""
def __init__(self, kb):
super().__init__()
self._patches: Dict[int,Patch] = SortedDict()
self._kb = kb
def add_patch(self, addr, new_bytes, comment: Optional[str]=None):
self._patches[addr] = Patch(addr, new_bytes, comment=comment)
def add_patch_obj(self, patch: Patch):
self._patches[patch.addr] = patch
def remove_patch(self, addr):
if addr in self._patches:
del self._patches[addr]
def patch_addrs(self):
return self._patches.keys()
def get_patch(self, addr):
"""
Get patch at the given address.
:param int addr: The address of the patch.
:return: The patch if there is one starting at the address, or None if there isn't any.
:rtype: Patch or None
"""
return self._patches.get(addr, None)
def get_all_patches(self, addr, size):
"""
Retrieve all patches that cover a region specified by [addr, addr+size).
:param int addr: The address of the beginning of the region.
:param int size: Size of the region.
:return: A list of patches.
:rtype: list
"""
patches = [ ]
for patch_addr in self._patches.irange(maximum=addr+size-1, reverse=True):
p = self._patches[patch_addr]
if self.overlap(p.addr, p.addr + len(p), addr, addr+size):
patches.append(p)
else:
break
return patches[::-1]
def keys(self):
return self._patches.keys()
def items(self):
return self._patches.items()
def values(self):
return self._patches.values()
def copy(self):
o = PatchManager(self._kb)
o._patches = self._patches.copy()
return o
@staticmethod
def overlap(a0, a1, b0, b1):
return a0 <= b0 < a1 or a0 <= b1 < a1 or b0 <= a0 < b1
def apply_patches_to_binary(self, binary_bytes: Optional[bytes]=None, patches: Optional[List[Patch]]=None) -> bytes:
if patches is None:
patches = sorted(list(self._patches.values()), key=lambda x: x.addr)
if binary_bytes is None:
with open(self._kb._project.loader.main_object.binary, "rb") as f:
binary_bytes = f.read()
for patch in patches:
# convert addr to file offset
at = AddressTranslator.from_mva(patch.addr, self._kb._project.loader.main_object)
file_offset = at.to_raw()
if file_offset < len(binary_bytes) and file_offset + len(patch.new_bytes) < len(binary_bytes):
binary_bytes = binary_bytes[:file_offset] + \
patch.new_bytes + \
binary_bytes[file_offset + len(patch.new_bytes):]
return binary_bytes
KnowledgeBasePlugin.register_default('patches', PatchManager)
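# Minimal usage sketch (hypothetical; assumes an angr Project `proj` and an
# address `addr` that falls inside the loaded main object):
#
#   patches = proj.kb.patches
#   patches.add_patch(addr, b"\x90\x90")                # replace two bytes at `addr`
#   patched_bytes = patches.apply_patches_to_binary()   # bytes of the patched binary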
| bsd-2-clause |