repo_name (string, 5-100 chars) | path (string, 4-375 chars) | copies (991 distinct values) | size (string, 4-7 chars) | content (string, 666-1M chars) | license (15 distinct values)
---|---|---|---|---|---|
tchellomello/home-assistant | homeassistant/components/izone/config_flow.py | 16 | 1219 | """Config flow for izone."""
import asyncio
import logging
from async_timeout import timeout
from homeassistant import config_entries
from homeassistant.helpers import config_entry_flow
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DISPATCH_CONTROLLER_DISCOVERED, IZONE, TIMEOUT_DISCOVERY
from .discovery import async_start_discovery_service, async_stop_discovery_service
_LOGGER = logging.getLogger(__name__)
async def _async_has_devices(hass):
controller_ready = asyncio.Event()
async_dispatcher_connect(
hass, DISPATCH_CONTROLLER_DISCOVERED, lambda x: controller_ready.set()
)
disco = await async_start_discovery_service(hass)
try:
async with timeout(TIMEOUT_DISCOVERY):
await controller_ready.wait()
except asyncio.TimeoutError:
pass
if not disco.pi_disco.controllers:
await async_stop_discovery_service(hass)
_LOGGER.debug("No controllers found")
return False
_LOGGER.debug("Controllers %s", disco.pi_disco.controllers)
return True
config_entry_flow.register_discovery_flow(
IZONE, "iZone Aircon", _async_has_devices, config_entries.CONN_CLASS_LOCAL_POLL
)
| apache-2.0 |
xlzdew/seleniumpr | py/selenium/webdriver/chrome/webdriver.py | 21 | 3050 | #!/usr/bin/python
#
# Copyright 2011-2013 Software Freedom Conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
from selenium.webdriver.remote.command import Command
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from selenium.common.exceptions import WebDriverException
from .service import Service
from .options import Options
class WebDriver(RemoteWebDriver):
"""
Controls the ChromeDriver and allows you to drive the browser.
You will need to download the ChromeDriver executable from
http://chromedriver.storage.googleapis.com/index.html
"""
def __init__(self, executable_path="chromedriver", port=0,
chrome_options=None, service_args=None,
desired_capabilities=None, service_log_path=None):
"""
Creates a new instance of the chrome driver.
Starts the service and then creates a new instance of the chrome driver.
:Args:
- executable_path - path to the executable. If the default is used, it assumes the executable is in the $PATH.
- port - port you would like the service to run on; if left as 0, a free port will be found.
- desired_capabilities: Dictionary object with non-browser specific
capabilities only, such as "proxy" or "loggingPref".
- chrome_options: this takes an instance of ChromeOptions
"""
if chrome_options is None:
options = Options()
else:
options = chrome_options
if desired_capabilities is not None:
desired_capabilities.update(options.to_capabilities())
else:
desired_capabilities = options.to_capabilities()
self.service = Service(executable_path, port=port,
service_args=service_args, log_path=service_log_path)
self.service.start()
try:
RemoteWebDriver.__init__(self,
command_executor=self.service.service_url,
desired_capabilities=desired_capabilities,
keep_alive=True)
except:
self.quit()
raise
self._is_remote = False
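# Hedged usage sketch (not part of the original file): constructing a driver with
# custom ChromeOptions. The chromedriver path and the URL below are placeholders,
# not values defined by this module.
#
#     from selenium.webdriver.chrome.webdriver import WebDriver
#     from selenium.webdriver.chrome.options import Options
#     opts = Options()
#     opts.add_argument("--disable-extensions")
#     driver = WebDriver(executable_path="/path/to/chromedriver", chrome_options=opts)
#     try:
#         driver.get("http://example.com")
#     finally:
#         driver.quit()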
def quit(self):
"""
Closes the browser and shuts down the ChromeDriver executable
that is started when starting the ChromeDriver
"""
try:
RemoteWebDriver.quit(self)
except:
# We don't care about the message because something probably has gone wrong
pass
finally:
self.service.stop()
| apache-2.0 |
kytos/kytos | kytos/core/exceptions.py | 1 | 3168 | """Kytos Core-Defined Exceptions."""
class KytosCoreException(Exception):
"""Exception thrown when KytosCore is broken."""
def __str__(self):
"""Return message of KytosCoreException."""
return 'KytosCore exception: ' + super().__str__()
class KytosSwitchOfflineException(Exception):
"""Exception thrown when a switch is offline."""
def __init__(self, switch):
"""Require a switch.
Args:
switch (:class:`~kytos.core.switch.Switch`): A switch offline.
"""
super().__init__()
self.switch = switch
def __str__(self):
"""Return message of KytosSwitchOfflineException."""
msg = 'The switch {} is not reachable. Please check the connection '
msg += 'between the switch and the controller.'
return msg.format(self.switch.dpid)
class KytosEventException(Exception):
"""Exception thrown when a KytosEvent have an illegal use."""
def __init__(self, message="KytosEvent exception", event=None):
"""Assign parameters to instance variables.
Args:
message (string): message from KytosEventException.
event (:class:`~kytos.core.events.KytosEvent`): Event malformed.
"""
super().__init__()
self.message = message
self.event = event
def __str__(self):
"""Return the full message from KytosEventException."""
message = self.message
if self.event:
message += ". EventType: " + type(self.event)
return message
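# Hedged illustration (not part of the original module): with an event attached,
# str(KytosEventException("bad payload", event=some_event)) renders as
# "bad payload. EventType: <class '...KytosEvent'>"; `some_event` stands for any
# KytosEvent instance and is only a placeholder here.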
class KytosWrongEventType(KytosEventException):
"""Exception related to EventType.
When related to buffers, it means that the EventType is not allowed on
that buffer.
"""
class KytosNoTagAvailableError(Exception):
"""Exception raised when a link has no vlan available."""
def __init__(self, link):
"""Require a link.
Args:
link (:class:`~kytos.core.link.Link`): A link with no vlan
available.
"""
super().__init__()
self.link = link
def __str__(self):
"""Full message."""
msg = f'Link {self.link.id} has no vlan available.'
return msg
class KytosLinkCreationError(Exception):
"""Exception thrown when the link has an empty endpoint."""
# Exceptions related to NApps
class KytosNAppException(Exception):
"""Exception raised on a KytosNApp."""
def __init__(self, message="KytosNApp exception"):
"""Assign the parameters to instance variables.
Args:
message (string): message from KytosNAppException.
"""
super().__init__()
self.message = message
def __str__(self):
"""Return the message from KytosNAppException."""
return self.message
class KytosNAppMissingInitArgument(KytosNAppException):
"""Exception thrown when NApp have a missing init argument."""
def __init__(self, message="KytosNAppMissingInitArgument"):
"""Assing parameters to instance variables.
Args:
message (str): Name of the missed argument.
"""
super().__init__(message=message)
| mit |
AndreyKedo/My_project_blog | node_modules/node-gyp/gyp/pylib/gyp/common.py | 1292 | 20063 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import with_statement
import collections
import errno
import filecmp
import os.path
import re
import tempfile
import sys
# A minimal memoizing decorator. It'll blow up if the args aren't immutable,
# among other "problems".
class memoize(object):
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
result = self.func(*args)
self.cache[args] = result
return result
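# Hedged usage sketch (not part of the original file): memoize caches by the
# positional-argument tuple, so the decorated function must take hashable
# arguments only. `fib` is an illustrative name, not part of this module.
#
#   @memoize
#   def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # repeated calls with the same argument are served from self.cache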
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def ExceptionAppend(e, msg):
"""Append a message to the given exception's message."""
if not e.args:
e.args = (msg,)
elif len(e.args) == 1:
e.args = (str(e.args[0]) + ' ' + msg,)
else:
e.args = (str(e.args[0]) + ' ' + msg,) + e.args[1:]
def FindQualifiedTargets(target, qualified_list):
"""
Given a list of qualified targets, return the qualified targets for the
specified |target|.
"""
return [t for t in qualified_list if ParseQualifiedTarget(t)[1] == target]
def ParseQualifiedTarget(target):
# Splits a qualified target into a build file, target name and toolset.
# NOTE: rsplit is used to disambiguate the Windows drive letter separator.
target_split = target.rsplit(':', 1)
if len(target_split) == 2:
[build_file, target] = target_split
else:
build_file = None
target_split = target.rsplit('#', 1)
if len(target_split) == 2:
[target, toolset] = target_split
else:
toolset = None
return [build_file, target, toolset]
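# Hedged examples (not part of the original file) of the split performed above:
#   ParseQualifiedTarget('foo/bar.gyp:target#host') -> ['foo/bar.gyp', 'target', 'host']
#   ParseQualifiedTarget('target')                  -> [None, 'target', None]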
def ResolveTarget(build_file, target, toolset):
# This function resolves a target into a canonical form:
# - a fully defined build file, either absolute or relative to the current
# directory
# - a target name
# - a toolset
#
# build_file is the file relative to which 'target' is defined.
# target is the qualified target.
# toolset is the default toolset for that target.
[parsed_build_file, target, parsed_toolset] = ParseQualifiedTarget(target)
if parsed_build_file:
if build_file:
# If a relative path, parsed_build_file is relative to the directory
# containing build_file. If build_file is not in the current directory,
# parsed_build_file is not a usable path as-is. Resolve it by
# interpreting it as relative to build_file. If parsed_build_file is
# absolute, it is usable as a path regardless of the current directory,
# and os.path.join will return it as-is.
build_file = os.path.normpath(os.path.join(os.path.dirname(build_file),
parsed_build_file))
# Further (to handle cases like ../cwd), make it relative to cwd.
if not os.path.isabs(build_file):
build_file = RelativePath(build_file, '.')
else:
build_file = parsed_build_file
if parsed_toolset:
toolset = parsed_toolset
return [build_file, target, toolset]
def BuildFile(fully_qualified_target):
# Extracts the build file from the fully qualified target.
return ParseQualifiedTarget(fully_qualified_target)[0]
def GetEnvironFallback(var_list, default):
"""Look up a key in the environment, with fallback to secondary keys
and finally falling back to a default value."""
for var in var_list:
if var in os.environ:
return os.environ[var]
return default
def QualifiedTarget(build_file, target, toolset):
# "Qualified" means the file that a target was defined in and the target
# name, separated by a colon, suffixed by a # and the toolset name:
# /path/to/file.gyp:target_name#toolset
fully_qualified = build_file + ':' + target
if toolset:
fully_qualified = fully_qualified + '#' + toolset
return fully_qualified
@memoize
def RelativePath(path, relative_to, follow_path_symlink=True):
# Assuming both |path| and |relative_to| are relative to the current
# directory, returns a relative path that identifies path relative to
# relative_to.
# If |follow_path_symlink| is true (default) and |path| is a symlink, then
# this method returns a path to the real file represented by |path|. If it is
# false, this method returns a path to the symlink. If |path| is not a
# symlink, this option has no effect.
# Convert to normalized (and therefore absolute) paths.
if follow_path_symlink:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
relative_to = os.path.realpath(relative_to)
# On Windows, we can't create a relative path to a different drive, so just
# use the absolute path.
if sys.platform == 'win32':
if (os.path.splitdrive(path)[0].lower() !=
os.path.splitdrive(relative_to)[0].lower()):
return path
# Split the paths into components.
path_split = path.split(os.path.sep)
relative_to_split = relative_to.split(os.path.sep)
# Determine how much of the prefix the two paths share.
prefix_len = len(os.path.commonprefix([path_split, relative_to_split]))
# Put enough ".." components to back up out of relative_to to the common
# prefix, and then append the part of path_split after the common prefix.
relative_split = [os.path.pardir] * (len(relative_to_split) - prefix_len) + \
path_split[prefix_len:]
if len(relative_split) == 0:
# The paths were the same.
return ''
# Turn it back into a string and we're done.
return os.path.join(*relative_split)
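# Hedged examples (not part of the original file), assuming a POSIX system and
# no symlinks in the paths involved:
#   RelativePath('foo/bar/baz', 'foo')  -> 'bar/baz'
#   RelativePath('foo', 'foo/bar/baz')  -> '../..'
#   RelativePath('foo', 'foo')          -> ''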
@memoize
def InvertRelativePath(path, toplevel_dir=None):
"""Given a path like foo/bar that is relative to toplevel_dir, return
the inverse relative path back to the toplevel_dir.
E.g. os.path.normpath(os.path.join(path, InvertRelativePath(path)))
should always produce the empty string, unless the path contains symlinks.
"""
if not path:
return path
toplevel_dir = '.' if toplevel_dir is None else toplevel_dir
return RelativePath(toplevel_dir, os.path.join(toplevel_dir, path))
def FixIfRelativePath(path, relative_to):
# Like RelativePath but returns |path| unchanged if it is absolute.
if os.path.isabs(path):
return path
return RelativePath(path, relative_to)
def UnrelativePath(path, relative_to):
# Assuming that |relative_to| is relative to the current directory, and |path|
# is a path relative to the dirname of |relative_to|, returns a path that
# identifies |path| relative to the current directory.
rel_dir = os.path.dirname(relative_to)
return os.path.normpath(os.path.join(rel_dir, path))
# re objects used by EncodePOSIXShellArgument. See IEEE 1003.1 XCU.2.2 at
# http://www.opengroup.org/onlinepubs/009695399/utilities/xcu_chap02.html#tag_02_02
# and the documentation for various shells.
# _quote is a pattern that should match any argument that needs to be quoted
# with double-quotes by EncodePOSIXShellArgument. It matches the following
# characters appearing anywhere in an argument:
# \t, \n, space parameter separators
# # comments
# $ expansions (quoted to always expand within one argument)
# % called out by IEEE 1003.1 XCU.2.2
# & job control
# ' quoting
# (, ) subshell execution
# *, ?, [ pathname expansion
# ; command delimiter
# <, >, | redirection
# = assignment
# {, } brace expansion (bash)
# ~ tilde expansion
# It also matches the empty string, because "" (or '') is the only way to
# represent an empty string literal argument to a POSIX shell.
#
# This does not match the characters in _escape, because those need to be
# backslash-escaped regardless of whether they appear in a double-quoted
# string.
_quote = re.compile('[\t\n #$%&\'()*;<=>?[{|}~]|^$')
# _escape is a pattern that should match any character that needs to be
# escaped with a backslash, whether or not the argument matched the _quote
# pattern. _escape is used with re.sub to backslash anything in _escape's
# first match group, hence the (parentheses) in the regular expression.
#
# _escape matches the following characters appearing anywhere in an argument:
# " to prevent POSIX shells from interpreting this character for quoting
# \ to prevent POSIX shells from interpreting this character for escaping
# ` to prevent POSIX shells from interpreting this character for command
# substitution
# Missing from this list is $, because the desired behavior of
# EncodePOSIXShellArgument is to permit parameter (variable) expansion.
#
# Also missing from this list is !, which bash will interpret as the history
# expansion character when history is enabled. bash does not enable history
# by default in non-interactive shells, so this is not thought to be a problem.
# ! was omitted from this list because bash interprets "\!" as a literal string
# including the backslash character (avoiding history expansion but retaining
# the backslash), which would not be correct for argument encoding. Handling
# this case properly would also be problematic because bash allows the history
# character to be changed with the histchars shell variable. Fortunately,
# as history is not enabled in non-interactive shells and
# EncodePOSIXShellArgument is only expected to encode for non-interactive
# shells, there is no room for error here by ignoring !.
_escape = re.compile(r'(["\\`])')
def EncodePOSIXShellArgument(argument):
"""Encodes |argument| suitably for consumption by POSIX shells.
argument may be quoted and escaped as necessary to ensure that POSIX shells
treat the returned value as a literal representing the argument passed to
this function. Parameter (variable) expansions beginning with $ are allowed
to remain intact without escaping the $, to allow the argument to contain
references to variables to be expanded by the shell.
"""
if not isinstance(argument, str):
argument = str(argument)
if _quote.search(argument):
quote = '"'
else:
quote = ''
encoded = quote + re.sub(_escape, r'\\\1', argument) + quote
return encoded
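# Hedged examples (not part of the original file):
#   EncodePOSIXShellArgument('hello world') -> '"hello world"'  (space triggers double-quoting)
#   EncodePOSIXShellArgument('$HOME')       -> '"$HOME"'        ($ is quoted but still expandable)
#   EncodePOSIXShellArgument('a"b')         -> 'a\\"b'          (the double quote is backslash-escaped)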
def EncodePOSIXShellList(list):
"""Encodes |list| suitably for consumption by POSIX shells.
Returns EncodePOSIXShellArgument for each item in list, and joins them
together using the space character as an argument separator.
"""
encoded_arguments = []
for argument in list:
encoded_arguments.append(EncodePOSIXShellArgument(argument))
return ' '.join(encoded_arguments)
def DeepDependencyTargets(target_dicts, roots):
"""Returns the recursive list of target dependencies."""
dependencies = set()
pending = set(roots)
while pending:
# Pluck out one.
r = pending.pop()
# Skip if visited already.
if r in dependencies:
continue
# Add it.
dependencies.add(r)
# Add its children.
spec = target_dicts[r]
pending.update(set(spec.get('dependencies', [])))
pending.update(set(spec.get('dependencies_original', [])))
return list(dependencies - set(roots))
def BuildFileTargets(target_list, build_file):
"""From a target_list, returns the subset from the specified build_file.
"""
return [p for p in target_list if BuildFile(p) == build_file]
def AllTargets(target_list, target_dicts, build_file):
"""Returns all targets (direct and dependencies) for the specified build_file.
"""
bftargets = BuildFileTargets(target_list, build_file)
deptargets = DeepDependencyTargets(target_dicts, bftargets)
return bftargets + deptargets
def WriteOnDiff(filename):
"""Write to a file only if the new contents differ.
Arguments:
filename: name of the file to potentially write to.
Returns:
A file like object which will write to temporary file and only overwrite
the target if it differs (on close).
"""
class Writer(object):
"""Wrapper around file which only covers the target if it differs."""
def __init__(self):
# Pick temporary file.
tmp_fd, self.tmp_path = tempfile.mkstemp(
suffix='.tmp',
prefix=os.path.split(filename)[1] + '.gyp.',
dir=os.path.split(filename)[0])
try:
self.tmp_file = os.fdopen(tmp_fd, 'wb')
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
def __getattr__(self, attrname):
# Delegate everything else to self.tmp_file
return getattr(self.tmp_file, attrname)
def close(self):
try:
# Close tmp file.
self.tmp_file.close()
# Determine if different.
same = False
try:
same = filecmp.cmp(self.tmp_path, filename, False)
except OSError, e:
if e.errno != errno.ENOENT:
raise
if same:
# The new file is identical to the old one, just get rid of the new
# one.
os.unlink(self.tmp_path)
else:
# The new file is different from the old one, or there is no old one.
# Rename the new file to the permanent name.
#
# tempfile.mkstemp uses an overly restrictive mode, resulting in a
# file that can only be read by the owner, regardless of the umask.
# There's no reason to not respect the umask here, which means that
# an extra hoop is required to fetch it and reset the new file's mode.
#
# No way to get the umask without setting a new one? Set a safe one
# and then set it back to the old value.
umask = os.umask(077)
os.umask(umask)
os.chmod(self.tmp_path, 0666 & ~umask)
if sys.platform == 'win32' and os.path.exists(filename):
# NOTE: on windows (but not cygwin) rename will not replace an
# existing file, so it must be preceded with a remove. Sadly there
# is no way to make the switch atomic.
os.remove(filename)
os.rename(self.tmp_path, filename)
except Exception:
# Don't leave turds behind.
os.unlink(self.tmp_path)
raise
return Writer()
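# Hedged usage sketch (not part of the original file): the returned object behaves
# like a writable file, and the target is only replaced on close() if the new
# contents differ. 'build.ninja' is a placeholder filename.
#
#   out = WriteOnDiff('build.ninja')
#   out.write('rule cc\n')
#   out.close()  # leaves the existing file untouched if the contents are identical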
def EnsureDirExists(path):
"""Make sure the directory for |path| exists."""
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
def GetFlavor(params):
"""Returns |params.flavor| if it's set, the system's default flavor else."""
flavors = {
'cygwin': 'win',
'win32': 'win',
'darwin': 'mac',
}
if 'flavor' in params:
return params['flavor']
if sys.platform in flavors:
return flavors[sys.platform]
if sys.platform.startswith('sunos'):
return 'solaris'
if sys.platform.startswith('freebsd'):
return 'freebsd'
if sys.platform.startswith('openbsd'):
return 'openbsd'
if sys.platform.startswith('netbsd'):
return 'netbsd'
if sys.platform.startswith('aix'):
return 'aix'
return 'linux'
def CopyTool(flavor, out_path):
"""Finds (flock|mac|win)_tool.gyp in the gyp directory and copies it
to |out_path|."""
# aix and solaris just need flock emulation. mac and win use more complicated
# support scripts.
prefix = {
'aix': 'flock',
'solaris': 'flock',
'mac': 'mac',
'win': 'win'
}.get(flavor, None)
if not prefix:
return
# Slurp input file.
source_path = os.path.join(
os.path.dirname(os.path.abspath(__file__)), '%s_tool.py' % prefix)
with open(source_path) as source_file:
source = source_file.readlines()
# Add header and write it out.
tool_path = os.path.join(out_path, 'gyp-%s-tool' % prefix)
with open(tool_path, 'w') as tool_file:
tool_file.write(
''.join([source[0], '# Generated by gyp. Do not edit.\n'] + source[1:]))
# Make file executable.
os.chmod(tool_path, 0755)
# From Alex Martelli,
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
# ASPN: Python Cookbook: Remove duplicates from a sequence
# First comment, dated 2001/10/13.
# (Also in the printed Python Cookbook.)
def uniquer(seq, idfun=None):
if idfun is None:
idfun = lambda x: x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
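# Hedged examples (not part of the original file):
#   uniquer([1, 2, 1, 3, 2])                  -> [1, 2, 3]
#   uniquer(['a', 'A', 'b'], idfun=str.lower) -> ['a', 'b']  (first occurrence wins)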
# Based on http://code.activestate.com/recipes/576694/.
class OrderedSet(collections.MutableSet):
def __init__(self, iterable=None):
self.end = end = []
end += [None, end, end] # sentinel node for doubly linked list
self.map = {} # key --> [key, prev, next]
if iterable is not None:
self |= iterable
def __len__(self):
return len(self.map)
def __contains__(self, key):
return key in self.map
def add(self, key):
if key not in self.map:
end = self.end
curr = end[1]
curr[2] = end[1] = self.map[key] = [key, curr, end]
def discard(self, key):
if key in self.map:
key, prev_item, next_item = self.map.pop(key)
prev_item[2] = next_item
next_item[1] = prev_item
def __iter__(self):
end = self.end
curr = end[2]
while curr is not end:
yield curr[0]
curr = curr[2]
def __reversed__(self):
end = self.end
curr = end[1]
while curr is not end:
yield curr[0]
curr = curr[1]
# The second argument is an addition that causes a pylint warning.
def pop(self, last=True): # pylint: disable=W0221
if not self:
raise KeyError('set is empty')
key = self.end[1][0] if last else self.end[2][0]
self.discard(key)
return key
def __repr__(self):
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self))
def __eq__(self, other):
if isinstance(other, OrderedSet):
return len(self) == len(other) and list(self) == list(other)
return set(self) == set(other)
# Extensions to the recipe.
def update(self, iterable):
for i in iterable:
if i not in self:
self.add(i)
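# Hedged usage sketch (not part of the original file): insertion order is
# preserved, unlike the builtin set.
#
#   s = OrderedSet(['b', 'a', 'b', 'c'])
#   list(s)        -> ['b', 'a', 'c']
#   s.discard('a')
#   s.pop()        -> 'c'  (pops from the end by default)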
class CycleError(Exception):
"""An exception raised when an unexpected cycle is detected."""
def __init__(self, nodes):
self.nodes = nodes
def __str__(self):
return 'CycleError: cycle involving: ' + str(self.nodes)
def TopologicallySorted(graph, get_edges):
r"""Topologically sort based on a user provided edge definition.
Args:
graph: A list of node names.
get_edges: A function mapping from node name to a hashable collection
of node names which this node has outgoing edges to.
Returns:
A list containing all of the node in graph in topological order.
It is assumed that calling get_edges once for each node and caching is
cheaper than repeatedly calling get_edges.
Raises:
CycleError in the event of a cycle.
Example:
graph = {'a': '$(b) $(c)', 'b': 'hi', 'c': '$(b)'}
def GetEdges(node):
return re.findall(r'\$\(([^)]+)\)', graph[node])
print TopologicallySorted(graph.keys(), GetEdges)
==>
['a', 'c', 'b']
"""
get_edges = memoize(get_edges)
visited = set()
visiting = set()
ordered_nodes = []
def Visit(node):
if node in visiting:
raise CycleError(visiting)
if node in visited:
return
visited.add(node)
visiting.add(node)
for neighbor in get_edges(node):
Visit(neighbor)
visiting.remove(node)
ordered_nodes.insert(0, node)
for node in sorted(graph):
Visit(node)
return ordered_nodes
def CrossCompileRequested():
# TODO: figure out how to not build extra host objects in the
# non-cross-compile case when this is enabled, and enable unconditionally.
return (os.environ.get('GYP_CROSSCOMPILE') or
os.environ.get('AR_host') or
os.environ.get('CC_host') or
os.environ.get('CXX_host') or
os.environ.get('AR_target') or
os.environ.get('CC_target') or
os.environ.get('CXX_target'))
| gpl-3.0 |
CSC-ORG/Dynamic-Dashboard-2015 | engine/lib/python2.7/site-packages/django/contrib/gis/utils/layermapping.py | 61 | 27125 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
http://geodjango.org/docs/layermapping.html
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.core.exceptions import ObjectDoesNotExist
from django.db import connections, router
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (CoordTransform, DataSource,
OGRException, OGRGeometry, OGRGeomType, SpatialReference)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime)
from django.db import models, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
pass
class InvalidString(LayerMapError):
pass
class InvalidDecimal(LayerMapError):
pass
class InvalidInteger(LayerMapError):
pass
class MissingForeignKey(LayerMapError):
pass
class LayerMapping(object):
"A class that maps OGR Layers to GeoDjango Models."
# Acceptable 'base' types for a multi-geometry type.
MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
2: OGRGeomType('MultiLineString'),
3: OGRGeomType('MultiPolygon'),
OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
}
# Acceptable Django field types and corresponding acceptable OGR
# counterparts.
FIELD_TYPES = {
models.AutoField: OFTInteger,
models.IntegerField: (OFTInteger, OFTReal, OFTString),
models.FloatField: (OFTInteger, OFTReal),
models.DateField: OFTDate,
models.DateTimeField: OFTDateTime,
models.EmailField: OFTString,
models.TimeField: OFTTime,
models.DecimalField: (OFTInteger, OFTReal),
models.CharField: OFTString,
models.SlugField: OFTString,
models.TextField: OFTString,
models.URLField: OFTString,
models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
}
def __init__(self, model, data, mapping, layer=0,
source_srs=None, encoding='utf-8',
transaction_mode='commit_on_success',
transform=True, unique=None, using=None):
"""
A LayerMapping object is initialized using the given Model (not an instance),
a DataSource (or string path to an OGR-supported data file), and a mapping
dictionary. See the module level docstring for more details and keyword
argument usage.
"""
# Getting the DataSource and the associated Layer.
if isinstance(data, six.string_types):
self.ds = DataSource(data, encoding=encoding)
else:
self.ds = data
self.layer = self.ds[layer]
self.using = using if using is not None else router.db_for_write(model)
self.spatial_backend = connections[self.using].ops
# Setting the mapping & model attributes.
self.mapping = mapping
self.model = model
# Checking the layer -- initialization of the object will fail if
# things don't check out before hand.
self.check_layer()
# Getting the geometry column associated with the model (an
# exception will be raised if there is no geometry column).
if self.spatial_backend.mysql:
transform = False
else:
self.geo_field = self.geometry_field()
# Checking the source spatial reference system, and getting
# the coordinate transformation object (unless the `transform`
# keyword is set to False)
if transform:
self.source_srs = self.check_srs(source_srs)
self.transform = self.coord_transform()
else:
self.transform = transform
# Setting the encoding for OFTString fields, if specified.
if encoding:
# Making sure the encoding exists, if not a LookupError
# exception will be thrown.
from codecs import lookup
lookup(encoding)
self.encoding = encoding
else:
self.encoding = None
if unique:
self.check_unique(unique)
transaction_mode = 'autocommit' # Has to be set to autocommit.
self.unique = unique
else:
self.unique = None
# Setting the transaction decorator with the function in the
# transaction modes dictionary.
self.transaction_mode = transaction_mode
if transaction_mode == 'autocommit':
self.transaction_decorator = None
elif transaction_mode == 'commit_on_success':
self.transaction_decorator = transaction.atomic
else:
raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
#### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
def check_layer(self):
"""
This checks the Layer metadata, and ensures that it is compatible
with the mapping information and model. Unlike previous revisions,
there is no need to increment through each feature in the Layer.
"""
# The geometry field of the model is set here.
# TODO: Support more than one geometry field / model. However, this
# depends on the GDAL Driver in use.
self.geom_field = False
self.fields = {}
# Getting lists of the field names and the field types available in
# the OGR Layer.
ogr_fields = self.layer.fields
ogr_field_types = self.layer.field_types
# Function for determining if the OGR mapping field is in the Layer.
def check_ogr_fld(ogr_map_fld):
try:
idx = ogr_fields.index(ogr_map_fld)
except ValueError:
raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
return idx
# No need to increment through each feature in the model, simply check
# the Layer metadata against what was given in the mapping dictionary.
for field_name, ogr_name in self.mapping.items():
# Ensuring that a corresponding field exists in the model
# for the given field name in the mapping.
try:
model_field = self.model._meta.get_field(field_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)
# Getting the string name for the Django field class (e.g., 'PointField').
fld_name = model_field.__class__.__name__
if isinstance(model_field, GeometryField):
if self.geom_field:
raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')
# Getting the coordinate dimension of the geometry field.
coord_dim = model_field.dim
try:
if coord_dim == 3:
gtype = OGRGeomType(ogr_name + '25D')
else:
gtype = OGRGeomType(ogr_name)
except OGRException:
raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)
# Making sure that the OGR Layer's Geometry is compatible.
ltype = self.layer.geom_type
if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
raise LayerMapError('Invalid mapping geometry; model has %s%s, '
'layer geometry type is %s.' %
(fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))
# Setting the `geom_field` attribute w/the name of the model field
# that is a Geometry. Also setting the coordinate dimension
# attribute.
self.geom_field = field_name
self.coord_dim = coord_dim
fields_val = model_field
elif isinstance(model_field, models.ForeignKey):
if isinstance(ogr_name, dict):
# Is every given related model mapping field in the Layer?
rel_model = model_field.rel.to
for rel_name, ogr_field in ogr_name.items():
idx = check_ogr_fld(ogr_field)
try:
rel_model._meta.get_field(rel_name)
except models.fields.FieldDoesNotExist:
raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
(rel_name, rel_model.__class__.__name__))
fields_val = rel_model
else:
raise TypeError('ForeignKey mapping must be of dictionary type.')
else:
# Is the model field type supported by LayerMapping?
if model_field.__class__ not in self.FIELD_TYPES:
raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)
# Is the OGR field in the Layer?
idx = check_ogr_fld(ogr_name)
ogr_field = ogr_field_types[idx]
# Can the OGR field type be mapped to the Django field type?
if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
(ogr_field, ogr_field.__name__, fld_name))
fields_val = model_field
self.fields[field_name] = fields_val
def check_srs(self, source_srs):
"Checks the compatibility of the given spatial reference object."
if isinstance(source_srs, SpatialReference):
sr = source_srs
elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
sr = source_srs.srs
elif isinstance(source_srs, (int, six.string_types)):
sr = SpatialReference(source_srs)
else:
# Otherwise just pulling the SpatialReference from the layer
sr = self.layer.srs
if not sr:
raise LayerMapError('No source reference system defined.')
else:
return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, six.string_types):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
def feature_kwargs(self, feat):
"""
Given an OGR Feature, this will return a dictionary of keyword arguments
for constructing the mapped model.
"""
# The keyword arguments for model construction.
kwargs = {}
# Incrementing through each model field and OGR field in the
# dictionary mapping.
for field_name, ogr_name in self.mapping.items():
model_field = self.fields[field_name]
if isinstance(model_field, GeometryField):
# Verify OGR geometry.
try:
val = self.verify_geom(feat.geom, model_field)
except OGRException:
raise LayerMapError('Could not retrieve geometry from feature.')
elif isinstance(model_field, models.base.ModelBase):
# The related _model_, not a field was passed in -- indicating
# another mapping for the related Model.
val = self.verify_fk(feat, model_field, ogr_name)
else:
# Otherwise, verify OGR Field type.
val = self.verify_ogr_field(feat[ogr_name], model_field)
# Setting the keyword arguments for the field name with the
# value obtained above.
kwargs[field_name] = val
return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, six.string_types):
return {self.unique: kwargs[self.unique]}
else:
return dict((fld, kwargs[fld]) for fld in self.unique)
#### Verification routines used in constructing model keyword arguments. ####
def verify_ogr_field(self, ogr_field, model_field):
"""
Verifies if the OGR Field contents are acceptable to the Django
model field. If they are, the verified value is returned,
otherwise the proper exception is raised.
"""
if (isinstance(ogr_field, OFTString) and
isinstance(model_field, (models.CharField, models.TextField))):
if self.encoding:
# The encoding for OGR data sources may be specified here
# (e.g., 'cp437' for Census Bureau boundary files).
val = force_text(ogr_field.value, self.encoding)
else:
val = ogr_field.value
if model_field.max_length and len(val) > model_field.max_length:
raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
(model_field.name, model_field.max_length, len(val)))
elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
try:
# Creating an instance of the Decimal value to use.
d = Decimal(str(ogr_field.value))
except DecimalInvalidOperation:
raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)
# Getting the decimal value as a tuple.
dtup = d.as_tuple()
digits = dtup[1]
d_idx = dtup[2] # index where the decimal is
# Maximum amount of precision, or digits to the left of the decimal.
max_prec = model_field.max_digits - model_field.decimal_places
# Getting the digits to the left of the decimal place for the
# given decimal.
if d_idx < 0:
n_prec = len(digits[:d_idx])
else:
n_prec = len(digits) + d_idx
# If we have more than the maximum digits allowed, then throw an
# InvalidDecimal exception.
if n_prec > max_prec:
raise InvalidDecimal('A DecimalField with max_digits %d, decimal_places %d must round to an absolute value less than 10^%d.' %
(model_field.max_digits, model_field.decimal_places, max_prec))
val = d
elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
# Attempt to convert any OFTReal and OFTString value to an OFTInteger.
try:
val = int(ogr_field.value)
except ValueError:
raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
else:
val = ogr_field.value
return val
def verify_fk(self, feat, rel_model, rel_mapping):
"""
Given an OGR Feature, the related model and its dictionary mapping,
this routine will retrieve the related model for the ForeignKey
mapping.
"""
# TODO: It is expensive to retrieve a model for every record --
# explore if an efficient mechanism exists for caching related
# ForeignKey models.
# Constructing and verifying the related model keyword arguments.
fk_kwargs = {}
for field_name, ogr_name in rel_mapping.items():
fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))
# Attempting to retrieve and return the related model.
try:
return rel_model.objects.using(self.using).get(**fk_kwargs)
except ObjectDoesNotExist:
raise MissingForeignKey('No ForeignKey %s model found with keyword arguments: %s' % (rel_model.__name__, fk_kwargs))
def verify_geom(self, geom, model_field):
"""
Verifies the geometry -- will construct and return a GeometryCollection
if necessary (for example if the model field is MultiPolygonField while
the mapped shapefile only contains Polygons).
"""
# Downgrade a 3D geom to a 2D one, if necessary.
if self.coord_dim != geom.coord_dim:
geom.coord_dim = self.coord_dim
if self.make_multi(geom.geom_type, model_field):
# Constructing a multi-geometry type to contain the single geometry
multi_type = self.MULTI_TYPES[geom.geom_type.num]
g = OGRGeometry(multi_type)
g.add(geom)
else:
g = geom
# Transforming the geometry with our Coordinate Transformation object,
# but only if the class variable `transform` is set w/a CoordTransform
# object.
if self.transform:
g.transform(self.transform)
# Returning the WKT of the geometry.
return g.wkt
#### Other model methods ####
def coord_transform(self):
"Returns the coordinate transformation object."
SpatialRefSys = self.spatial_backend.spatial_ref_sys()
try:
# Getting the target spatial reference system
target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs
# Creating the CoordTransform object
return CoordTransform(self.source_srs, target_srs)
except Exception as msg:
new_msg = 'Could not translate between the data source and model geometry: %s' % msg
six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use the `get_field_by_name` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
fld, model, direct, m2m = opts.get_field_by_name(self.geom_field)
return fld
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
progress=False, silent=False, stream=sys.stdout, strict=False):
"""
Saves the contents from the OGR DataSource Layer into the database
according to the mapping dictionary given at initialization.
Keyword Parameters:
verbose:
If set, information will be printed subsequent to each model save
executed on the database.
fid_range:
May be set with a slice or tuple of (begin, end) feature ID's to map
from the data source. In other words, this keyword enables the user
to selectively import a subset range of features in the geographic
data source.
step:
If set with an integer, transactions will occur at every step
interval. For example, if step=1000, a commit would occur after
the 1,000th feature, the 2,000th feature etc.
progress:
When this keyword is set, status information will be printed giving
the number of features processed and successfully saved. By default,
progress information will be printed every 1000 features processed,
however, this default may be overridden by setting this keyword with an
integer for the desired interval.
stream:
Status information will be written to this file handle. Defaults to
using `sys.stdout`, but any object with a `write` method is supported.
silent:
By default, non-fatal error notifications are printed to stdout, but
this keyword may be set to disable these notifications.
strict:
Execution of the model mapping will cease upon the first error
encountered. The default behavior is to attempt to continue.
"""
# Getting the default Feature ID range.
default_range = self.check_fid_range(fid_range)
# Setting the progress interval, if requested.
if progress:
if progress is True or not isinstance(progress, int):
progress_interval = 1000
else:
progress_interval = progress
def _save(feat_range=default_range, num_feat=0, num_saved=0):
if feat_range:
layer_iter = self.layer[feat_range]
else:
layer_iter = self.layer
for feat in layer_iter:
num_feat += 1
# Getting the keyword arguments
try:
kwargs = self.feature_kwargs(feat)
except LayerMapError as msg:
# Something borked the validation
if strict:
raise
elif not silent:
stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
else:
# Constructing the model using the keyword args
is_update = False
if self.unique:
# If we want unique models on a particular field, handle the
# geometry appropriately.
try:
# Getting the keyword arguments and retrieving
# the unique model.
u_kwargs = self.unique_kwargs(kwargs)
m = self.model.objects.using(self.using).get(**u_kwargs)
is_update = True
# Getting the geometry (in OGR form), creating
# one from the kwargs WKT, adding in additional
# geometries, and update the attribute with the
# just-updated geometry WKT.
geom = getattr(m, self.geom_field).ogr
new = OGRGeometry(kwargs[self.geom_field])
for g in new:
geom.add(g)
setattr(m, self.geom_field, geom.wkt)
except ObjectDoesNotExist:
# No unique model exists yet, create.
m = self.model(**kwargs)
else:
m = self.model(**kwargs)
try:
# Attempting to save.
m.save(using=self.using)
num_saved += 1
if verbose:
stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
except Exception as msg:
if strict:
# Bailing out if the `strict` keyword is set.
if not silent:
stream.write('Failed to save the feature (id: %s) into the model with the keyword arguments:\n' % feat.fid)
stream.write('%s\n' % kwargs)
raise
elif not silent:
stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))
# Printing progress information, if requested.
if progress and num_feat % progress_interval == 0:
stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))
# Only used for status output purposes -- incremental saving uses the
# values returned here.
return num_saved, num_feat
if self.transaction_decorator is not None:
_save = self.transaction_decorator(_save)
nfeat = self.layer.num_feat
if step and isinstance(step, int) and step < nfeat:
# Incremental saving is requested at the given interval (step)
if default_range:
raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
beg, num_feat, num_saved = (0, 0, 0)
indices = range(step, nfeat, step)
n_i = len(indices)
for i, end in enumerate(indices):
# Constructing the slice to use for this step; the last slice is
# special (e.g, [100:] instead of [90:100]).
if i + 1 == n_i:
step_slice = slice(beg, None)
else:
step_slice = slice(beg, end)
try:
num_feat, num_saved = _save(step_slice, num_feat, num_saved)
beg = end
except: # Deliberately catch everything
stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
raise
else:
# Otherwise, just calling the previously defined _save() function.
_save()
| mit |
djeo94/CouchPotatoServer | CouchPotato.py | 51 | 4616 | #!/usr/bin/env python
from __future__ import print_function
from logging import handlers
from os.path import dirname
import logging
import os
import select
import signal
import socket
import subprocess
import sys
import traceback
# Root path
base_path = dirname(os.path.abspath(__file__))
# Insert local directories into path
sys.path.insert(0, os.path.join(base_path, 'libs'))
from couchpotato.environment import Env
from couchpotato.core.helpers.variable import getDataDir, removePyc
# Remove pyc files before dynamic load (sees .pyc files regular .py modules)
removePyc(base_path)
class Loader(object):
do_restart = False
def __init__(self):
# Get options via arg
from couchpotato.runner import getOptions
self.options = getOptions(sys.argv[1:])
# Load settings
settings = Env.get('settings')
settings.setFile(self.options.config_file)
# Create data dir if needed
if self.options.data_dir:
self.data_dir = self.options.data_dir
else:
self.data_dir = os.path.expanduser(Env.setting('data_dir'))
if self.data_dir == '':
self.data_dir = getDataDir()
if not os.path.isdir(self.data_dir):
os.makedirs(self.data_dir)
# Create logging dir
self.log_dir = os.path.join(self.data_dir, 'logs')
if not os.path.isdir(self.log_dir):
os.makedirs(self.log_dir)
# Logging
from couchpotato.core.logger import CPLog
self.log = CPLog(__name__)
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%H:%M:%S')
hdlr = handlers.RotatingFileHandler(os.path.join(self.log_dir, 'error.log'), 'a', 500000, 10)
hdlr.setLevel(logging.CRITICAL)
hdlr.setFormatter(formatter)
self.log.logger.addHandler(hdlr)
def addSignals(self):
signal.signal(signal.SIGINT, self.onExit)
signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
from couchpotato.core.event import addEvent
addEvent('app.do_shutdown', self.setRestart)
def setRestart(self, restart):
self.do_restart = restart
return True
def onExit(self, signal, frame):
from couchpotato.core.event import fireEvent
fireEvent('app.shutdown', single = True)
def run(self):
self.addSignals()
from couchpotato.runner import runCouchPotato
runCouchPotato(self.options, base_path, sys.argv[1:], data_dir = self.data_dir, log_dir = self.log_dir, Env = Env)
if self.do_restart:
self.restart()
def restart(self):
try:
# remove old pidfile first
try:
if self.runAsDaemon():
try: self.daemon.stop()
except: pass
except:
self.log.critical(traceback.format_exc())
# Release log files and shutdown logger
logging.shutdown()
args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:]
subprocess.Popen(args)
except:
self.log.critical(traceback.format_exc())
def daemonize(self):
if self.runAsDaemon():
try:
from daemon import Daemon
self.daemon = Daemon(self.options.pid_file)
self.daemon.daemonize()
except SystemExit:
raise
except:
self.log.critical(traceback.format_exc())
def runAsDaemon(self):
return self.options.daemon and self.options.pid_file
if __name__ == '__main__':
l = None
try:
l = Loader()
l.daemonize()
l.run()
except KeyboardInterrupt:
pass
except select.error:
pass
except SystemExit:
raise
except socket.error as e:
# log when socket receives SIGINT, but continue.
# previous code would have skipped over other types of IO errors too.
nr, msg = e
if nr != 4:
try:
l.log.critical(traceback.format_exc())
except:
print(traceback.format_exc())
raise
except:
try:
# if this fails we will have two tracebacks
# one for failing to log, and one for the exception that got us here.
if l:
l.log.critical(traceback.format_exc())
else:
print(traceback.format_exc())
except:
print(traceback.format_exc())
raise
| gpl-3.0 |
kynikos/wiki-scripts | ws/parser_helpers/encodings.py | 1 | 7478 | #! /usr/bin/env python3
import string
import re
import unicodedata
__all__ = ["encode", "decode", "dotencode", "urlencode", "urldecode", "queryencode", "querydecode"]
def encode(str_, escape_char="%", encode_chars="", skip_chars="", special_map=None, charset="utf-8", errors="strict"):
"""
Generalized implementation of a `percent encoding`_ algorithm.
.. _`percent encoding`: https://en.wikipedia.org/wiki/Percent-encoding
:param str_: the string to be encoded
:param escape_char: character to be used as escape (by default '%')
:param encode_chars: the characters to be encoded; empty string means that
all characters will be encoded unless explicitly skipped
:param skip_chars: characters to be skipped (applied after ``encode_chars``)
:param special_map: a mapping overriding default encoding (applied after
both ``encode_chars`` and ``skip_chars``)
:param charset: character set used to encode non-ASCII characters to byte
sequence with :py:meth:`str.encode()`
:param errors: defines behaviour when encoding non-ASCII characters to bytes
fails (passed to :py:meth:`str.encode()`)
"""
output = ""
for char in str_:
if encode_chars == "" or char in encode_chars:
if char not in skip_chars:
if special_map is not None and char in special_map:
output += special_map[char]
else:
for byte in bytes(char, charset, errors):
output += "{}{:02X}".format(escape_char, byte)
else:
output += char
else:
output += char
return output
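# Hedged examples (not part of the original module), using string.ascii_letters
# from the already-imported string module:
#   encode("a b", skip_chars=string.ascii_letters)  -> 'a%20b'
#   encode("č", skip_chars=string.ascii_letters)    -> '%C4%8D'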
def decode(str_, escape_char="%", special_map=None, charset="utf-8", errors="strict"):
"""
An inverse function to :py:func:`encode`.
.. note::
The reversibility of the encoding depends on the parameters passed to
:py:func:`encode`. Specifically, if the `escape_char` is not encoded,
the operation is irreversible. Unfortunately MediaWiki does this with
dot-encoding, so don't even try to decode dot-encoded strings!
:param str_: the string to be decoded
:param escape_char: character to be used as escape (by default '%')
:param special_map: an analogue to the same parameter in :py:func:`encode`
(the caller is responsible for inverting the mapping they passed to
:py:func:`encode`)
:param charset:
character set used to decode byte sequence with :py:meth:`bytes.decode()`
:param errors:
defines behaviour when byte-decoding with :py:meth:`bytes.decode()` fails
"""
tok = re.compile(escape_char + "([0-9A-Fa-f]{2})|(.)", re.DOTALL)
output = ""
barr = bytearray()
for match in tok.finditer(str_):
enc_couple, char = match.groups()
if enc_couple:
barr.append(int(enc_couple, 16))
else:
if len(barr) > 0:
output += barr.decode(charset, errors)
barr = bytearray()
if special_map is not None and char in special_map:
output += special_map[char]
else:
output += char
if len(barr) > 0:
output += barr.decode(charset, errors)
return output
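# Hedged examples (not part of the original module):
#   decode("a%20b")   -> 'a b'
#   decode("%C4%8D")  -> 'č'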
def _anchor_preprocess(str_):
"""
Context-sensitive pre-processing for anchor-encoding. See `MediaWiki`_ for
details.
.. _`MediaWiki`: https://www.mediawiki.org/wiki/Manual:PAGENAMEE_encoding
"""
# underscores are treated as spaces during this pre-processing, so they are
# convert to spaces first (the encoding later converts them back)
str_ = str_.replace("_", " ")
# strip leading + trailing whitespace
str_ = str_.strip()
# squash *spaces* in the middle (other whitespace is preserved)
str_ = re.sub("[ ]+", " ", str_)
# leading colons are stripped, others preserved (colons in the middle preceded by
# newline are supposed to be fucked up in MediaWiki, but this is pretty safe to ignore)
str_ = str_.lstrip(":")
return str_
def dotencode(str_):
"""
Return an anchor-encoded string as shown in this `encoding table`_. It uses
the ``legacy`` format for `$wgFragmentMode`_.
.. note::
The rules for handling special characters in section anchors are not
well defined even upstream, see `T20431`_. This function produces the
actual anchor for the section, i.e. the ID of the heading's span element
(e.g. ``<span id="anchor" ...>``).
.. _`encoding table`: https://www.mediawiki.org/wiki/Manual:PAGENAMEE_encoding#Encodings_compared
.. _`T20431`: https://phabricator.wikimedia.org/T20431
.. _`$wgFragmentMode`: https://www.mediawiki.org/wiki/Manual:$wgFragmentMode
"""
skipped = string.ascii_letters + string.digits + "-_.:"
special = {" ": "_"}
return encode(_anchor_preprocess(str_), escape_char=".", skip_chars=skipped, special_map=special)
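# Hedged examples (not part of the original module):
#   dotencode("Foo bar")    -> 'Foo_bar'
#   dotencode("Foo (bar)")  -> 'Foo_.28bar.29'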
def anchorencode(str_, format="html5"):
"""
Function corresponding to the ``{{anchorencode:}}`` `magic word`_.
:param str_: the string to be encoded
:param format: either ``"html5"`` or ``"legacy"`` (see `$wgFragmentMode`_)
.. _`magic word`: https://www.mediawiki.org/wiki/Help:Magic_words
.. _`$wgFragmentMode`: https://www.mediawiki.org/wiki/Manual:$wgFragmentMode
"""
if format not in {"html5", "legacy"}:
raise ValueError(format)
if format == "legacy":
return dotencode(str_)
str_ = _anchor_preprocess(str_)
special_map = {" ": "_"}
escape_char = "."
charset="utf-8"
errors="strict"
# below is the code from the encode function, but without the encode_chars
# and skip_chars parameters, and adjusted for unicode categories
output = ""
for char in str_:
# encode only characters from the Separator and Other categories
# https://en.wikipedia.org/wiki/Unicode#General_Category_property
if unicodedata.category(char)[0] in {"Z", "C"}:
if special_map is not None and char in special_map:
output += special_map[char]
else:
for byte in bytes(char, charset, errors):
output += "{}{:02X}".format(escape_char, byte)
else:
output += char
return output
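# Hedged examples (not part of the original module): in "html5" mode only
# characters from the Separator and Other categories are escaped, so most
# punctuation survives unchanged.
#   anchorencode("Foo bar")    -> 'Foo_bar'
#   anchorencode("Foo (bar)")  -> 'Foo_(bar)'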
def urlencode(str_):
"""
Standard URL encoding as described on `Wikipedia`_, which should correspond
to the ``PATH`` style in the MediaWiki's `comparison table`_.
.. _`Wikipedia`: https://en.wikipedia.org/wiki/Percent-encoding
.. _`comparison table`: https://www.mediawiki.org/wiki/Manual:PAGENAMEE_encoding#Encodings_compared
"""
skipped = string.ascii_letters + string.digits + "-_.~"
return encode(str_, skip_chars=skipped)
def urldecode(str_):
"""
An inverse function to :py:func:`urlencode`.
"""
return decode(str_)
def queryencode(str_):
"""
The ``QUERY`` style encoding as described on `MediaWiki`_. This is the
default URL encoding in MediaWiki since 1.17.
.. _`MediaWiki`: https://www.mediawiki.org/wiki/Manual:PAGENAMEE_encoding#Encodings_compared
"""
skipped = string.ascii_letters + string.digits + "-_."
special = {" ": "+"}
return encode(str_, skip_chars=skipped, special_map=special)
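# Hedged comparison (not part of the original module) of the two URL styles:
#   urlencode("Main Page/Sub")    -> 'Main%20Page%2FSub'
#   queryencode("Main Page/Sub")  -> 'Main+Page%2FSub'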
def querydecode(str_):
"""
An inverse function to :py:func:`queryencode`.
"""
special = {"+": " "}
return decode(str_, special_map=special)
| gpl-3.0 |
anditto/bitcoin | test/functional/wallet_reorgsrestore.py | 28 | 4745 | #!/usr/bin/env python3
# Copyright (c) 2019-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test tx status in case of reorgs while wallet being shutdown.
Wallet txn status relies on block connection/disconnection for its
accuracy. In case of reorgs happening while the wallet is shut down,
block updates are not going to be received. At wallet loading, we
check against the chain whether confirmed txns are still in the chain
and change their status if the block in which they were included has
been disconnected.
"""
from decimal import Decimal
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class ReorgsRestoreTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Send a tx from which to conflict outputs later
txid_conflict_from = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
self.nodes[0].generate(1)
self.sync_blocks()
# Disconnect node1 from others to reorg its chain later
self.disconnect_nodes(0, 1)
self.disconnect_nodes(1, 2)
self.connect_nodes(0, 2)
# Send a tx to be unconfirmed later
txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
tx = self.nodes[0].gettransaction(txid)
self.nodes[0].generate(4)
tx_before_reorg = self.nodes[0].gettransaction(txid)
assert_equal(tx_before_reorg["confirmations"], 4)
# Disconnect node0 from node2 to broadcast a conflict on their respective chains
self.disconnect_nodes(0, 2)
nA = next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(txid_conflict_from)["details"] if tx_out["amount"] == Decimal("10"))
inputs = []
inputs.append({"txid": txid_conflict_from, "vout": nA})
outputs_1 = {}
outputs_2 = {}
# Create a conflicted tx broadcast on node0 chain and conflicting tx broadcast on node1 chain. Both spend from txid_conflict_from
outputs_1[self.nodes[0].getnewaddress()] = Decimal("9.99998")
outputs_2[self.nodes[0].getnewaddress()] = Decimal("9.99998")
conflicted = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_1))
conflicting = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs, outputs_2))
conflicted_txid = self.nodes[0].sendrawtransaction(conflicted["hex"])
self.nodes[0].generate(1)
conflicting_txid = self.nodes[2].sendrawtransaction(conflicting["hex"])
self.nodes[2].generate(9)
# Reconnect node0 and node2 and check that conflicted_txid is effectively conflicted
self.connect_nodes(0, 2)
self.sync_blocks([self.nodes[0], self.nodes[2]])
conflicted = self.nodes[0].gettransaction(conflicted_txid)
conflicting = self.nodes[0].gettransaction(conflicting_txid)
assert_equal(conflicted["confirmations"], -9)
assert_equal(conflicted["walletconflicts"][0], conflicting["txid"])
# Node0 wallet is shutdown
self.restart_node(0)
# The block chain re-orgs and the tx is included in a different block
self.nodes[1].generate(9)
self.nodes[1].sendrawtransaction(tx["hex"])
self.nodes[1].generate(1)
self.nodes[1].sendrawtransaction(conflicted["hex"])
self.nodes[1].generate(1)
# Node0 wallet file is loaded on longest sync'ed node1
self.stop_node(1)
self.nodes[0].backupwallet(os.path.join(self.nodes[0].datadir, 'wallet.bak'))
shutil.copyfile(os.path.join(self.nodes[0].datadir, 'wallet.bak'), os.path.join(self.nodes[1].datadir, self.chain, self.default_wallet_name, self.wallet_data_filename))
self.start_node(1)
tx_after_reorg = self.nodes[1].gettransaction(txid)
# Check that normal confirmed tx is confirmed again but with different blockhash
assert_equal(tx_after_reorg["confirmations"], 2)
assert(tx_before_reorg["blockhash"] != tx_after_reorg["blockhash"])
conflicted_after_reorg = self.nodes[1].gettransaction(conflicted_txid)
# Check that conflicted tx is confirmed again with blockhash different than previously conflicting tx
assert_equal(conflicted_after_reorg["confirmations"], 1)
assert(conflicting["blockhash"] != conflicted_after_reorg["blockhash"])
if __name__ == '__main__':
ReorgsRestoreTest().main()
| mit |
fhaoquan/kbengine | kbe/res/scripts/common/Lib/test/test_set.py | 72 | 63693 | import unittest
from test import support
import gc
import weakref
import operator
import copy
import pickle
from random import randrange, shuffle
import sys
import warnings
import collections
import collections.abc
class PassThru(Exception):
pass
def check_pass_thru():
raise PassThru
yield 1
class BadCmp:
def __hash__(self):
return 1
def __eq__(self, other):
raise RuntimeError
class ReprWrapper:
'Used to test self-referential repr() calls'
def __repr__(self):
return repr(self.value)
class HashCountingInt(int):
'int-like object that counts the number of times __hash__ is called'
def __init__(self, *args):
self.hash_count = 0
def __hash__(self):
self.hash_count += 1
return int.__hash__(self)
class TestJointOps:
# Tests common to both set and frozenset
def setUp(self):
self.word = word = 'simsalabim'
self.otherword = 'madagascar'
self.letters = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
self.s = self.thetype(word)
self.d = dict.fromkeys(word)
def test_new_or_init(self):
self.assertRaises(TypeError, self.thetype, [], 2)
self.assertRaises(TypeError, set().__init__, a=1)
def test_uniquification(self):
actual = sorted(self.s)
expected = sorted(self.d)
self.assertEqual(actual, expected)
self.assertRaises(PassThru, self.thetype, check_pass_thru())
self.assertRaises(TypeError, self.thetype, [[]])
def test_len(self):
self.assertEqual(len(self.s), len(self.d))
def test_contains(self):
for c in self.letters:
self.assertEqual(c in self.s, c in self.d)
self.assertRaises(TypeError, self.s.__contains__, [[]])
s = self.thetype([frozenset(self.letters)])
self.assertIn(self.thetype(self.letters), s)
def test_union(self):
u = self.s.union(self.otherword)
for c in self.letters:
self.assertEqual(c in u, c in self.d or c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(u), self.basetype)
self.assertRaises(PassThru, self.s.union, check_pass_thru())
self.assertRaises(TypeError, self.s.union, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').union(C('cdc')), set('abcd'))
self.assertEqual(self.thetype('abcba').union(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').union(C('ccb')), set('abc'))
self.assertEqual(self.thetype('abcba').union(C('ef')), set('abcef'))
self.assertEqual(self.thetype('abcba').union(C('ef'), C('fg')), set('abcefg'))
# Issue #6573
x = self.thetype()
self.assertEqual(x.union(set([1]), x, set([2])), self.thetype([1, 2]))
def test_or(self):
i = self.s.union(self.otherword)
self.assertEqual(self.s | set(self.otherword), i)
self.assertEqual(self.s | frozenset(self.otherword), i)
try:
self.s | self.otherword
except TypeError:
pass
else:
self.fail("s|t did not screen-out general iterables")
def test_intersection(self):
i = self.s.intersection(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.intersection, check_pass_thru())
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').intersection(C('cdc')), set('cc'))
self.assertEqual(self.thetype('abcba').intersection(C('efgfe')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('ccb')), set('bc'))
self.assertEqual(self.thetype('abcba').intersection(C('ef')), set(''))
self.assertEqual(self.thetype('abcba').intersection(C('cbcf'), C('bag')), set('b'))
s = self.thetype('abcba')
z = s.intersection()
if self.thetype == frozenset():
self.assertEqual(id(s), id(z))
else:
self.assertNotEqual(id(s), id(z))
def test_isdisjoint(self):
def f(s1, s2):
'Pure python equivalent of isdisjoint()'
return not set(s1).intersection(s2)
for larg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
s1 = self.thetype(larg)
for rarg in '', 'a', 'ab', 'abc', 'ababac', 'cdc', 'cc', 'efgfe', 'ccb', 'ef':
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s2 = C(rarg)
actual = s1.isdisjoint(s2)
expected = f(s1, s2)
self.assertEqual(actual, expected)
self.assertTrue(actual is True or actual is False)
def test_and(self):
i = self.s.intersection(self.otherword)
self.assertEqual(self.s & set(self.otherword), i)
self.assertEqual(self.s & frozenset(self.otherword), i)
try:
self.s & self.otherword
except TypeError:
pass
else:
self.fail("s&t did not screen-out general iterables")
def test_difference(self):
i = self.s.difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, c in self.d and c not in self.otherword)
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.difference, check_pass_thru())
self.assertRaises(TypeError, self.s.difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').difference(C('cdc')), set('ab'))
self.assertEqual(self.thetype('abcba').difference(C('efgfe')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').difference(C('ef')), set('abc'))
self.assertEqual(self.thetype('abcba').difference(), set('abc'))
self.assertEqual(self.thetype('abcba').difference(C('a'), C('b')), set('c'))
def test_sub(self):
i = self.s.difference(self.otherword)
self.assertEqual(self.s - set(self.otherword), i)
self.assertEqual(self.s - frozenset(self.otherword), i)
try:
self.s - self.otherword
except TypeError:
pass
else:
self.fail("s-t did not screen-out general iterables")
def test_symmetric_difference(self):
i = self.s.symmetric_difference(self.otherword)
for c in self.letters:
self.assertEqual(c in i, (c in self.d) ^ (c in self.otherword))
self.assertEqual(self.s, self.thetype(self.word))
self.assertEqual(type(i), self.basetype)
self.assertRaises(PassThru, self.s.symmetric_difference, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference, [[]])
for C in set, frozenset, dict.fromkeys, str, list, tuple:
self.assertEqual(self.thetype('abcba').symmetric_difference(C('cdc')), set('abd'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('efgfe')), set('abcefg'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ccb')), set('a'))
self.assertEqual(self.thetype('abcba').symmetric_difference(C('ef')), set('abcef'))
def test_xor(self):
i = self.s.symmetric_difference(self.otherword)
self.assertEqual(self.s ^ set(self.otherword), i)
self.assertEqual(self.s ^ frozenset(self.otherword), i)
try:
self.s ^ self.otherword
except TypeError:
pass
else:
self.fail("s^t did not screen-out general iterables")
def test_equality(self):
self.assertEqual(self.s, set(self.word))
self.assertEqual(self.s, frozenset(self.word))
self.assertEqual(self.s == self.word, False)
self.assertNotEqual(self.s, set(self.otherword))
self.assertNotEqual(self.s, frozenset(self.otherword))
self.assertEqual(self.s != self.word, True)
def test_setOfFrozensets(self):
t = map(frozenset, ['abcdef', 'bcd', 'bdcb', 'fed', 'fedccba'])
s = self.thetype(t)
self.assertEqual(len(s), 3)
def test_sub_and_super(self):
p, q, r = map(self.thetype, ['ab', 'abcde', 'def'])
self.assertTrue(p < q)
self.assertTrue(p <= q)
self.assertTrue(q <= q)
self.assertTrue(q > p)
self.assertTrue(q >= p)
self.assertFalse(q < r)
self.assertFalse(q <= r)
self.assertFalse(q > r)
self.assertFalse(q >= r)
self.assertTrue(set('a').issubset('abc'))
self.assertTrue(set('abc').issuperset('a'))
self.assertFalse(set('a').issubset('cbs'))
self.assertFalse(set('cbs').issuperset('a'))
def test_pickling(self):
for i in range(pickle.HIGHEST_PROTOCOL + 1):
p = pickle.dumps(self.s, i)
dup = pickle.loads(p)
self.assertEqual(self.s, dup, "%s != %s" % (self.s, dup))
if type(self.s) not in (set, frozenset):
self.s.x = 10
p = pickle.dumps(self.s)
dup = pickle.loads(p)
self.assertEqual(self.s.x, dup.x)
def test_iterator_pickling(self):
itorg = iter(self.s)
data = self.thetype(self.s)
d = pickle.dumps(itorg)
it = pickle.loads(d)
# Set iterators unpickle as list iterators due to the
# undefined order of set items.
# self.assertEqual(type(itorg), type(it))
self.assertTrue(isinstance(it, collections.abc.Iterator))
self.assertEqual(self.thetype(it), data)
it = pickle.loads(d)
try:
drop = next(it)
except StopIteration:
return
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(self.thetype(it), data - self.thetype((drop,)))
def test_deepcopy(self):
class Tracer:
def __init__(self, value):
self.value = value
def __hash__(self):
return self.value
def __deepcopy__(self, memo=None):
return Tracer(self.value + 1)
t = Tracer(10)
s = self.thetype([t])
dup = copy.deepcopy(s)
self.assertNotEqual(id(s), id(dup))
for elem in dup:
newt = elem
self.assertNotEqual(id(t), id(newt))
self.assertEqual(t.value + 1, newt.value)
def test_gc(self):
# Create a nest of cycles to exercise overall ref count check
class A:
pass
s = set(A() for i in range(1000))
for elem in s:
elem.cycle = s
elem.sub = elem
elem.set = set([elem])
def test_subclass_with_custom_hash(self):
# Bug #1257731
class H(self.thetype):
def __hash__(self):
return int(id(self) & 0x7fffffff)
s=H()
f=set()
f.add(s)
self.assertIn(s, f)
f.remove(s)
f.add(s)
f.discard(s)
def test_badcmp(self):
s = self.thetype([BadCmp()])
# Detect comparison errors during insertion and lookup
self.assertRaises(RuntimeError, self.thetype, [BadCmp(), BadCmp()])
self.assertRaises(RuntimeError, s.__contains__, BadCmp())
# Detect errors during mutating operations
if hasattr(s, 'add'):
self.assertRaises(RuntimeError, s.add, BadCmp())
self.assertRaises(RuntimeError, s.discard, BadCmp())
self.assertRaises(RuntimeError, s.remove, BadCmp())
def test_cyclical_repr(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
if self.thetype == set:
self.assertEqual(repr(s), '{set(...)}')
else:
name = repr(s).partition('(')[0] # strip class name
self.assertEqual(repr(s), '%s({%s(...)})' % (name, name))
def test_cyclical_print(self):
w = ReprWrapper()
s = self.thetype([w])
w.value = s
fo = open(support.TESTFN, "w")
try:
fo.write(str(s))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(s))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_do_not_rehash_dict_keys(self):
n = 10
d = dict.fromkeys(map(HashCountingInt, range(n)))
self.assertEqual(sum(elem.hash_count for elem in d), n)
s = self.thetype(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
s.difference(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
if hasattr(s, 'symmetric_difference_update'):
s.symmetric_difference_update(d)
self.assertEqual(sum(elem.hash_count for elem in d), n)
d2 = dict.fromkeys(set(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d))
self.assertEqual(sum(elem.hash_count for elem in d), n)
d3 = dict.fromkeys(frozenset(d), 123)
self.assertEqual(sum(elem.hash_count for elem in d), n)
self.assertEqual(d3, dict.fromkeys(d, 123))
def test_container_iterator(self):
# Bug #3680: tp_traverse was not implemented for set iterator object
class C(object):
pass
obj = C()
ref = weakref.ref(obj)
container = set([obj, 1])
obj.x = iter(container)
del obj, container
gc.collect()
self.assertTrue(ref() is None, "Cycle was not collected")
class TestSet(TestJointOps, unittest.TestCase):
thetype = set
basetype = set
def test_init(self):
s = self.thetype()
s.__init__(self.word)
self.assertEqual(s, set(self.word))
s.__init__(self.otherword)
self.assertEqual(s, set(self.otherword))
self.assertRaises(TypeError, s.__init__, s, 2);
self.assertRaises(TypeError, s.__init__, 1);
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_set_literal(self):
s = set([1,2,3])
t = {1,2,3}
self.assertEqual(s, t)
def test_hash(self):
self.assertRaises(TypeError, hash, self.s)
def test_clear(self):
self.s.clear()
self.assertEqual(self.s, set())
self.assertEqual(len(self.s), 0)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(self.s, dup)
self.assertNotEqual(id(self.s), id(dup))
self.assertEqual(type(dup), self.basetype)
def test_add(self):
self.s.add('Q')
self.assertIn('Q', self.s)
dup = self.s.copy()
self.s.add('Q')
self.assertEqual(self.s, dup)
self.assertRaises(TypeError, self.s.add, [])
def test_remove(self):
self.s.remove('a')
self.assertNotIn('a', self.s)
self.assertRaises(KeyError, self.s.remove, 'Q')
self.assertRaises(TypeError, self.s.remove, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.remove(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
self.assertRaises(KeyError, self.s.remove, self.thetype(self.word))
def test_remove_keyerror_unpacking(self):
# bug: www.python.org/sf/1576657
for v1 in ['Q', (1,)]:
try:
self.s.remove(v1)
except KeyError as e:
v2 = e.args[0]
self.assertEqual(v1, v2)
else:
self.fail()
def test_remove_keyerror_set(self):
key = self.thetype([3, 4])
try:
self.s.remove(key)
except KeyError as e:
self.assertTrue(e.args[0] is key,
"KeyError should be {0}, not {1}".format(key,
e.args[0]))
else:
self.fail()
def test_discard(self):
self.s.discard('a')
self.assertNotIn('a', self.s)
self.s.discard('Q')
self.assertRaises(TypeError, self.s.discard, [])
s = self.thetype([frozenset(self.word)])
self.assertIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
self.assertNotIn(self.thetype(self.word), s)
s.discard(self.thetype(self.word))
def test_pop(self):
for i in range(len(self.s)):
elem = self.s.pop()
self.assertNotIn(elem, self.s)
self.assertRaises(KeyError, self.s.pop)
def test_update(self):
retval = self.s.update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
self.assertRaises(PassThru, self.s.update, check_pass_thru())
self.assertRaises(TypeError, self.s.update, [[]])
for p, q in (('cdc', 'abcd'), ('efgfe', 'abcefg'), ('ccb', 'abc'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p)), None)
self.assertEqual(s, set(q))
for p in ('cdc', 'efgfe', 'ccb', 'ef', 'abcda'):
q = 'ahi'
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.update(C(p), C(q)), None)
self.assertEqual(s, set(s) | set(p) | set(q))
def test_ior(self):
self.s |= set(self.otherword)
for c in (self.word + self.otherword):
self.assertIn(c, self.s)
def test_intersection_update(self):
retval = self.s.intersection_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.intersection_update, check_pass_thru())
self.assertRaises(TypeError, self.s.intersection_update, [[]])
for p, q in (('cdc', 'c'), ('efgfe', ''), ('ccb', 'bc'), ('ef', '')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.intersection_update(C(p)), None)
self.assertEqual(s, set(q))
ss = 'abcba'
s = self.thetype(ss)
t = 'cbc'
self.assertEqual(s.intersection_update(C(p), C(t)), None)
self.assertEqual(s, set('abcba')&set(p)&set(t))
def test_iand(self):
self.s &= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.otherword and c in self.word:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_difference_update(self):
retval = self.s.difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.difference_update, [[]])
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'ab'), ('efgfe', 'abc'), ('ccb', 'a'), ('ef', 'abc')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.difference_update(C(p)), None)
self.assertEqual(s, set(q))
s = self.thetype('abcdefghih')
s.difference_update()
self.assertEqual(s, self.thetype('abcdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('aba'))
self.assertEqual(s, self.thetype('cdefghih'))
s = self.thetype('abcdefghih')
s.difference_update(C('cdc'), C('aba'))
self.assertEqual(s, self.thetype('efghih'))
def test_isub(self):
self.s -= set(self.otherword)
for c in (self.word + self.otherword):
if c in self.word and c not in self.otherword:
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_symmetric_difference_update(self):
retval = self.s.symmetric_difference_update(self.otherword)
self.assertEqual(retval, None)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
self.assertRaises(PassThru, self.s.symmetric_difference_update, check_pass_thru())
self.assertRaises(TypeError, self.s.symmetric_difference_update, [[]])
for p, q in (('cdc', 'abd'), ('efgfe', 'abcefg'), ('ccb', 'a'), ('ef', 'abcef')):
for C in set, frozenset, dict.fromkeys, str, list, tuple:
s = self.thetype('abcba')
self.assertEqual(s.symmetric_difference_update(C(p)), None)
self.assertEqual(s, set(q))
def test_ixor(self):
self.s ^= set(self.otherword)
for c in (self.word + self.otherword):
if (c in self.word) ^ (c in self.otherword):
self.assertIn(c, self.s)
else:
self.assertNotIn(c, self.s)
def test_inplace_on_self(self):
t = self.s.copy()
t |= t
self.assertEqual(t, self.s)
t &= t
self.assertEqual(t, self.s)
t -= t
self.assertEqual(t, self.thetype())
t = self.s.copy()
t ^= t
self.assertEqual(t, self.thetype())
def test_weakref(self):
s = self.thetype('gallahad')
p = weakref.proxy(s)
self.assertEqual(str(p), str(s))
s = None
self.assertRaises(ReferenceError, str, p)
def test_rich_compare(self):
class TestRichSetCompare:
def __gt__(self, some_set):
self.gt_called = True
return False
def __lt__(self, some_set):
self.lt_called = True
return False
def __ge__(self, some_set):
self.ge_called = True
return False
def __le__(self, some_set):
self.le_called = True
return False
# This first tries the builtin rich set comparison, which doesn't know
# how to handle the custom object. Upon returning NotImplemented, the
# corresponding comparison on the right object is invoked.
myset = {1, 2, 3}
myobj = TestRichSetCompare()
myset < myobj
self.assertTrue(myobj.gt_called)
myobj = TestRichSetCompare()
myset > myobj
self.assertTrue(myobj.lt_called)
myobj = TestRichSetCompare()
myset <= myobj
self.assertTrue(myobj.ge_called)
myobj = TestRichSetCompare()
myset >= myobj
self.assertTrue(myobj.le_called)
@unittest.skipUnless(hasattr(set, "test_c_api"),
'C API test only available in a debug build')
def test_c_api(self):
self.assertEqual(set().test_c_api(), True)
class SetSubclass(set):
pass
class TestSetSubclass(TestSet):
thetype = SetSubclass
basetype = set
class SetSubclassWithKeywordArgs(set):
def __init__(self, iterable=[], newarg=None):
set.__init__(self, iterable)
class TestSetSubclassWithKeywordArgs(TestSet):
def test_keywords_in_subclass(self):
'SF bug #1486663 -- this used to erroneously raise a TypeError'
SetSubclassWithKeywordArgs(newarg=1)
class TestFrozenSet(TestJointOps, unittest.TestCase):
thetype = frozenset
basetype = frozenset
def test_init(self):
s = self.thetype(self.word)
s.__init__(self.otherword)
self.assertEqual(s, set(self.word))
def test_singleton_empty_frozenset(self):
f = frozenset()
efs = [frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(), frozenset([]), frozenset(()), frozenset(''),
frozenset(range(0)), frozenset(frozenset()),
frozenset(f), f]
# All of the empty frozensets should have just one id()
self.assertEqual(len(set(map(id, efs))), 1)
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertEqual(id(s), id(t))
def test_hash(self):
self.assertEqual(hash(self.thetype('abcdeb')),
hash(self.thetype('ebecda')))
# make sure that all permutations give the same hash value
n = 100
seq = [randrange(n) for i in range(n)]
results = set()
for i in range(200):
shuffle(seq)
results.add(hash(self.thetype(seq)))
self.assertEqual(len(results), 1)
def test_copy(self):
dup = self.s.copy()
self.assertEqual(id(self.s), id(dup))
def test_frozen_as_dictkey(self):
seq = list(range(10)) + list('abcdefg') + ['apple']
key1 = self.thetype(seq)
key2 = self.thetype(reversed(seq))
self.assertEqual(key1, key2)
self.assertNotEqual(id(key1), id(key2))
d = {}
d[key1] = 42
self.assertEqual(d[key2], 42)
def test_hash_caching(self):
f = self.thetype('abcdcda')
self.assertEqual(hash(f), hash(f))
def test_hash_effectiveness(self):
n = 13
hashvalues = set()
addhashvalue = hashvalues.add
elemmasks = [(i+1, 1<<i) for i in range(n)]
for i in range(2**n):
addhashvalue(hash(frozenset([e for e, m in elemmasks if m&i])))
self.assertEqual(len(hashvalues), 2**n)
class FrozenSetSubclass(frozenset):
pass
class TestFrozenSetSubclass(TestFrozenSet):
thetype = FrozenSetSubclass
basetype = frozenset
def test_constructor_identity(self):
s = self.thetype(range(3))
t = self.thetype(s)
self.assertNotEqual(id(s), id(t))
def test_copy(self):
dup = self.s.copy()
self.assertNotEqual(id(self.s), id(dup))
def test_nested_empty_constructor(self):
s = self.thetype()
t = self.thetype(s)
self.assertEqual(s, t)
def test_singleton_empty_frozenset(self):
Frozenset = self.thetype
f = frozenset()
F = Frozenset()
efs = [Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(), Frozenset([]), Frozenset(()), Frozenset(''),
Frozenset(range(0)), Frozenset(Frozenset()),
Frozenset(frozenset()), f, F, Frozenset(f), Frozenset(F)]
# All empty frozenset subclass instances should have different ids
self.assertEqual(len(set(map(id, efs))), len(efs))
# Tests taken from test_sets.py =============================================
empty_set = set()
#==============================================================================
class TestBasicOps:
def test_repr(self):
if self.repr is not None:
self.assertEqual(repr(self.set), self.repr)
def check_repr_against_values(self):
text = repr(self.set)
self.assertTrue(text.startswith('{'))
self.assertTrue(text.endswith('}'))
result = text[1:-1].split(', ')
result.sort()
sorted_repr_values = [repr(value) for value in self.values]
sorted_repr_values.sort()
self.assertEqual(result, sorted_repr_values)
def test_print(self):
try:
fo = open(support.TESTFN, "w")
fo.write(str(self.set))
fo.close()
fo = open(support.TESTFN, "r")
self.assertEqual(fo.read(), repr(self.set))
finally:
fo.close()
support.unlink(support.TESTFN)
def test_length(self):
self.assertEqual(len(self.set), self.length)
def test_self_equality(self):
self.assertEqual(self.set, self.set)
def test_equivalent_equality(self):
self.assertEqual(self.set, self.dup)
def test_copy(self):
self.assertEqual(self.set.copy(), self.dup)
def test_self_union(self):
result = self.set | self.set
self.assertEqual(result, self.dup)
def test_empty_union(self):
result = self.set | empty_set
self.assertEqual(result, self.dup)
def test_union_empty(self):
result = empty_set | self.set
self.assertEqual(result, self.dup)
def test_self_intersection(self):
result = self.set & self.set
self.assertEqual(result, self.dup)
def test_empty_intersection(self):
result = self.set & empty_set
self.assertEqual(result, empty_set)
def test_intersection_empty(self):
result = empty_set & self.set
self.assertEqual(result, empty_set)
def test_self_isdisjoint(self):
result = self.set.isdisjoint(self.set)
self.assertEqual(result, not self.set)
def test_empty_isdisjoint(self):
result = self.set.isdisjoint(empty_set)
self.assertEqual(result, True)
def test_isdisjoint_empty(self):
result = empty_set.isdisjoint(self.set)
self.assertEqual(result, True)
def test_self_symmetric_difference(self):
result = self.set ^ self.set
self.assertEqual(result, empty_set)
def test_empty_symmetric_difference(self):
result = self.set ^ empty_set
self.assertEqual(result, self.set)
def test_self_difference(self):
result = self.set - self.set
self.assertEqual(result, empty_set)
def test_empty_difference(self):
result = self.set - empty_set
self.assertEqual(result, self.dup)
def test_empty_difference_rev(self):
result = empty_set - self.set
self.assertEqual(result, empty_set)
def test_iteration(self):
for v in self.set:
self.assertIn(v, self.values)
setiter = iter(self.set)
self.assertEqual(setiter.__length_hint__(), len(self.set))
def test_pickling(self):
p = pickle.dumps(self.set)
copy = pickle.loads(p)
self.assertEqual(self.set, copy,
"%s != %s" % (self.set, copy))
#------------------------------------------------------------------------------
class TestBasicOpsEmpty(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "empty set"
self.values = []
self.set = set(self.values)
self.dup = set(self.values)
self.length = 0
self.repr = "set()"
#------------------------------------------------------------------------------
class TestBasicOpsSingleton(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (number)"
self.values = [3]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{3}"
def test_in(self):
self.assertIn(3, self.set)
def test_not_in(self):
self.assertNotIn(2, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTuple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "unit set (tuple)"
self.values = [(0, "zero")]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 1
self.repr = "{(0, 'zero')}"
def test_in(self):
self.assertIn((0, "zero"), self.set)
def test_not_in(self):
self.assertNotIn(9, self.set)
#------------------------------------------------------------------------------
class TestBasicOpsTriple(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "triple set"
self.values = [0, "zero", operator.add]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
self.repr = None
#------------------------------------------------------------------------------
class TestBasicOpsString(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = ["a", "b", "c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self.case = "string set"
self.values = [b"a", b"b", b"c"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 3
def test_repr(self):
self.check_repr_against_values()
#------------------------------------------------------------------------------
class TestBasicOpsMixedStringBytes(TestBasicOps, unittest.TestCase):
def setUp(self):
self._warning_filters = support.check_warnings()
self._warning_filters.__enter__()
warnings.simplefilter('ignore', BytesWarning)
self.case = "string and bytes set"
self.values = ["a", "b", b"a", b"b"]
self.set = set(self.values)
self.dup = set(self.values)
self.length = 4
def tearDown(self):
self._warning_filters.__exit__(None, None, None)
def test_repr(self):
self.check_repr_against_values()
#==============================================================================
def baditer():
raise TypeError
yield True
def gooditer():
yield True
class TestExceptionPropagation(unittest.TestCase):
"""SF 628246: Set constructor should not trap iterator TypeErrors"""
def test_instanceWithException(self):
self.assertRaises(TypeError, set, baditer())
def test_instancesWithoutException(self):
# All of these iterables should load without exception.
set([1,2,3])
set((1,2,3))
set({'one':1, 'two':2, 'three':3})
set(range(3))
set('abc')
set(gooditer())
def test_changingSizeWhileIterating(self):
s = set([1,2,3])
try:
for i in s:
s.update([4])
except RuntimeError:
pass
else:
self.fail("no exception when changing size during iteration")
#==============================================================================
class TestSetOfSets(unittest.TestCase):
def test_constructor(self):
inner = frozenset([1])
outer = set([inner])
element = outer.pop()
self.assertEqual(type(element), frozenset)
outer.add(inner) # Rebuild set of sets with .add method
outer.remove(inner)
self.assertEqual(outer, set()) # Verify that remove worked
outer.discard(inner) # Absence of KeyError indicates working fine
#==============================================================================
class TestBinaryOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_eq(self): # SF bug 643115
self.assertEqual(self.set, set({2:1,4:3,6:5}))
def test_union_subset(self):
result = self.set | set([2])
self.assertEqual(result, set((2, 4, 6)))
def test_union_superset(self):
result = self.set | set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_union_overlap(self):
result = self.set | set([3, 4, 5])
self.assertEqual(result, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
result = self.set | set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
def test_intersection_subset(self):
result = self.set & set((2, 4))
self.assertEqual(result, set((2, 4)))
def test_intersection_superset(self):
result = self.set & set([2, 4, 6, 8])
self.assertEqual(result, set([2, 4, 6]))
def test_intersection_overlap(self):
result = self.set & set([3, 4, 5])
self.assertEqual(result, set([4]))
def test_intersection_non_overlap(self):
result = self.set & set([8])
self.assertEqual(result, empty_set)
def test_isdisjoint_subset(self):
result = self.set.isdisjoint(set((2, 4)))
self.assertEqual(result, False)
def test_isdisjoint_superset(self):
result = self.set.isdisjoint(set([2, 4, 6, 8]))
self.assertEqual(result, False)
def test_isdisjoint_overlap(self):
result = self.set.isdisjoint(set([3, 4, 5]))
self.assertEqual(result, False)
def test_isdisjoint_non_overlap(self):
result = self.set.isdisjoint(set([8]))
self.assertEqual(result, True)
def test_sym_difference_subset(self):
result = self.set ^ set((2, 4))
self.assertEqual(result, set([6]))
def test_sym_difference_superset(self):
result = self.set ^ set((2, 4, 6, 8))
self.assertEqual(result, set([8]))
def test_sym_difference_overlap(self):
result = self.set ^ set((3, 4, 5))
self.assertEqual(result, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
result = self.set ^ set([8])
self.assertEqual(result, set([2, 4, 6, 8]))
#==============================================================================
class TestUpdateOps(unittest.TestCase):
def setUp(self):
self.set = set((2, 4, 6))
def test_union_subset(self):
self.set |= set([2])
self.assertEqual(self.set, set((2, 4, 6)))
def test_union_superset(self):
self.set |= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_overlap(self):
self.set |= set([3, 4, 5])
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_union_non_overlap(self):
self.set |= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_union_method_call(self):
self.set.update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 4, 5, 6]))
def test_intersection_subset(self):
self.set &= set((2, 4))
self.assertEqual(self.set, set((2, 4)))
def test_intersection_superset(self):
self.set &= set([2, 4, 6, 8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_intersection_overlap(self):
self.set &= set([3, 4, 5])
self.assertEqual(self.set, set([4]))
def test_intersection_non_overlap(self):
self.set &= set([8])
self.assertEqual(self.set, empty_set)
def test_intersection_method_call(self):
self.set.intersection_update(set([3, 4, 5]))
self.assertEqual(self.set, set([4]))
def test_sym_difference_subset(self):
self.set ^= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_sym_difference_superset(self):
self.set ^= set((2, 4, 6, 8))
self.assertEqual(self.set, set([8]))
def test_sym_difference_overlap(self):
self.set ^= set((3, 4, 5))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_sym_difference_non_overlap(self):
self.set ^= set([8])
self.assertEqual(self.set, set([2, 4, 6, 8]))
def test_sym_difference_method_call(self):
self.set.symmetric_difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 3, 5, 6]))
def test_difference_subset(self):
self.set -= set((2, 4))
self.assertEqual(self.set, set([6]))
def test_difference_superset(self):
self.set -= set((2, 4, 6, 8))
self.assertEqual(self.set, set([]))
def test_difference_overlap(self):
self.set -= set((3, 4, 5))
self.assertEqual(self.set, set([2, 6]))
def test_difference_non_overlap(self):
self.set -= set([8])
self.assertEqual(self.set, set([2, 4, 6]))
def test_difference_method_call(self):
self.set.difference_update(set([3, 4, 5]))
self.assertEqual(self.set, set([2, 6]))
#==============================================================================
class TestMutate(unittest.TestCase):
def setUp(self):
self.values = ["a", "b", "c"]
self.set = set(self.values)
def test_add_present(self):
self.set.add("c")
self.assertEqual(self.set, set("abc"))
def test_add_absent(self):
self.set.add("d")
self.assertEqual(self.set, set("abcd"))
def test_add_until_full(self):
tmp = set()
expected_len = 0
for v in self.values:
tmp.add(v)
expected_len += 1
self.assertEqual(len(tmp), expected_len)
self.assertEqual(tmp, self.set)
def test_remove_present(self):
self.set.remove("b")
self.assertEqual(self.set, set("ac"))
def test_remove_absent(self):
try:
self.set.remove("d")
self.fail("Removing missing element should have raised LookupError")
except LookupError:
pass
def test_remove_until_empty(self):
expected_len = len(self.set)
for v in self.values:
self.set.remove(v)
expected_len -= 1
self.assertEqual(len(self.set), expected_len)
def test_discard_present(self):
self.set.discard("c")
self.assertEqual(self.set, set("ab"))
def test_discard_absent(self):
self.set.discard("d")
self.assertEqual(self.set, set("abc"))
def test_clear(self):
self.set.clear()
self.assertEqual(len(self.set), 0)
def test_pop(self):
popped = {}
while self.set:
popped[self.set.pop()] = None
self.assertEqual(len(popped), len(self.values))
for v in self.values:
self.assertIn(v, popped)
def test_update_empty_tuple(self):
self.set.update(())
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_overlap(self):
self.set.update(("a",))
self.assertEqual(self.set, set(self.values))
def test_update_unit_tuple_non_overlap(self):
self.set.update(("a", "z"))
self.assertEqual(self.set, set(self.values + ["z"]))
#==============================================================================
class TestSubsets:
case2method = {"<=": "issubset",
">=": "issuperset",
}
reverse = {"==": "==",
"!=": "!=",
"<": ">",
">": "<",
"<=": ">=",
">=": "<=",
}
def test_issubset(self):
x = self.left
y = self.right
for case in "!=", "==", "<", "<=", ">", ">=":
expected = case in self.cases
# Test the binary infix spelling.
result = eval("x" + case + "y", locals())
self.assertEqual(result, expected)
# Test the "friendly" method-name spelling, if one exists.
if case in TestSubsets.case2method:
method = getattr(x, TestSubsets.case2method[case])
result = method(y)
self.assertEqual(result, expected)
# Now do the same for the operands reversed.
rcase = TestSubsets.reverse[case]
result = eval("y" + rcase + "x", locals())
self.assertEqual(result, expected)
if rcase in TestSubsets.case2method:
method = getattr(y, TestSubsets.case2method[rcase])
result = method(x)
self.assertEqual(result, expected)
#------------------------------------------------------------------------------
class TestSubsetEqualEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set()
name = "both empty"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEqualNonEmpty(TestSubsets, unittest.TestCase):
left = set([1, 2])
right = set([1, 2])
name = "equal pair"
cases = "==", "<=", ">="
#------------------------------------------------------------------------------
class TestSubsetEmptyNonEmpty(TestSubsets, unittest.TestCase):
left = set()
right = set([1, 2])
name = "one empty, one non-empty"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetPartial(TestSubsets, unittest.TestCase):
left = set([1])
right = set([1, 2])
name = "one a non-empty proper subset of other"
cases = "!=", "<", "<="
#------------------------------------------------------------------------------
class TestSubsetNonOverlap(TestSubsets, unittest.TestCase):
left = set([1])
right = set([2])
name = "neither empty, neither contains"
cases = "!="
#==============================================================================
class TestOnlySetsInBinaryOps:
def test_eq_ne(self):
# Unlike the others, this is testing that == and != *are* allowed.
self.assertEqual(self.other == self.set, False)
self.assertEqual(self.set == self.other, False)
self.assertEqual(self.other != self.set, True)
self.assertEqual(self.set != self.other, True)
def test_ge_gt_le_lt(self):
self.assertRaises(TypeError, lambda: self.set < self.other)
self.assertRaises(TypeError, lambda: self.set <= self.other)
self.assertRaises(TypeError, lambda: self.set > self.other)
self.assertRaises(TypeError, lambda: self.set >= self.other)
self.assertRaises(TypeError, lambda: self.other < self.set)
self.assertRaises(TypeError, lambda: self.other <= self.set)
self.assertRaises(TypeError, lambda: self.other > self.set)
self.assertRaises(TypeError, lambda: self.other >= self.set)
def test_update_operator(self):
try:
self.set |= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_update(self):
if self.otherIsIterable:
self.set.update(self.other)
else:
self.assertRaises(TypeError, self.set.update, self.other)
def test_union(self):
self.assertRaises(TypeError, lambda: self.set | self.other)
self.assertRaises(TypeError, lambda: self.other | self.set)
if self.otherIsIterable:
self.set.union(self.other)
else:
self.assertRaises(TypeError, self.set.union, self.other)
def test_intersection_update_operator(self):
try:
self.set &= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_intersection_update(self):
if self.otherIsIterable:
self.set.intersection_update(self.other)
else:
self.assertRaises(TypeError,
self.set.intersection_update,
self.other)
def test_intersection(self):
self.assertRaises(TypeError, lambda: self.set & self.other)
self.assertRaises(TypeError, lambda: self.other & self.set)
if self.otherIsIterable:
self.set.intersection(self.other)
else:
self.assertRaises(TypeError, self.set.intersection, self.other)
def test_sym_difference_update_operator(self):
try:
self.set ^= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_sym_difference_update(self):
if self.otherIsIterable:
self.set.symmetric_difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.symmetric_difference_update,
self.other)
def test_sym_difference(self):
self.assertRaises(TypeError, lambda: self.set ^ self.other)
self.assertRaises(TypeError, lambda: self.other ^ self.set)
if self.otherIsIterable:
self.set.symmetric_difference(self.other)
else:
self.assertRaises(TypeError, self.set.symmetric_difference, self.other)
def test_difference_update_operator(self):
try:
self.set -= self.other
except TypeError:
pass
else:
self.fail("expected TypeError")
def test_difference_update(self):
if self.otherIsIterable:
self.set.difference_update(self.other)
else:
self.assertRaises(TypeError,
self.set.difference_update,
self.other)
def test_difference(self):
self.assertRaises(TypeError, lambda: self.set - self.other)
self.assertRaises(TypeError, lambda: self.other - self.set)
if self.otherIsIterable:
self.set.difference(self.other)
else:
self.assertRaises(TypeError, self.set.difference, self.other)
#------------------------------------------------------------------------------
class TestOnlySetsNumeric(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 19
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsDict(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = {1:2, 3:4}
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsOperator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = operator.add
self.otherIsIterable = False
#------------------------------------------------------------------------------
class TestOnlySetsTuple(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = (2, 4, 6)
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsString(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
self.set = set((1, 2, 3))
self.other = 'abc'
self.otherIsIterable = True
#------------------------------------------------------------------------------
class TestOnlySetsGenerator(TestOnlySetsInBinaryOps, unittest.TestCase):
def setUp(self):
def gen():
for i in range(0, 10, 2):
yield i
self.set = set((1, 2, 3))
self.other = gen()
self.otherIsIterable = True
#==============================================================================
class TestCopying:
def test_copy(self):
dup = self.set.copy()
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertTrue(dup_list[i] is set_list[i])
def test_deep_copy(self):
dup = copy.deepcopy(self.set)
##print type(dup), repr(dup)
dup_list = sorted(dup, key=repr)
set_list = sorted(self.set, key=repr)
self.assertEqual(len(dup_list), len(set_list))
for i in range(len(dup_list)):
self.assertEqual(dup_list[i], set_list[i])
#------------------------------------------------------------------------------
class TestCopyingEmpty(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set()
#------------------------------------------------------------------------------
class TestCopyingSingleton(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["hello"])
#------------------------------------------------------------------------------
class TestCopyingTriple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set(["zero", 0, None])
#------------------------------------------------------------------------------
class TestCopyingTuple(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([(1, 2)])
#------------------------------------------------------------------------------
class TestCopyingNested(TestCopying, unittest.TestCase):
def setUp(self):
self.set = set([((1, 2), (3, 4))])
#==============================================================================
class TestIdentities(unittest.TestCase):
def setUp(self):
self.a = set('abracadabra')
self.b = set('alacazam')
def test_binopsVsSubsets(self):
a, b = self.a, self.b
self.assertTrue(a - b < a)
self.assertTrue(b - a < b)
self.assertTrue(a & b < a)
self.assertTrue(a & b < b)
self.assertTrue(a | b > a)
self.assertTrue(a | b > b)
self.assertTrue(a ^ b < a | b)
def test_commutativity(self):
a, b = self.a, self.b
self.assertEqual(a&b, b&a)
self.assertEqual(a|b, b|a)
self.assertEqual(a^b, b^a)
if a != b:
self.assertNotEqual(a-b, b-a)
def test_summations(self):
# check that sums of parts equal the whole
a, b = self.a, self.b
self.assertEqual((a-b)|(a&b)|(b-a), a|b)
self.assertEqual((a&b)|(a^b), a|b)
self.assertEqual(a|(b-a), a|b)
self.assertEqual((a-b)|b, a|b)
self.assertEqual((a-b)|(a&b), a)
self.assertEqual((b-a)|(a&b), b)
self.assertEqual((a-b)|(b-a), a^b)
def test_exclusion(self):
# check that inverse operations show non-overlap
a, b, zero = self.a, self.b, set()
self.assertEqual((a-b)&b, zero)
self.assertEqual((b-a)&a, zero)
self.assertEqual((a&b)&(a^b), zero)
# Tests derived from test_itertools.py =======================================
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing __next__()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def __next__(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def __next__(self):
raise StopIteration
from itertools import chain
def L(seqn):
'Test multiple tiers of iterators'
return chain(map(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_constructor(self):
for cons in (set, frozenset):
for s in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(sorted(cons(g(s)), key=repr), sorted(g(s), key=repr))
self.assertRaises(TypeError, cons , X(s))
self.assertRaises(TypeError, cons , N(s))
self.assertRaises(ZeroDivisionError, cons , E(s))
def test_inline_methods(self):
s = set('november')
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for meth in (s.union, s.intersection, s.difference, s.symmetric_difference, s.isdisjoint):
for g in (G, I, Ig, L, R):
expected = meth(data)
actual = meth(g(data))
if isinstance(expected, bool):
self.assertEqual(actual, expected)
else:
self.assertEqual(sorted(actual, key=repr), sorted(expected, key=repr))
self.assertRaises(TypeError, meth, X(s))
self.assertRaises(TypeError, meth, N(s))
self.assertRaises(ZeroDivisionError, meth, E(s))
def test_inplace_methods(self):
for data in ("123", "", range(1000), ('do', 1.2), range(2000,2200,5), 'december'):
for methname in ('update', 'intersection_update',
'difference_update', 'symmetric_difference_update'):
for g in (G, I, Ig, S, L, R):
s = set('january')
t = s.copy()
getattr(s, methname)(list(g(data)))
getattr(t, methname)(g(data))
self.assertEqual(sorted(s, key=repr), sorted(t, key=repr))
self.assertRaises(TypeError, getattr(set('january'), methname), X(data))
self.assertRaises(TypeError, getattr(set('january'), methname), N(data))
self.assertRaises(ZeroDivisionError, getattr(set('january'), methname), E(data))
class bad_eq:
def __eq__(self, other):
if be_bad:
set2.clear()
raise ZeroDivisionError
return self is other
def __hash__(self):
return 0
class bad_dict_clear:
def __eq__(self, other):
if be_bad:
dict2.clear()
return self is other
def __hash__(self):
return 0
class TestWeirdBugs(unittest.TestCase):
def test_8420_set_merge(self):
# This used to segfault
global be_bad, set2, dict2
be_bad = False
set1 = {bad_eq()}
set2 = {bad_eq() for i in range(75)}
be_bad = True
self.assertRaises(ZeroDivisionError, set1.update, set2)
be_bad = False
set1 = {bad_dict_clear()}
dict2 = {bad_dict_clear(): None}
be_bad = True
set1.symmetric_difference_update(dict2)
# Application tests (based on David Eppstein's graph recipes) ===================================
def powerset(U):
"""Generates all subsets of a set or sequence U."""
U = iter(U)
try:
x = frozenset([next(U)])
for S in powerset(U):
yield S
yield S | x
except StopIteration:
yield frozenset()
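# Quick sanity check of the recursion above (subsets are yielded as frozensets;
# a two-element input produces all four subsets):
#
#   >>> sorted(map(sorted, powerset([1, 2])))
#   [[], [1], [1, 2], [2]]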
def cube(n):
"""Graph of n-dimensional hypercube."""
singletons = [frozenset([x]) for x in range(n)]
return dict([(x, frozenset([x^s for s in singletons]))
for x in powerset(range(n))])
def linegraph(G):
"""Graph, the vertices of which are edges of G,
with two vertices being adjacent iff the corresponding
edges share a vertex."""
L = {}
for x in G:
for y in G[x]:
nx = [frozenset([x,z]) for z in G[x] if z != y]
ny = [frozenset([y,z]) for z in G[y] if z != x]
L[frozenset([x,y])] = frozenset(nx+ny)
return L
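# Small worked example (not part of the original tests): the line graph of a
# triangle is again a triangle, since every edge shares a vertex with both
# other edges.
#
#   >>> triangle = {0: {1, 2}, 1: {0, 2}, 2: {0, 1}}
#   >>> linegraph(triangle)[frozenset([0, 1])] == frozenset([frozenset([0, 2]), frozenset([1, 2])])
#   True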
def faces(G):
'Return a set of faces in G, where a face is the set of vertices on that face'
# currently limited to triangles, squares, and pentagons
f = set()
for v1, edges in G.items():
for v2 in edges:
for v3 in G[v2]:
if v1 == v3:
continue
if v1 in G[v3]:
f.add(frozenset([v1, v2, v3]))
else:
for v4 in G[v3]:
if v4 == v2:
continue
if v1 in G[v4]:
f.add(frozenset([v1, v2, v3, v4]))
else:
for v5 in G[v4]:
if v5 == v3 or v5 == v2:
continue
if v1 in G[v5]:
f.add(frozenset([v1, v2, v3, v4, v5]))
return f
class TestGraphs(unittest.TestCase):
def test_cube(self):
g = cube(3) # vert --> {v1, v2, v3}
vertices1 = set(g)
self.assertEqual(len(vertices1), 8) # eight vertices
for edge in g.values():
self.assertEqual(len(edge), 3) # each vertex connects to three edges
vertices2 = set(v for edges in g.values() for v in edges)
self.assertEqual(vertices1, vertices2) # edge vertices in original set
cubefaces = faces(g)
self.assertEqual(len(cubefaces), 6) # six faces
for face in cubefaces:
self.assertEqual(len(face), 4) # each face is a square
def test_cuboctahedron(self):
# http://en.wikipedia.org/wiki/Cuboctahedron
# 8 triangular faces and 6 square faces
# 12 identical vertices, each connecting a triangle and a square
g = cube(3)
cuboctahedron = linegraph(g) # V --> {V1, V2, V3, V4}
self.assertEqual(len(cuboctahedron), 12)# twelve vertices
vertices = set(cuboctahedron)
for edges in cuboctahedron.values():
self.assertEqual(len(edges), 4) # each vertex connects to four other vertices
othervertices = set(edge for edges in cuboctahedron.values() for edge in edges)
self.assertEqual(vertices, othervertices) # edge vertices in original set
cubofaces = faces(cuboctahedron)
facesizes = collections.defaultdict(int)
for face in cubofaces:
facesizes[len(face)] += 1
self.assertEqual(facesizes[3], 8) # eight triangular faces
self.assertEqual(facesizes[4], 6) # six square faces
for vertex in cuboctahedron:
edge = vertex # Cuboctahedron vertices are edges in Cube
self.assertEqual(len(edge), 2) # Two cube vertices define an edge
for cubevert in edge:
self.assertIn(cubevert, g)
#==============================================================================
if __name__ == "__main__":
unittest.main()
| lgpl-3.0 |
postla/OpenNFR-E2 | lib/python/Screens/Rc.py | 1 | 2807 | from Components.Pixmap import MovingPixmap, MultiPixmap
from Tools.Directories import resolveFilename, SCOPE_SKIN
from xml.etree.ElementTree import ElementTree
from Components.config import config, ConfigInteger
from Components.RcModel import rc_model
from boxbranding import getBrandOEM
config.misc.rcused = ConfigInteger(default = 1)
class Rc:
def __init__(self):
self["rc"] = MultiPixmap()
self["arrowdown"] = MovingPixmap()
self["arrowdown2"] = MovingPixmap()
self["arrowup"] = MovingPixmap()
self["arrowup2"] = MovingPixmap()
config.misc.rcused = ConfigInteger(default = 1)
self.isDefaultRc = rc_model.rcIsDefault()
self.rcheight = 500
self.rcheighthalf = 250
self.selectpics = []
self.selectpics.append((self.rcheighthalf, ["arrowdown", "arrowdown2"], (-18,-70)))
self.selectpics.append((self.rcheight, ["arrowup", "arrowup2"], (-18,0)))
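# Each selectpics entry is (y threshold, candidate arrow pixmap names, (x, y)
# offset added to the remote-control position and key position); getSelectPic()
# returns the first entry whose threshold is >= the key's y coordinate.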
self.readPositions()
self.clearSelectedKeys()
self.onShown.append(self.initRc)
def initRc(self):
if getBrandOEM() == 'ini':
self["rc"].setPixmapNum(config.misc.rcused.getValue())
else:
if self.isDefaultRc:
self["rc"].setPixmapNum(config.misc.rcused.getValue())
else:
self["rc"].setPixmapNum(0)
def readPositions(self):
if self.isDefaultRc:
target = resolveFilename(SCOPE_SKIN, "rcpositions.xml")
else:
target = rc_model.getRcLocation() + 'rcpositions.xml'
tree = ElementTree(file = target)
rcs = tree.getroot()
self.rcs = {}
for rc in rcs:
id = int(rc.attrib["id"])
self.rcs[id] = {}
for key in rc:
name = key.attrib["name"]
pos = key.attrib["pos"].split(",")
self.rcs[id][name] = (int(pos[0]), int(pos[1]))
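# readPositions() only looks at the "id", "name" and "pos" attributes, so a
# minimal rcpositions.xml would look roughly like this (element names below
# are illustrative, not required by the parser):
#
#   <rcs>
#     <rc id="1">
#       <button name="KEY_OK" pos="100,250" />
#       <button name="KEY_MENU" pos="100,300" />
#     </rc>
#   </rcs>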
def getSelectPic(self, pos):
for selectPic in self.selectpics:
if pos[1] <= selectPic[0]:
return selectPic[1], selectPic[2]
return None
def hideRc(self):
self["rc"].hide()
self.hideSelectPics()
def showRc(self):
self["rc"].show()
def selectKey(self, key):
if self.isDefaultRc:
rc = self.rcs[config.misc.rcused.getValue()]
else:
try:
rc = self.rcs[2]
except:
rc = self.rcs[config.misc.rcused.getValue()]
if rc.has_key(key):
rcpos = self["rc"].getPosition()
pos = rc[key]
selectPics = self.getSelectPic(pos)
selectPic = None
for x in selectPics[0]:
if x not in self.selectedKeys:
selectPic = x
break
if selectPic is not None:
print "selectPic:", selectPic
self[selectPic].moveTo(rcpos[0] + pos[0] + selectPics[1][0], rcpos[1] + pos[1] + selectPics[1][1], 1)
self[selectPic].startMoving()
self[selectPic].show()
self.selectedKeys.append(selectPic)
def clearSelectedKeys(self):
self.showRc()
self.selectedKeys = []
self.hideSelectPics()
def hideSelectPics(self):
for selectPic in self.selectpics:
for pic in selectPic[1]:
self[pic].hide()
| gpl-2.0 |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/indexes/datetimes/test_scalar_compat.py | 2 | 12251 | """
Tests for DatetimeIndex methods behaving like their Timestamp counterparts
"""
from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime
import pandas as pd
from pandas import DatetimeIndex, Timestamp, date_range
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
class TestDatetimeIndexOps:
def test_dti_time(self):
rng = date_range("1/1/2000", freq="12min", periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_dti_date(self):
rng = date_range("1/1/2000", freq="12H", periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
@pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])
def test_dti_date_out_of_range(self, data):
# GH#1475
msg = "Out of bounds nanosecond timestamp: 1400-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
DatetimeIndex(data)
@pytest.mark.parametrize(
"field",
[
"dayofweek",
"dayofyear",
"week",
"weekofyear",
"quarter",
"days_in_month",
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
"weekday_name",
],
)
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
expected = getattr(idx, field)[-1]
if field == "weekday_name":
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = getattr(Timestamp(idx[-1]), field)
else:
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_timestamp_freq_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
# ----------------------------------------------------------------
# DatetimeIndex.round
def test_round_daily(self):
dti = date_range("20130101 09:10:11", periods=5)
result = dti.round("D")
expected = date_range("20130101", periods=5)
tm.assert_index_equal(result, expected)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
result = dti.round("D")
expected = date_range("20130101", periods=5).tz_localize("US/Eastern")
tm.assert_index_equal(result, expected)
result = dti.round("s")
tm.assert_index_equal(result, dti)
@pytest.mark.parametrize(
"freq, error_msg",
[
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_round_invalid(self, freq, error_msg):
dti = date_range("20130101 09:10:11", periods=5)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
with pytest.raises(ValueError, match=error_msg):
dti.round(freq)
def test_round(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 01:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
]
)
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq="H"), expected_rng)
assert elt.round(freq="H") == expected_elt
msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
rng.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
rng.round(freq="M")
with pytest.raises(ValueError, match=msg):
elt.round(freq="M")
# GH#14440 & GH#15578
index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ["us", "ns"]:
tm.assert_index_equal(index, index.round(freq))
index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz)
tm.assert_index_equal(result, expected)
index = DatetimeIndex(["2016-10-17 12:00:00.001501031"])
result = index.round("10ns")
expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(False):
ts = "2016-10-17 12:00:00.001501031"
DatetimeIndex([ts]).round("1010ns")
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:02:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:04:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:06:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:08:00", tz=tz, freq="2T"),
]
)
tm.assert_index_equal(rng.round(freq="2T"), expected_rng)
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
(["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]),
(["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]),
(
["2117-01-01 00:00:45.000000012"],
"floor",
"10ns",
["2117-01-01 00:00:45.000000010"],
),
(
["1823-01-01 00:00:01.000000012"],
"ceil",
"10ns",
["1823-01-01 00:00:01.000000020"],
),
(["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]),
(["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]),
(["2018-01-01 00:15:00"], "ceil", "15T", ["2018-01-01 00:15:00"]),
(["2018-01-01 00:15:00"], "floor", "15T", ["2018-01-01 00:15:00"]),
(["1823-01-01 03:00:00"], "ceil", "3H", ["1823-01-01 03:00:00"]),
(["1823-01-01 03:00:00"], "floor", "3H", ["1823-01-01 03:00:00"]),
(
("NaT", "1823-01-01 00:00:01"),
"floor",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
(
("NaT", "1823-01-01 00:00:01"),
"ceil",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = DatetimeIndex(list(test_input))
func = getattr(dt, rounder)
result = func(freq)
expected = DatetimeIndex(list(expected))
assert expected.equals(result)
@pytest.mark.parametrize(
"start, index_freq, periods",
[("2018-01-01", "12H", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)],
)
@pytest.mark.parametrize(
"round_freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"12H",
"1D",
],
)
def test_round_int64(self, start, index_freq, periods, round_freq):
dt = date_range(start=start, freq=index_freq, periods=periods)
unit = to_offset(round_freq).nanos
# test floor
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), "floor not a {} multiple".format(round_freq)
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), "ceil not a {} multiple".format(round_freq)
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
assert (mod == 0).all(), "round not a {} multiple".format(round_freq)
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (
result.asi8[diff == unit // 2] % 2 == 0
).all(), "round half to even error"
# ----------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D")
tm.assert_index_equal(result, expected)
arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype(
"datetime64[ns]"
)
rng_ns = DatetimeIndex(arr_ns)
rng_ns_normalized = rng_ns.normalize()
arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype(
"datetime64[ns]"
)
expected = DatetimeIndex(arr_ns)
tm.assert_index_equal(rng_ns_normalized, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_nat(self):
dti = DatetimeIndex([pd.NaT, Timestamp("2018-01-01 01:00:00")])
result = dti.normalize()
expected = DatetimeIndex([pd.NaT, Timestamp("2018-01-01")])
tm.assert_index_equal(result, expected)
class TestDateTimeIndexToJulianDate:
def test_1700(self):
dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="H")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="T")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="S")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
| apache-2.0 |
SimonGriffioen/pascal | PASCAL/external/networkx/readwrite/nx_shp.py | 24 | 8543 | """
*********
Shapefile
*********
Generates a networkx.DiGraph from point and line shapefiles.
"The Esri Shapefile or simply a shapefile is a popular geospatial vector
data format for geographic information systems software. It is developed
and regulated by Esri as a (mostly) open specification for data
interoperability among Esri and other software products."
See http://en.wikipedia.org/wiki/Shapefile for additional information.
"""
# Copyright (C) 2004-2015 by
# Ben Reilly <[email protected]>
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import networkx as nx
__author__ = """Ben Reilly ([email protected])"""
__all__ = ['read_shp', 'write_shp']
def read_shp(path, simplify=True):
"""Generates a networkx.DiGraph from shapefiles. Point geometries are
translated into nodes, lines into edges. Coordinate tuples are used as
keys. Attributes are preserved, line geometries are simplified into start
and end coordinates. Accepts a single shapefile or directory of many
shapefiles.
"The Esri Shapefile or simply a shapefile is a popular geospatial vector
data format for geographic information systems software [1]_."
Parameters
----------
path : file or string
File, directory, or filename to read.
simplify: bool
If ``True``, simplify line geometries to start and end coordinates.
If ``False``, and line feature geometry has multiple segments, the
non-geometric attributes for that feature will be repeated for each
edge comprising that feature.
Returns
-------
G : NetworkX graph
Examples
--------
>>> G=nx.read_shp('test.shp') # doctest: +SKIP
References
----------
.. [1] http://en.wikipedia.org/wiki/Shapefile
"""
try:
from osgeo import ogr
except ImportError:
raise ImportError("read_shp requires OGR: http://www.gdal.org/")
if not isinstance(path, str):
return
net = nx.DiGraph()
shp = ogr.Open(path)
for lyr in shp:
fields = [x.GetName() for x in lyr.schema]
for f in lyr:
flddata = [f.GetField(f.GetFieldIndex(x)) for x in fields]
g = f.geometry()
attributes = dict(zip(fields, flddata))
attributes["ShpName"] = lyr.GetName()
if g.GetGeometryType() == 1: # point
net.add_node((g.GetPoint_2D(0)), attributes)
if g.GetGeometryType() == 2: # linestring
last = g.GetPointCount() - 1
if simplify:
attributes["Wkb"] = g.ExportToWkb()
attributes["Wkt"] = g.ExportToWkt()
attributes["Json"] = g.ExportToJson()
net.add_edge(g.GetPoint_2D(0), g.GetPoint_2D(last), attributes)
else:
# separate out each segment as individual edge
for i in range(last):
pt1 = g.GetPoint_2D(i)
pt2 = g.GetPoint_2D(i + 1)
segment = ogr.Geometry(ogr.wkbLineString)
segment.AddPoint_2D(pt1[0], pt1[1])
segment.AddPoint_2D(pt2[0], pt2[1])
attributes["Wkb"] = segment.ExportToWkb()
attributes["Wkt"] = segment.ExportToWkt()
attributes["Json"] = segment.ExportToJson()
net.add_edge(pt1, pt2, attributes)
return net
def write_shp(G, outdir):
"""Writes a networkx.DiGraph to two shapefiles, edges and nodes.
Nodes and edges are expected to have a Well Known Binary (Wkb) or
Well Known Text (Wkt) key in order to generate geometries. Also
acceptable are nodes with a numeric tuple key (x,y).
"The Esri Shapefile or simply a shapefile is a popular geospatial vector
data format for geographic information systems software [1]_."
Parameters
----------
outdir : directory path
Output directory for the two shapefiles.
Returns
-------
None
Examples
--------
nx.write_shp(digraph, '/shapefiles') # doctest +SKIP
References
----------
.. [1] http://en.wikipedia.org/wiki/Shapefile
"""
try:
from osgeo import ogr
except ImportError:
raise ImportError("write_shp requires OGR: http://www.gdal.org/")
# easier to debug in python if ogr throws exceptions
ogr.UseExceptions()
def netgeometry(key, data):
if 'Wkb' in data:
geom = ogr.CreateGeometryFromWkb(data['Wkb'])
elif 'Wkt' in data:
geom = ogr.CreateGeometryFromWkt(data['Wkt'])
elif type(key[0]).__name__ == 'tuple': # edge keys are packed tuples
geom = ogr.Geometry(ogr.wkbLineString)
_from, _to = key[0], key[1]
try:
geom.SetPoint(0, *_from)
geom.SetPoint(1, *_to)
except TypeError:
# assume user used tuple of int and choked ogr
_ffrom = [float(x) for x in _from]
_fto = [float(x) for x in _to]
geom.SetPoint(0, *_ffrom)
geom.SetPoint(1, *_fto)
else:
geom = ogr.Geometry(ogr.wkbPoint)
try:
geom.SetPoint(0, *key)
except TypeError:
# assume user used tuple of int and choked ogr
fkey = [float(x) for x in key]
geom.SetPoint(0, *fkey)
return geom
# Create_feature with new optional attributes arg (should be dict type)
def create_feature(geometry, lyr, attributes=None):
feature = ogr.Feature(lyr.GetLayerDefn())
        feature.SetGeometry(geometry)
        if attributes is not None:
# Loop through attributes, assigning data to each field
for field, data in attributes.items():
feature.SetField(field, data)
lyr.CreateFeature(feature)
feature.Destroy()
drv = ogr.GetDriverByName("ESRI Shapefile")
shpdir = drv.CreateDataSource(outdir)
# delete pre-existing output first otherwise ogr chokes
try:
shpdir.DeleteLayer("nodes")
except:
pass
nodes = shpdir.CreateLayer("nodes", None, ogr.wkbPoint)
for n in G:
data = G.node[n]
g = netgeometry(n, data)
create_feature(g, nodes)
try:
shpdir.DeleteLayer("edges")
except:
pass
edges = shpdir.CreateLayer("edges", None, ogr.wkbLineString)
# New edge attribute write support merged into edge loop
fields = {} # storage for field names and their data types
attributes = {} # storage for attribute data (indexed by field names)
# Conversion dict between python and ogr types
OGRTypes = {int: ogr.OFTInteger, str: ogr.OFTString, float: ogr.OFTReal}
# Edge loop
for e in G.edges(data=True):
data = G.get_edge_data(*e)
g = netgeometry(e, data)
# Loop through attribute data in edges
for key, data in e[2].items():
# Reject spatial data not required for attribute table
if (key != 'Json' and key != 'Wkt' and key != 'Wkb'
and key != 'ShpName'):
# For all edges check/add field and data type to fields dict
if key not in fields:
# Field not in previous edges so add to dict
if type(data) in OGRTypes:
fields[key] = OGRTypes[type(data)]
else:
# Data type not supported, default to string (char 80)
fields[key] = ogr.OFTString
# Create the new field
newfield = ogr.FieldDefn(key, fields[key])
edges.CreateField(newfield)
# Store the data from new field to dict for CreateLayer()
attributes[key] = data
else:
# Field already exists, add data to dict for CreateLayer()
attributes[key] = data
# Create the feature with, passing new attribute data
create_feature(g, edges, attributes)
nodes, edges = None, None
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import ogr
except:
raise SkipTest("OGR not available")
| gpl-2.0 |
mfx9/MolarisTools | MolarisTools/QMMM/QMCallerORCA.py | 2 | 5536 | #-------------------------------------------------------------------------------
# . File : QMCallerORCA.py
# . Program : MolarisTools
# . Copyright : USC, Mikolaj Feliks (2015-2018)
# . License : GNU GPL v3.0 (http://www.gnu.org/licenses/gpl-3.0.en.html)
#-------------------------------------------------------------------------------
import subprocess, os.path, exceptions
from MolarisTools.Utilities import WriteData
from MolarisTools.Parser import ORCAOutputFile, PCgradFile, EngradFile
from MolarisTools.QMMM import QMCaller, CS_MULLIKEN, CS_CHELPG, CS_MERZKOLLMAN
class QMCallerORCA (QMCaller):
"""A class to provide communication between Molaris and ORCA."""
# . Options specific to ORCA.
# . Note that ORCA will by default reuse the previous orbitals as a guess for SCF, hence no restart option.
defaultAttributes = {
"job" : "job" ,
"scratch" : "orca" ,
"ncpu" : 1 ,
"memory" : 1 ,
"method" : "B3LYP/6-31G*" ,
"debug" : False ,
"pathORCA" : os.path.join (os.environ["HOME"], "local", "opt", "orca_3_0_0_linux_x86-64", "orca") ,
}
defaultAttributes.update (QMCaller.defaultAttributes)
def __init__ (self, **keywordArguments):
"""Constructor."""
super (QMCallerORCA, self).__init__ (**keywordArguments)
# . Prepare a ORCA input file
self._WriteInput ()
def _WriteInput (self):
"""Write ORCA input files."""
# . Check for the scratch directory
if not os.path.exists (self.scratch):
os.makedirs (self.scratch)
# . Header
lines = ["# . ORCA job", ]
# . Include solvent or protein
if self.qmmm:
pcFile = os.path.abspath (os.path.join (self.scratch, self.job + ".pc"))
lines.append ("%%pointcharges \"%s\"" % pcFile)
elif self.cosmo:
raise exceptions.StandardError ("COSMO model is not (yet) implemented in QMCallerORCA.")
# . Number of processors
if self.ncpu < 2:
cpus = ""
else:
cpus = " PAL%d" % self.ncpu
# . Level of theory
method, basis = self.method.split ("/")
lines.append ("! ENGRAD %s %s SCFCONV10%s" % (method, basis, cpus))
# . Electronic state
lines.append ("* xyz %d %d" % (self.charge, self.multiplicity))
# . Geometry
atoms = self.molaris.qatoms + self.molaris.latoms
for atom in atoms:
lines.append ("%2s %16.10f %16.10f %16.10f" % (atom.label, atom.x, atom.y, atom.z))
# . End of file
lines.append ("*")
# . Write everything to a file
fo = open (os.path.join (self.scratch, (self.job + ".inp")), "w")
for line in lines:
fo.write (line + "\n")
fo.close ()
# . Now prepare PC data
if self.qmmm:
pointCharges = self.molaris.patoms + self.molaris.watoms
ncharges = len (pointCharges)
lines = [" %d" % ncharges, ]
for atom in pointCharges:
lines.append ("%12.4f %16.10f %16.10f %16.10f" % (atom.charge, atom.x, atom.y, atom.z))
# . Write point charges to a file
fo = open (os.path.join (self.scratch, (self.job + ".pc")), "w")
for line in lines:
fo.write (line + "\n")
fo.close ()
def Run (self):
# . Run the calculation
orcaInput = os.path.join (self.scratch, self.job + ".inp")
orcaOutput = os.path.join (self.scratch, self.job + ".log")
# . In the debug mode, reuse the already existing log file
calculate = True
if self.debug:
if os.path.exists (orcaOutput):
calculate = False
if calculate:
fileOutput = open (orcaOutput, "w")
subprocess.check_call ([self.pathORCA, orcaInput], stdout=fileOutput, stderr=fileOutput)
fileOutput.close ()
# . Parse the output file
orca = ORCAOutputFile (orcaOutput, reverse=True)
# . In ORCA, the final QM energy does not seem to include the self interaction energy of point charges
self.Efinal = orca.Efinal
# . Include forces on QM atoms
engrad = EngradFile (os.path.join (self.scratch, self.job + ".engrad"), reverse=True)
self.forces = engrad.forces
# . Include forces on point charges
if self.qmmm:
pcgrad = PCgradFile (os.path.join (self.scratch, self.job + ".pcgrad"), reverse=True)
self.mmforces = pcgrad.forces
# . Include charges
if self.chargeScheme == CS_MULLIKEN:
charges = []
for atom in orca.atoms:
charges.append (atom.charge)
self.charges = charges
elif self.chargeScheme == CS_MERZKOLLMAN:
raise exceptions.StandardError ("Merz-Kollman charges are not (yet) implemented in QMCallerORCA.")
elif self.chargeScheme == CS_CHELPG:
raise exceptions.StandardError ("CHELPG charges are not (yet) implemented in QMCallerORCA.")
# . Finish up
self._Finalize ()
#===============================================================================
# . Main program
#===============================================================================
if __name__ == "__main__": pass
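# . Example usage (a sketch only; it assumes the QMCaller base class accepts the
# . keyword arguments shown and that ORCA is installed at the default pathORCA):
# .
# . caller = QMCallerORCA (charge=0, multiplicity=1, method="B3LYP/6-31G*", ncpu=4)
# . caller.Run ()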
| gpl-3.0 |
mrshirts/InterMol | intermol/forces/lj_c_pair_type.py | 2 | 1591 | import parmed.unit as units
from intermol.decorators import accepts_compatible_units
from intermol.forces.abstract_pair_type import AbstractPairType
class LjCPairType(AbstractPairType):
__slots__ = ['C6', 'C12', 'scaleLJ', 'scaleQQ', 'long']
@accepts_compatible_units(None, None,
C6=units.kilojoules_per_mole * units.nanometers ** (6),
C12=units.kilojoules_per_mole * units.nanometers ** (12),
scaleLJ=None,
scaleQQ=None,
long=None)
def __init__(self, bondingtype1, bondingtype2,
C6=0.0 * units.kilojoules_per_mole * units.nanometers ** (6),
C12=0.0 * units.kilojoules_per_mole * units.nanometers ** (12),
scaleLJ=None, scaleQQ=None, long=False):
AbstractPairType.__init__(self, bondingtype1, bondingtype2, scaleLJ, scaleQQ, long)
self.C6 = C6
self.C12 = C12
class LjCPair(LjCPairType):
"""
stub documentation
"""
def __init__(self, atom1, atom2, bondingtype1=None, bondingtype2=None,
C6=0.0 * units.kilojoules_per_mole * units.nanometers ** (6),
C12=0.0 * units.kilojoules_per_mole * units.nanometers ** (12),
scaleLJ=None, scaleQQ=None, long=False):
self.atom1 = atom1
self.atom2 = atom2
LjCPairType.__init__(self, bondingtype1, bondingtype2,
C6=C6,
C12=C12,
scaleLJ=scaleLJ, scaleQQ=scaleQQ, long=long) | mit |
thoreg/satchmo | satchmo/apps/payment/modules/giftcertificate/tests.py | 12 | 4097 | from decimal import Decimal
from django.contrib.sites.models import Site
from django.test import TestCase
from keyedcache import cache_delete
from l10n.models import Country
from livesettings import config_value
from models import *
from product.models import Product
from satchmo_store.contact.models import AddressBook, Contact, ContactRole
from satchmo_store.shop.models import Order, OrderItem, OrderItemDetail
from utils import generate_certificate_code, generate_code
import logging
log = logging.getLogger('giftcertificate.tests')
alphabet = 'abcdefghijklmnopqrstuvwxyz'
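# Build a minimal test order: a customer contact with an address in the given
# country/state and two units of the GIFT10 product, with email/message details
# attached to the order item.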
def make_test_order(country, state):
c = Contact(first_name="Gift", last_name="Tester",
role=ContactRole.objects.get(pk='Customer'), email="[email protected]")
c.save()
if not isinstance(country, Country):
country = Country.objects.get(iso2_code__iexact = country)
ad = AddressBook(contact=c, description="home",
street1 = "test", state=state, city="Portland",
country = country, is_default_shipping=True,
is_default_billing=True)
ad.save()
site = Site.objects.get_current()
o = Order(contact=c, shipping_cost=Decimal('0.00'), site=site)
o.save()
p = Product.objects.get(slug='GIFT10')
price = p.unit_price
log.debug("creating with price: %s", price)
item1 = OrderItem(order=o, product=p, quantity='2.0',
unit_price=price, line_item_price=price*2)
item1.save()
detl = OrderItemDetail(name = 'email', value='[email protected]', sort_order=0, item=item1)
detl.save()
detl = OrderItemDetail(name = 'message', value='hello there', sort_order=0, item=item1)
detl.save()
return o
class TestGenerateCode(TestCase):
def testGetCode(self):
c = generate_code(alphabet, '^^^^')
self.assertEqual(len(c), 4)
for ch in c:
self.assert_(ch in alphabet)
def testGetCode2(self):
c = generate_code(alphabet, '^^^^-^^^^')
c2 = generate_code(alphabet, '^^^^-^^^^')
self.assertNotEqual(c,c2)
def testFormat(self):
c = generate_code(alphabet, '^-^-^-^')
for i in (0,2,4,6):
self.assert_(c[i] in alphabet)
for i in (1,3,5):
self.assertEqual(c[i], '-')
class TestGenerateCertificateCode(TestCase):
def setUp(self):
self.charset = config_value('PAYMENT_GIFTCERTIFICATE', 'CHARSET')
self.format = config_value('PAYMENT_GIFTCERTIFICATE', 'FORMAT')
def testGetCode(self):
c = generate_certificate_code()
self.assertEqual(len(c), len(self.format))
chars = [x for x in self.format if not x=='^']
chars.extend(self.charset)
for ch in c:
self.assert_(ch in chars)
class TestCertCreate(TestCase):
fixtures = ['l10n-data.yaml','test_shop']
def setUp(self):
self.site = Site.objects.get_current()
def tearDown(self):
cache_delete()
def testCreate(self):
gc = GiftCertificate(start_balance = '100.00', site=self.site)
gc.save()
self.assert_(gc.code)
self.assertEqual(gc.balance, Decimal('100.00'))
def testUse(self):
gc = GiftCertificate(start_balance = '100.00', site=self.site)
gc.save()
bal = gc.use('10.00')
self.assertEqual(bal, Decimal('90.00'))
self.assertEqual(gc.usages.count(), 1)
class GiftCertOrderTest(TestCase):
fixtures = ['l10n-data.yaml', 'test_shop.yaml', 'test_giftcertificate.yaml', 'test_giftcertificate_config.yaml', 'initial_data.yaml']
def tearDown(self):
cache_delete()
def testOrderSuccess(self):
"""Test cert creation on order success"""
cache_delete()
order = make_test_order('US', '')
order.order_success()
order = Order.objects.get(pk=order.id)
certs = order.giftcertificates.all()
self.assertEqual(len(certs), 1)
c = certs[0]
self.assertEqual(c.balance, Decimal('20.00'))
self.assertEqual(c.recipient_email, '[email protected]')
self.assertEqual(c.message, 'hello there')
| bsd-3-clause |
cjevning/tripping-octo-shame | retrieve_metadata.py | 1 | 8091 | import os
import eyed3
import re
import sys
import urllib
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import TimeoutException
from mutagen.mp3 import MP3
from mutagen.id3 import ID3, APIC, error
from mutagen.easyid3 import EasyID3
from mutagen import File
import logger_setup as ls
logger = ls.get_logger(__name__)
amazon_dict = {'space_delim':'+', 'search_url':'http://www.amazon.com/s/ref=nb_sb_noss?url=search-alias%3Ddigital-music&field-keywords=',
'table_class':'mp3Tracks', 'by_method':By.ID, 'no_results_locator':'noResultsTitle', 'result_class':'result',
'title_locator': 'title', 'artist_locator': 'mp3tArtist', 'album_locator': 'mp3tAlbum'}
soundcloud_dict = {'space_delim':'%20', 'search_url':'https://soundcloud.com/search?q=', 'table_class':'lazyLoadingList__list',
'by_method':By.CSS_SELECTOR, 'no_results_locator':'.sc-type-h2.sc-text', 'result_class':'searchList__item',
'title_locator': 'soundTitle__title', 'artist_locator': 'soundTitle__username'}
def get_metadata_for_song(song_path, song_name, driver):
amazon_song_info = get_song_info(driver, song_name, 'amazon')
soundcloud_song_info = get_song_info(driver, song_name, 'soundcloud')
all_song_info = amazon_song_info + soundcloud_song_info
all_info_w_artwork = get_artwork(driver, all_song_info)
current_song_info = get_current_metadata(song_path, song_name)
return current_song_info + all_info_w_artwork
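# Read any existing ID3 title/artist/album tags and embedded APIC artwork from the
# file, dump the artwork into the art_dump folder, and return the result as a
# one-element list using the same dict layout as the Amazon/SoundCloud results.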
def get_current_metadata(song_path, song_name):
try:
tags = ID3(song_path)
except Exception,e:
logger.error('couldn`t get tags on the song for this reason:')
logger.error(e)
logger.info('skipping')
return []
try:
title = tags["TIT2"].text[0]
except:
title = ''
try:
artist = tags["TPE1"].text[0]
except:
artist = ''
try:
album = tags["TALB"].text[0]
except:
album = ''
mfile = File(song_path)
file_key = song_name + '_default'
file_path = '-'
file_url = ''
try:
apic = mfile.tags['APIC:']
mime_sp = apic.mime.split('/')
ext = mime_sp[len(mime_sp) - 1]
artwork = apic.data # access APIC frame and grab the image
cwd = os.getcwd()
file_path = cwd + '/art_dump/' + file_key + '.' + ext
file_url = 'file://' + file_path
with open(file_path, 'wb') as img:
img.write(artwork)
except Exception,e:
logger.warn('failed to get artwork attached to mp3, probably doesn`t have any. here`s the exception:')
logger.warn(e)
song_dict = {'title':title, 'artist':artist, 'album':album, 'local_art':file_path, 'art_url':file_url, 'file_key':file_key}
return [song_dict]
def get_song_info(driver, name, source):
	props = amazon_dict if source == 'amazon' else soundcloud_dict
query = re.sub('_', props['space_delim'], name)
url = props['search_url'] + query
logger.info('getting url: ' + url)
driver.get(url)
try:
logger.info('looking for search results list...')
table = WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.CLASS_NAME, props['table_class'])))
driver.implicitly_wait(2)
except TimeoutException:
logger.error("took too long to find results table, checking for failed search...")
try:
			thing = driver if source == 'amazon' else table
no_res = thing.findElement(props['by_method'], props['no_results_locator'])
logger.info("yep, no results")
except Exception,e:
logger.info("strange, couldn`t find failed search page either; slow internet maybe?")
return []
logger.info('results table found!')
rows = table.find_elements_by_class_name(props['result_class'])
results = []
for i in range(0, 4):
try:
row = rows[i]
except:
if i is 0:
logger.error('no ' + source + ' results found for ' + name)
else:
logger.warn(source + ' search exhausted after ' + str(i) + ' results')
break
try:
title_elem = row.find_element_by_class_name(props['title_locator'])
title = title_elem.text.encode('utf8') #str(title_elem.text)
title = re.sub('/', '', title)
artist = row.find_element_by_class_name(props['artist_locator']).text.encode('utf8') #str(row.find_element_by_class_name(props['artist_locator']).text)
			if source == 'amazon':
album = row.find_element_by_class_name(props['album_locator']).text.encode('utf8') #str(row.find_element_by_class_name(props['album_locator']).text)
else:
album = 'soundcloud result, album unknown'
details_url = str(title_elem.get_attribute('href'))
key_title = re.sub(' ', '_', title)
file_key = key_title + '_' + source + str(i)
details_dict = {'title':title, 'artist':artist, 'album':album, 'details':details_url, 'file_key':file_key}
results.append(details_dict)
except:
logger.error('something went wrong getting details, checking for promoted link or user link...')
try:
promoted = row.find_element_by_class_name('promotedBadge')
logger.info('yep, promoted link. skipping!')
except:
try:
user = row.find_element_by_class_name('userStats')
logger.info('yep, user link. skipping!')
except:
logger.info('doesn`t seem to be promoted or user link, not sure what`s wrong')
return results
def return_amazon_art_url(artwork_cont):
return artwork_cont.find_element_by_css_selector('img').get_attribute('src')
def return_soundcloud_art_url(artwork_cont):
style = artwork_cont.get_attribute('style')
splits = style.split(';')
back_array = [s for s in splits if 'background-image' in s]
back = str(back_array[0])
start = back.index('(') + 1
end = back.index(')')
https_url = back[start:end]
return https_url.replace('https', 'http')
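# For each search-result dict that has a details page, load that page, pull the
# cover-art URL (an <img> on Amazon, a background-image style on SoundCloud),
# save the image into the art_dump folder under the result's file_key, and record
# the art_url/local_art fields on the dict.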
def get_artwork(driver, metadata):
with_arturls = []
for song_dict in metadata:
try:
details_url = song_dict["details"]
driver.get(details_url)
if 'amazon' in details_url:
by_method = By.ID
locator = 'coverArt_feature_div'
url_func = return_amazon_art_url
else:
by_method = By.CLASS_NAME
locator = 'image__full'
url_func = return_soundcloud_art_url
try:
artwork_cont = WebDriverWait(driver, 10).until(EC.presence_of_element_located((by_method, locator)))
art_url = url_func(artwork_cont)
ext = art_url[-3:]
file_key = song_dict["file_key"].decode('utf8')
file_path = os.getcwd() + '\\art_dump\\' + file_key + '.' + ext
urllib.urlretrieve(art_url, file_path)
song_dict['art_url'] = str(art_url)
song_dict['local_art'] = file_path.encode('utf8')
with_arturls.append(song_dict)
except Exception,e:
logger.error('failed to save artwork for some reason:')
logger.error(e)
except Exception,e:
logger.error('an unexpected error happened somewhere:')
logger.error(e)
return with_arturls
def already_marked(file_path):
try:
tags = EasyID3(file_path)
except Exception,e:
logger.info('no tags on the song, proceed')
return False
try:
marked = tags['website'][0]
if marked == 'connerjevning.com':
logger.info('already marked! skipping')
return True
else:
return False
except:
logger.info('doesn`t appear to be marked, proceed')
return False
def write_html_for_song(file_path, data):
with open(file_path, 'w') as myFile:
myFile.write('<html><body><table style="text-align:center;"><tr><td><h1>Option</h1></td><td><h1>Title</h1></td><td><h1>Artist</h1></td>')
myFile.write('<td><h1>Album</h1></td><td><h1>Artwork</h1></td></tr>')
for i in range(0, len(data)):
myFile.write('<tr><td><h1>')
myFile.write(str(i))
myFile.write('</h1></td><td><p>')
myFile.write(data[i]['title'])
myFile.write('</p></td><td><p>')
myFile.write(data[i]['artist'])
myFile.write('</p></td><td><p>')
myFile.write(data[i]['album'])
myFile.write('</p></td><td><img src="')
myFile.write(data[i]['art_url'])
myFile.write('"></td></tr>')
myFile.write('</table>')
myFile.write('</body>')
myFile.write('</html>')
| mit |
wearpants/osf.io | scripts/tests/test_migrate_presentation_service.py | 60 | 2854 | from tests.base import OsfTestCase
from tests.factories import UserFactory, ProjectFactory, UnconfirmedUserFactory
from framework.auth import Auth
from scripts.migrate_presentation_service import (
do_migration, get_targets, migrate_project_contributed
)
class TestMigrateManualMergedUser(OsfTestCase):
def test_get_targets(self):
user1 = UserFactory.build(username='[email protected]')
user2 = UserFactory()
user1.save()
user_list = get_targets()
assert user_list is not None
assert len(user_list) is 1
user3 = UserFactory.build(username='[email protected]')
user3.save()
user_list = get_targets()
assert len(user_list) is 2
def test_migrate_project_contributed(self):
user1 = UserFactory()
fullname1 = 'hello world'
email1 = '[email protected]'
project1 = ProjectFactory(creator=user1)
user2 = project1.add_unregistered_contributor(
fullname=fullname1, email=email1, auth=Auth(user=user1)
)
project1.save()
assert project1.is_contributor(user2) is True
assert len(project1.contributors) is 2
migrate_project_contributed(user2)
assert project1.is_contributor(user2) is False
assert len(project1.contributors) is 1
user3 = UserFactory()
project2 = ProjectFactory(creator=user1)
project2.add_contributor(user3)
project2.save()
assert project2.is_contributor(user3) is True
assert len(project2.contributors) is 2
migrate_project_contributed(user3)
assert project2.is_contributor(user3) is False
assert len(project2.contributors) is 1
def test_do_migration(self):
user1 = UserFactory()
fullname1 = 'Presentation Service'
email1 = '[email protected]'
project1 = ProjectFactory(creator=user1)
user2 = project1.add_unregistered_contributor(
fullname=fullname1, email=email1, auth=Auth(user=user1)
)
project1.save()
user3 = UserFactory.build(username='[email protected]', fullname=fullname1)
user3.save()
project2 = ProjectFactory(creator=user1)
project2.add_contributor(user3)
project2.save()
assert project1.is_contributor(user2) is True
assert len(project1.contributors) is 2
assert project2.is_contributor(user3) is True
assert len(project2.contributors) is 2
user_list = get_targets()
do_migration(user_list)
assert project2.is_contributor(user3) is False
assert len(project2.contributors) is 1
assert project1.is_contributor(user2) is False
assert len(project1.contributors) is 1
assert user2.is_disabled is True
assert user3.is_disabled is True | apache-2.0 |
bahamas10/Viridian | AmpacheTools/xmlparse.py | 2 | 3942 | #!/usr/bin/env python
#
# Copyright (c) 2012, Dave Eddy <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the project nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
xmlparse.py
Functions to convert XML into a python data structure
Original code from:
http://nonplatonic.com/ben.php?title=python_xml_to_dict_bow_to_my_recursive_g&more=1&c=1&tb=1&pb=1
Modified by:
Dave Eddy <[email protected]>
- Cleaned up whitespace errors
- Added support for attributes
"""
import xml.dom.minidom
from collections import defaultdict
def xmltodict(xmlstring):
"""
Convert an XML string into a dictionary
@param xmlstring {string} The XML string
@return {dict} The resultant object
"""
doc = xml.dom.minidom.parseString(xmlstring)
return _elementtodict(doc)
def _elementtodict(parent):
"""
[Private function]
Recursively search an XML element and construct a dictionary
@param element {Node.ELEMENT_NODE} The node to search
@return {dict} The resultant object
"""
child = parent.firstChild
while child and child.nodeType == xml.dom.minidom.Node.TEXT_NODE and not child.data.strip():
child = child.nextSibling
# Return None for the stopping condition
if not child:
return None
# If we hit a text node just return it
if child.nodeType == xml.dom.minidom.Node.TEXT_NODE or child.nodeType == xml.dom.minidom.Node.CDATA_SECTION_NODE:
value = child.nodeValue
if value.isdigit():
value = int(value)
return value
# Create a dictionary of lists
d = defaultdict(list)
while child:
# If we have a node with elements in it
if child.nodeType == xml.dom.minidom.Node.ELEMENT_NODE:
attr_dict = {}
# Check to see if there are attributes
if child.hasAttributes():
attrs = child.attributes
# Loop the attributes
for i in range(0, attrs.length):
_attr = attrs.item(i)
attr_dict[_attr.name] = _attr.value
d[child.tagName].append({'attr' : attr_dict, 'child' : _elementtodict(child)})
child = child.nextSibling
# Convert the default dict to regular dict
return dict(d)
if __name__ == '__main__':
import json
import sys
try:
xml_file = sys.argv[1]
s = open(xml_file, 'r').read()
except IndexError:
s = sys.stdin.read()
d = xmltodict(s)
print(json.dumps(d, indent=4))
| bsd-3-clause |
esikachev/scenario | sahara/plugins/mapr/versions/v4_0_1_mrv2/version_handler.py | 1 | 2104 | # Copyright (c) 2015, MapR Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sahara.plugins.mapr.base import base_version_handler as bvh
from sahara.plugins.mapr.services.hbase import hbase
from sahara.plugins.mapr.services.hive import hive
from sahara.plugins.mapr.services.httpfs import httpfs
from sahara.plugins.mapr.services.mahout import mahout
from sahara.plugins.mapr.services.management import management
from sahara.plugins.mapr.services.maprfs import maprfs
from sahara.plugins.mapr.services.oozie import oozie
from sahara.plugins.mapr.services.pig import pig
from sahara.plugins.mapr.services.swift import swift
from sahara.plugins.mapr.services.yarn import yarn
import sahara.plugins.mapr.versions.v4_0_1_mrv2.context as c
version = '4.0.1.mrv2'
class VersionHandler(bvh.BaseVersionHandler):
def __init__(self):
super(VersionHandler, self).__init__()
self._version = version
self._required_services = [
yarn.YARNv241(),
maprfs.MapRFS(),
management.Management(),
oozie.Oozie(),
]
self._services = [
maprfs.MapRFS(),
management.Management(),
oozie.Oozie(),
hive.HiveV012(),
hive.HiveV013(),
hbase.HBaseV094(),
hbase.HBaseV098(),
httpfs.HttpFS(),
mahout.Mahout(),
pig.Pig(),
swift.Swift(),
yarn.YARNv241(),
]
def get_context(self, cluster, added=None, removed=None):
return c.Context(cluster, self, added, removed)
| apache-2.0 |
anorfleet/turntable | test/lib/python2.7/site-packages/scipy/cluster/setup.py | 71 | 1216 | #!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import sys
if sys.version_info[0] >= 3:
DEFINE_MACROS = [("SCIPY_PY3K", None)]
else:
DEFINE_MACROS = []
def configuration(parent_package='', top_path=None):
from numpy.distutils.system_info import get_info
from numpy.distutils.misc_util import Configuration, get_numpy_include_dirs
config = Configuration('cluster', parent_package, top_path)
blas_opt = get_info('lapack_opt')
config.add_data_dir('tests')
config.add_extension('_vq',
sources=[('_vq.c')],
include_dirs=[get_numpy_include_dirs()],
extra_info=blas_opt)
config.add_extension('_hierarchy',
sources=[('_hierarchy.c')],
include_dirs=[get_numpy_include_dirs()])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer="SciPy Developers",
author="Eric Jones",
maintainer_email="[email protected]",
description="Clustering Algorithms (Information Theory)",
url="http://www.scipy.org",
license="SciPy License (BSD Style)",
**configuration(top_path='').todict()
)
| mit |
ychfan/tensorflow | tensorflow/python/kernel_tests/segment_reduction_ops_test.py | 13 | 28505 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for segment reduction ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import ops
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
class SegmentReductionHelper(test.TestCase):
def _input(self, input_shape, dtype=dtypes_lib.int32):
num_elem = 1
for x in input_shape:
num_elem *= x
values = np.arange(1, num_elem + 1)
np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)
# Add a non-zero imaginary component to complex types.
if dtype.is_complex:
np_values -= 1j * np_values
return constant_op.constant(
np_values, shape=input_shape, dtype=dtype), np_values
def _segmentReduce(self, indices, x, op1, op2=None, num_out_rows=None):
if not x.size:
return np.array([])
indices = np.asarray(indices)
if num_out_rows is None:
num_out_rows = indices[-1] + 1
output = [None] * num_out_rows
slice_shape = x.shape[indices.ndim:]
x_flat = x.reshape((indices.size,) + slice_shape)
for i, index in enumerate(indices.ravel()):
if (output[index] is not None) and op1 == np.max:
for j in range(0, output[index].shape[0]):
output[index][j] = op1([output[index][j], x_flat[i][j]])
elif output[index] is not None:
output[index] = op1(output[index], x_flat[i])
else:
output[index] = x_flat[i]
# zero initialize values that are still uncalcuated.
# output = [o if o is not None else np.zeros(slice_shape) for o in output]
if not op1 == np.max:
output = [o if o is not None else np.zeros(slice_shape) for o in output]
else:
zeroslice = np.zeros(slice_shape)
zeroslice.fill(dtype.min)
output = [o if o is not None else zeroslice for o in output]
if op2 is not None:
output = [op2(o) for o in output]
output = [o.reshape(slice_shape) for o in output]
return np.array(output)
def _mean_cum_op(self, x, y):
return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)
def _mean_reduce_op(self, x):
return x[0] / x[1] if isinstance(x, tuple) else x
class SegmentReductionOpTest(SegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.segment_sum), (self._mean_cum_op,
self._mean_reduce_op,
math_ops.segment_mean),
(np.ndarray.__mul__, None, math_ops.segment_prod),
(np.minimum, None, math_ops.segment_min),
(np.maximum, None, math_ops.segment_max)]
# A subset of ops has been enabled for complex numbers
complex_ops_list = [(np.add, None, math_ops.segment_sum),
(np.ndarray.__mul__, None, math_ops.segment_prod)]
n = 10
shape = [n, 2]
indices = [i // 3 for i in range(n)]
for dtype in dtypes:
if dtype in (dtypes_lib.complex64, dtypes_lib.complex128):
curr_ops_list = complex_ops_list
else:
curr_ops_list = ops_list
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtype)
for np_op1, np_op2, tf_op in curr_ops_list:
np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)
s = tf_op(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
            # `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsShape(self):
shape = [4, 4]
tf_x, _ = self._input(shape)
indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])
with self.assertRaises(ValueError):
math_ops.segment_sum(data=tf_x, segment_ids=indices)
def testSegmentIdsSize(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape)
indices = [0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment_ids should be the same size"):
s.eval()
def testSegmentIdsValid(self):
# This is a baseline for the following SegmentIdsInvalid* tests.
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, 1]
result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()
self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)
def testSegmentIdsGreaterThanZero(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [1, 1, 2, 2]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsHole(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 3, 3]
np_ans = self._segmentReduce(indices, np_x, np.add)
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsInvalid1(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [-1, -1, 0, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted."):
s.eval()
def testSegmentIdsInvalid2(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 0, 1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids are not increasing"):
s.eval()
def testSegmentIdsInvalid3(self):
shape = [4, 4]
with self.test_session():
tf_x, _ = self._input(shape)
indices = [0, 1, 2, 0]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly "
"because 'segment_ids' input is not sorted."):
s.eval()
def testSegmentIdsInvalid4(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -1]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentIdsInvalid5(self):
shape = [4, 4]
for use_gpu in [True, False]:
with self.test_session(use_gpu=use_gpu):
tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)
indices = [0, 0, 0, -2]
s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testGradient(self):
shape = [4, 4]
indices = [0, 1, 2, 2]
for tf_op in [
math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,
math_ops.segment_max
]:
with self.test_session():
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, segment_ids=indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
class UnsortedSegmentSumTest(SegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128
]
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = 12
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (2,)
for dtype in dtypes:
with self.test_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
np_ans = self._segmentReduce(
indices, np_x, np.add, op2=None, num_out_rows=num_segments)
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
self.assertShapeEqual(np_ans, s)
def testGradientSegmentSum(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for dtype in [dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.complex64,
dtypes_lib.complex128]:
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
with self.test_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testGradientMatchesSegmentSum(self):
# Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum
# and compare the outputs, which should be identical.
# NB: for this test to work, indices must be valid for SegmentSum, namely
# it must be sorted, the indices must be contiguous, and num_segments
# must be max(indices) + 1.
indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]
n = len(indices)
num_cols = 2
shape = [n, num_cols]
num_segments = max(indices) + 1
for dtype in [dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.complex64,
dtypes_lib.complex128]:
with self.test_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtype)
# Results from UnsortedSegmentSum
unsorted_s = math_ops.unsorted_segment_sum(
data=tf_x, segment_ids=indices, num_segments=num_segments)
unsorted_jacob_t, unsorted_jacob_n = (
gradient_checker.compute_gradient(tf_x, shape, unsorted_s,
[num_segments, num_cols],
x_init_value=np_x, delta=1))
# Results from SegmentSum
sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)
sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
sorted_s, [num_segments, num_cols],
x_init_value=np_x,
delta=1)
self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)
def testBadIndices(self):
# Note: GPU kernel does not return the out-of-range error needed for this
# test, so this test is marked as cpu-only.
with self.test_session(use_gpu=False):
for bad in [[-1]], [[7]]:
unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)
with self.assertRaisesOpError(
r"segment_ids\[0,0\] = %d is out of range \[0, 2\)" % bad[0][0]):
unsorted.eval()
def testEmptySecondDimension(self):
dtypes = [
np.float32, np.float64, np.int64, np.int32, np.complex64, np.complex128
]
with self.test_session(use_gpu=True):
for dtype in dtypes:
for itype in (np.int32, np.int64):
data = np.zeros((2, 0), dtype=dtype)
segment_ids = np.array([0, 1], dtype=itype)
unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)
self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))
def testGradientSegmentMax(self):
num_cols = 2
indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])
num_segments = max(indices_flat) + 3
for indices in indices_flat, indices_flat.reshape(5, 2):
shape = indices.shape + (num_cols,)
with self.test_session(use_gpu=True):
tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)
s = math_ops.unsorted_segment_max(
data=tf_x, segment_ids=indices, num_segments=num_segments)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s,
[num_segments, num_cols],
x_init_value=np_x.astype(np.double), delta=1)
self.assertAllClose(jacob_t, jacob_n)
class SparseSegmentReductionHelper(SegmentReductionHelper):
def _sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):
a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype)
indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)
return (constant_op.constant(
indices, dtype=dtypes_lib.int32), indices, a, b)
def _sparseSegmentReduce(self, x, indices, segment_indices, op1, op2=None):
return self._segmentReduce(segment_indices, x[indices], op1, op2)
class SparseSegmentReductionOpTest(SparseSegmentReductionHelper):
def testValues(self):
dtypes = [
dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,
dtypes_lib.int32
]
mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]
# Each item is np_op1, np_op2, tf_op
ops_list = [(np.add, None, math_ops.sparse_segment_sum),
(self._mean_cum_op, self._mean_reduce_op,
math_ops.sparse_segment_mean)]
n = 400
shape = [n, 2]
segment_indices = []
for i in range(20):
for _ in range(i + 1):
segment_indices.append(i)
num_indices = len(segment_indices)
for dtype in dtypes:
with self.test_session(use_gpu=False):
tf_indices, np_indices, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtype)
for np_op1, np_op2, tf_op in ops_list:
if tf_op == math_ops.sparse_segment_mean and dtype not in mean_dtypes:
continue
np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
# NOTE(mrry): The static shape inference that computes
          # `tf_ans.shape` can only infer the sizes from dimension 1
# onwards, because the size of dimension 0 is data-dependent
# and may therefore vary dynamically.
self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])
def testSegmentIdsHole(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [0, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testSegmentIdsGreaterThanZero(self):
tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [(np.add, None, math_ops.sparse_segment_sum), (
self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]
segment_indices = [1, 2, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for np_op1, np_op2, tf_op in ops_list:
np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,
np_op1, np_op2)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
tf_ans = s.eval()
self.assertAllClose(np_ans, tf_ans)
def testValid(self):
# Baseline for the test*Invalid* methods below.
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
s.eval()
def testIndicesInvalid1(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, -1, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[1\] == -1 out of range \[0, 10\)"):
s.eval()
def testIndicesInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"indices\[3\] == 10 out of range \[0, 10\)"):
s.eval()
def testSegmentsInvalid2(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 0, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids are not increasing"):
s.eval()
def testSegmentsInvalid3(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id 1 out of range \[0, 1\), possibly because "
"'segment_ids' input is not sorted"):
s.eval()
def testSegmentsInvalid4(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError(
r"Segment id -1 out of range \[0, 2\), possibly because "
"'segment_ids' input is not sorted"):
s.eval()
def testSegmentsInvalid6(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testSegmentsInvalid7(self):
tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)
ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]
segment_indices = [0, 0, 0, -2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
with self.assertRaisesOpError("segment ids must be >= 0"):
s.eval()
def testGradient(self):
shape = [10, 4]
segment_indices = [0, 1, 2, 2]
num_indices = len(segment_indices)
for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:
with self.test_session():
tf_indices, _, tf_x, np_x = self._sparse_input(
shape, num_indices, dtype=dtypes_lib.float64)
s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)
jacob_t, jacob_n = gradient_checker.compute_gradient(
tf_x,
shape,
s, [3, 4],
x_init_value=np_x.astype(np.double),
delta=1)
self.assertAllClose(jacob_t, jacob_n)
def testGradientValid(self):
# Baseline for the testGradient*Invalid* methods below.
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
s.eval()
def testGradientIndicesInvalid1(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, 0, 10]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index 10 out of range \[0, 10\)"):
s.eval()
def testGradientIndicesInvalid2(self):
tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 2]
tf_indices = [8, 3, -1, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Index -1 out of range \[0, 10\)"):
s.eval()
def testGradientSegmentsInvalid1(self):
tf_x, _ = self._input(
[3, 4], dtype=dtypes_lib.float32) # expecting 3 segments
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 1, 1] # 2 segments
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError("Invalid number of segments"):
s.eval()
def testGradientSegmentsInvalid2(self):
tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, 0]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 1 out of range \[0, 1\)"):
s.eval()
def testGradientSegmentsInvalid3(self):
tf_x, _ = self._input([2, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [-1, 0, 1, 1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id -1 out of range \[0, 2\)"):
s.eval()
def testGradientSegmentsInvalid4(self):
tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)
ops_list = [
math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad
]
segment_indices = [0, 1, 2, -1]
tf_indices = [8, 3, 0, 9]
with self.test_session(use_gpu=False):
for tf_op in ops_list:
s = tf_op(tf_x, tf_indices, segment_indices, 10)
with self.assertRaisesOpError(r"Segment id 0 out of range \[0, 0\)"):
s.eval()
class SegmentReductionOpBenchmark(test.Benchmark):
outer_dim_options = [2**x for x in range(9, 14, 2)]
ratio_options = [2**x for x in range(1, 6, 2)]
inner_dim_options = [2**x for x in range(9, 14, 2)]
# randomly generated sizes with less alignments
inner_dim_options += [
1120, 1215, 1856, 1302, 1329, 1531, 1313, 1672, 1851, 1584
]
dtype_options = [np.float32, np.float64]
options = (outer_dim_options, ratio_options, inner_dim_options, dtype_options)
# pylint: disable=g-long-lambda
op_functors = [lambda vc, vs, seg_ids:
("sorted", math_ops.segment_sum(vc, vs)),
lambda vc, vs, seg_ids:
("unsorted",
math_ops.unsorted_segment_sum(vc, vs, seg_ids[-1]+1))]
# pylint: enable=g-long-lambda
repeat = 10
def _npTypeToStr(self, t):
if t == np.float32:
return "fp32"
if t == np.float64:
return "fp64"
def _runGraph(self, op_functor, outer_dim, ratio, inner_dim, dtype):
output_outer_dim = int(outer_dim / ratio)
const = np.random.randint(5, size=(outer_dim, inner_dim))
seg_ids = np.sort(np.random.randint(output_outer_dim, size=outer_dim))
vs = variables.Variable(seg_ids.astype(np.int32))
with ops.device("/gpu:0"):
vc = variables.Variable(const.astype(dtype))
name, op = op_functor(vc, vs, seg_ids)
with session.Session() as sess:
variables.global_variables_initializer().run()
r = self.run_op_benchmark(
sess,
op,
min_iters=self.repeat,
name="_".join(
map(str,
[name, outer_dim, ratio, inner_dim,
self._npTypeToStr(dtype)])))
return name, r["wall_time"]
def benchmarkSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[0]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
def benchmarkUnsortedSegmentSumGPU(self):
if not test.is_gpu_available(cuda_only=True):
return
for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):
op_functor = self.op_functors[1]
with ops.Graph().as_default():
self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)
if __name__ == "__main__":
test.main()
| apache-2.0 |
anksp21/Community-Zenpacks | ZenPacks.community.HPMon/ZenPacks/community/HPMon/modeler/plugins/community/snmp/HPExpansionCardMap.py | 2 | 2624 | ################################################################################
#
# This program is part of the HPMon Zenpack for Zenoss.
# Copyright (C) 2008 Egor Puzanov.
#
# This program can be used under the GNU General Public License version 2
# You can find full information here: http://www.zenoss.com/oss
#
################################################################################
__doc__="""HPExpansionCardMap
HPExpansionCardMap maps the cpqSePciSlotTable table to card objects
$Id: HPExpansionCardMap.py,v 1.1 2009/08/18 16:40:53 egor Exp $"""
__version__ = '$Revision: 1.1 $'[11:-2]
from Products.DataCollector.plugins.CollectorPlugin import SnmpPlugin, GetTableMap
class HPExpansionCardMap(SnmpPlugin):
"""Map HP/Compaq insight manager PCI table to model."""
maptype = "HPExpansionCardMap"
modname = "ZenPacks.community.HPMon.cpqSePciSlot"
relname = "cards"
compname = "hw"
deviceProperties = \
SnmpPlugin.deviceProperties + ('zHPExpansionCardMapIgnorePci','zCollectorPlugins',)
oms = {}
snmpGetTableMaps = (
GetTableMap('cpqSePciSlotTable',
'.1.3.6.1.4.1.232.1.2.13.1.1',
{
'.3': 'slot',
'.5': '_model',
}
),
)
def process(self, device, results, log):
"""collect snmp information from this device"""
log.info('processing %s for device %s', self.name(), device.id)
if not device.id in self.oms:
self.oms[device.id] = []
rm = self.relMap()
ignorepci = getattr(device, 'zHPExpansionCardMapIgnorePci', False)
if not ignorepci:
getdata, tabledata = results
pcimap = {}
pcicardtable = tabledata.get('cpqSePciSlotTable')
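            # Slots already covered by previously collected object maps are
            # remembered so only genuinely new PCI cards get added below.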
for om in self.oms[device.id]:
if om.modname == "ZenPacks.community.HPMon.cpqSiMemModule": continue
pcimap[int(om.slot)] = 1
for oid, card in pcicardtable.iteritems():
try:
om = self.objectMap(card)
om.snmpindex = oid.strip('.')
if int(om.slot) == 0: continue
if int(om.slot) in pcimap: continue
om.id = self.prepId("cpqSePciSlot%d" % om.slot)
om.setProductKey = "%s" % om._model
except AttributeError:
continue
self.oms[device.id].append(om)
for om in self.oms[device.id]:
rm.append(om)
del self.oms[device.id]
return rm
| gpl-2.0 |
dudonwai/dudonsblog | Lib/site-packages/django/middleware/gzip.py | 478 | 1831 | import re
from django.utils.cache import patch_vary_headers
from django.utils.text import compress_sequence, compress_string
re_accepts_gzip = re.compile(r'\bgzip\b')
class GZipMiddleware(object):
"""
This middleware compresses content if the browser allows gzip compression.
It sets the Vary header accordingly, so that caches will base their storage
on the Accept-Encoding header.
"""
def process_response(self, request, response):
# It's not worth attempting to compress really short responses.
if not response.streaming and len(response.content) < 200:
return response
# Avoid gzipping if we've already got a content-encoding.
if response.has_header('Content-Encoding'):
return response
patch_vary_headers(response, ('Accept-Encoding',))
ae = request.META.get('HTTP_ACCEPT_ENCODING', '')
if not re_accepts_gzip.search(ae):
return response
if response.streaming:
# Delete the `Content-Length` header for streaming content, because
# we won't know the compressed size until we stream it.
response.streaming_content = compress_sequence(response.streaming_content)
del response['Content-Length']
else:
# Return the compressed content only if it's actually shorter.
compressed_content = compress_string(response.content)
if len(compressed_content) >= len(response.content):
return response
response.content = compressed_content
response['Content-Length'] = str(len(response.content))
if response.has_header('ETag'):
response['ETag'] = re.sub('"$', ';gzip"', response['ETag'])
response['Content-Encoding'] = 'gzip'
return response
| mit |
gmalecha/loop-to-sat | loop.py | 1 | 6779 | #!/usr/bin/python
#
# This program converts an infinity game board into an SMT2 formula.
# Use --help to get a description of the arguments. The input file
# uses the following symbols:
#
# - '0' represents a blank
# - '1' represents one connection
# - '|' represents a line
# - 'L' represents an angle connector
# - 'T' represents a connector with three outputs
# - '+' represents a connector with four outputs
#
# The file should have one line for each row in the puzzle. All of
# the lines should be the same length.
#
import os
def isTrue(x):
return x
def isFalse(x):
return "(not %s)" % x
def allof(xs):
return '(and %s)' % ' '.join(xs)
def oneof(xs):
return '(or %s)' % ' '.join(xs)
def edge(i):
return 'edge_%d' % i
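# One boolean variable per grid edge. Each row block stores the COLS horizontal
# edges above that row followed by its COLS+1 vertical edges, so neighbouring
# cells share variables (down(r, c) is the same edge as up(r + 1, c)).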
def up(row, col):
return edge(row * (2*COLS + 1) + col)
def down(row, col):
return edge((row + 1) * (2*COLS + 1) + col)
def left(row, col):
return edge(row * (2*COLS + 1) + COLS + col)
def right(row, col):
return edge(row * (2*COLS + 1) + COLS + col + 1)
def constraint0(up, down, left, right):
return allof([isFalse(up), isFalse(down), isFalse(left), isFalse(right)])
def constraint1(up, down, left, right):
return oneof([ allof([isTrue(up), isFalse(down), isFalse(left), isFalse(right)])
, allof([isFalse(up), isTrue(down), isFalse(left), isFalse(right)])
, allof([isFalse(up), isFalse(down), isTrue(left), isFalse(right)])
, allof([isFalse(up), isFalse(down), isFalse(left), isTrue(right)]) ])
def constraint_line(up, down, left, right):
return oneof([ allof([isFalse(up), isFalse(down),
isTrue(left), isTrue(right)])
, allof([isTrue(up), isTrue(down),
isFalse(left), isFalse(right)]) ])
def constraint_elbow(up, down, left, right):
return oneof([ allof([isTrue(up), isTrue(left),
isFalse(down), isFalse(right)])
, allof([isTrue(up), isTrue(right),
isFalse(down), isFalse(left)])
, allof([isTrue(down), isTrue(left),
isFalse(up), isFalse(right)])
, allof([isTrue(down), isTrue(right),
isFalse(up), isFalse(left)]) ])
def constraint3(up, down, left, right):
return oneof([ allof([ isTrue(up), isTrue(down), isTrue(left), isFalse(right)])
, allof([ isTrue(up), isTrue(down), isFalse(left), isTrue(right)])
, allof([ isTrue(left), isTrue(right), isTrue(up), isFalse(down)])
, allof([ isTrue(left), isTrue(right), isFalse(up), isTrue(down)]) ])
def constraint4(up, down, left, right):
return allof([ isTrue(up), isTrue(down), isTrue(left), isTrue(right) ])
def read_board(inf):
lines = [x.replace('\r', '').replace('\n','') for x in inf.readlines()]
while lines[-1].strip() == '':
lines = lines[:-1]
for x in lines:
assert len(lines[0]) == len(x)
return lines
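# Build the SMT encoding: one shape constraint per tile plus constraints that
# force every edge on the outer border of the board to be unused.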
def to_sat(inst):
global ROWS, COLS
ROWS = len(inst)
COLS = len(inst[0])
result = []
all_vars = set([])
for i in range(0, ROWS):
for j in range(0, COLS):
all_vars = all_vars.union(set([up(i,j), down(i,j), left(i,j), right(i,j)]))
if inst[i][j] == '0':
result.append(constraint0(up(i,j), down(i,j), left(i,j), right(i,j)))
elif inst[i][j] == '1':
result.append(constraint1(up(i,j), down(i,j), left(i,j), right(i,j)))
elif inst[i][j] == '|':
result.append(constraint_line(up(i,j), down(i,j), left(i,j), right(i,j)))
elif inst[i][j] == 'L':
result.append(constraint_elbow(up(i,j), down(i,j), left(i,j), right(i,j)))
elif inst[i][j] == 'T':
result.append(constraint3(up(i,j), down(i,j), left(i,j), right(i,j)))
elif inst[i][j] == '+':
result.append(constraint4(up(i,j), down(i,j), left(i,j), right(i,j)))
else:
assert False
for i in range(0, ROWS):
result.append(isFalse(left(i,0)))
result.append(isFalse(right(i,COLS-1)))
for i in range(0, COLS):
result.append(isFalse(up(0, i)))
result.append(isFalse(down(ROWS-1, i)))
return (all_vars, result)
def to_z3(out, vs, csts):
for x in vs:
out.write('(declare-fun %s () Bool)\n' % x)
for c in csts:
out.write('(assert %s)\n' % c)
out.write('(check-sat)\n')
out.write('(get-model)')
import re
def read_model(inf):
ptrn = re.compile(r'\(define-fun (edge_[0-9]+)\s+\(\)\s+Bool\s+(false|true)\)')
result = {}
for (i,v) in ptrn.findall(inf):
result[i] = v=='true'
return result
import subprocess
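# Pipe the SMT2 problem into a local z3 process and parse the reported model
# back into a {edge variable: bool} mapping.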
def solve(board, dump=None):
(r,w) = os.pipe()
(vs, csts) = to_sat(board)
to_z3(os.fdopen(w, 'w'), vs, csts)
if not dump is None:
to_z3(dump, vs, csts)
dump.write('\r\n')
raw_result = subprocess.check_output(['z3', '-smt2', '-in'],
stdin=r)
return read_model(raw_result)
# right left down up
TABLE = { 0b0000 : ' '
, 0b0001 : unichr(0x2579)
, 0b0010 : unichr(0x257B)
, 0b0011 : unichr(0x2503)
, 0b0100 : unichr(0x2578)
, 0b0101 : unichr(0x251B)
, 0b0110 : unichr(0x2513)
, 0b0111 : unichr(0x252B)
, 0b1000 : unichr(0x257A)
, 0b1001 : unichr(0x2517)
, 0b1010 : unichr(0x250F)
, 0b1011 : unichr(0x2523)
, 0b1100 : unichr(0x2501)
, 0b1101 : unichr(0x253B)
, 0b1110 : unichr(0x2533)
, 0b1111 : unichr(0x254B) }
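# Render the solution: each cell's four edge booleans are packed into a 4-bit
# key (up | down<<1 | left<<2 | right<<3) used to index the box-drawing TABLE.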
def print_board(out, rows, cols, at_intersect):
for r in range(0,rows):
for c in range(0,cols):
val = int(at_intersect[up(r,c)]) \
| int(at_intersect[down(r,c)])*2 \
| int(at_intersect[left(r,c)])*4 \
| int(at_intersect[right(r,c)])*8
out.write(TABLE[val])
out.write('\r\n')
import sys
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser('loop.py')
parser.add_argument('--in', default=False,
action='store_true', help='read from standard input')
parser.add_argument('--dump', action='store_const',
const=sys.stderr, help='dump smt problem to standard error')
parser.add_argument('file', help='the file the read the problem from')
res = vars(parser.parse_args(sys.argv[1:]))
if res['in']:
board = read_board(sys.stdin)
else:
board = read_board(file(res['file']))
answer = solve(board, dump=res['dump'])
print_board(sys.stdout, ROWS, COLS, answer)
| mit |
suhe/odoo | addons/website_mail_channel/models/mail_channel.py | 30 | 1083 | # -*- coding: utf-8 -*-
from openerp import api
from openerp.osv import osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.addons.website.models.website import slug
class MailGroup(osv.Model):
_inherit = 'mail.channel'
@api.cr_uid_ids_context
def message_get_email_values(self, cr, uid, id, notif_mail=None, context=None):
res = super(MailGroup, self).message_get_email_values(cr, uid, id, notif_mail=notif_mail, context=context)
group = self.browse(cr, uid, id, context=context)
base_url = self.pool['ir.config_parameter'].get_param(cr, uid, 'web.base.url')
headers = {}
if res.get('headers'):
try:
headers = eval(res['headers'])
except Exception:
pass
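        # Advertise the public group pages as standard mailing-list headers so
        # subscribers' mail clients can locate the archive and the
        # (un)subscribe URLs.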
headers.update({
'List-Archive': '<%s/groups/%s>' % (base_url, slug(group)),
'List-Subscribe': '<%s/groups>' % (base_url),
'List-Unsubscribe': '<%s/groups?unsubscribe>' % (base_url,),
})
res['headers'] = repr(headers)
return res
| gpl-3.0 |
amirrpp/django-oscar | src/oscar/apps/payment/abstract_models.py | 32 | 11209 | from decimal import Decimal
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from oscar.core.utils import get_default_currency
from oscar.core.compat import AUTH_USER_MODEL
from oscar.templatetags.currency_filters import currency
from oscar.models.fields import AutoSlugField
from . import bankcards
@python_2_unicode_compatible
class AbstractTransaction(models.Model):
"""
A transaction for a particular payment source.
These are similar to the payment events within the order app but model a
slightly different aspect of payment. Crucially, payment sources and
transactions have nothing to do with the lines of the order while payment
events do.
For example:
* A 'pre-auth' with a bankcard gateway
* A 'settle' with a credit provider (see django-oscar-accounts)
"""
source = models.ForeignKey(
'payment.Source', related_name='transactions',
verbose_name=_("Source"))
# We define some sample types but don't constrain txn_type to be one of
# these as there will be domain-specific ones that we can't anticipate
# here.
AUTHORISE, DEBIT, REFUND = 'Authorise', 'Debit', 'Refund'
txn_type = models.CharField(_("Type"), max_length=128, blank=True)
amount = models.DecimalField(_("Amount"), decimal_places=2, max_digits=12)
reference = models.CharField(_("Reference"), max_length=128, blank=True)
status = models.CharField(_("Status"), max_length=128, blank=True)
date_created = models.DateTimeField(_("Date Created"), auto_now_add=True)
def __str__(self):
return _(u"%(type)s of %(amount).2f") % {
'type': self.txn_type,
'amount': self.amount}
class Meta:
abstract = True
app_label = 'payment'
ordering = ['-date_created']
verbose_name = _("Transaction")
verbose_name_plural = _("Transactions")
@python_2_unicode_compatible
class AbstractSource(models.Model):
"""
A source of payment for an order.
This is normally a credit card which has been pre-authed for the order
amount, but some applications will allow orders to be paid for using
multiple sources such as cheque, credit accounts, gift cards. Each payment
source will have its own entry.
This source object tracks how much money has been authorised, debited and
refunded, which is useful when payment takes place in multiple stages.
"""
order = models.ForeignKey(
'order.Order', related_name='sources', verbose_name=_("Order"))
source_type = models.ForeignKey(
'payment.SourceType', verbose_name=_("Source Type"),
related_name="sources")
currency = models.CharField(
_("Currency"), max_length=12, default=get_default_currency)
# Track the various amounts associated with this source
amount_allocated = models.DecimalField(
_("Amount Allocated"), decimal_places=2, max_digits=12,
default=Decimal('0.00'))
amount_debited = models.DecimalField(
_("Amount Debited"), decimal_places=2, max_digits=12,
default=Decimal('0.00'))
amount_refunded = models.DecimalField(
_("Amount Refunded"), decimal_places=2, max_digits=12,
default=Decimal('0.00'))
# Reference number for this payment source. This is often used to look up
# a transaction model for a particular payment partner.
reference = models.CharField(_("Reference"), max_length=128, blank=True)
# A customer-friendly label for the source, eg XXXX-XXXX-XXXX-1234
label = models.CharField(_("Label"), max_length=128, blank=True)
# A dictionary of submission data that is stored as part of the
# checkout process, where we need to pass an instance of this class around
submission_data = None
# We keep a list of deferred transactions that are only actually saved when
# the source is saved for the first time
deferred_txns = None
class Meta:
abstract = True
app_label = 'payment'
verbose_name = _("Source")
verbose_name_plural = _("Sources")
def __str__(self):
description = _("Allocation of %(amount)s from type %(type)s") % {
'amount': currency(self.amount_allocated, self.currency),
'type': self.source_type}
if self.reference:
description += _(" (reference: %s)") % self.reference
return description
def save(self, *args, **kwargs):
super(AbstractSource, self).save(*args, **kwargs)
if self.deferred_txns:
for txn in self.deferred_txns:
self._create_transaction(*txn)
def create_deferred_transaction(self, txn_type, amount, reference=None,
status=None):
"""
Register the data for a transaction that can't be created yet due to FK
        constraints. This happens at checkout, where we create a payment source
        and a transaction but can't save them until the order model exists.
"""
if self.deferred_txns is None:
self.deferred_txns = []
self.deferred_txns.append((txn_type, amount, reference, status))
def _create_transaction(self, txn_type, amount, reference='',
status=''):
self.transactions.create(
txn_type=txn_type, amount=amount,
reference=reference, status=status)
# =======
# Actions
# =======
def allocate(self, amount, reference='', status=''):
"""
Convenience method for ring-fencing money against this source
"""
self.amount_allocated += amount
self.save()
self._create_transaction(
AbstractTransaction.AUTHORISE, amount, reference, status)
allocate.alters_data = True
def debit(self, amount=None, reference='', status=''):
"""
Convenience method for recording debits against this source
"""
if amount is None:
amount = self.balance
self.amount_debited += amount
self.save()
self._create_transaction(
AbstractTransaction.DEBIT, amount, reference, status)
debit.alters_data = True
def refund(self, amount, reference='', status=''):
"""
Convenience method for recording refunds against this source
"""
self.amount_refunded += amount
self.save()
self._create_transaction(
AbstractTransaction.REFUND, amount, reference, status)
refund.alters_data = True
# ==========
# Properties
# ==========
@property
def balance(self):
"""
Return the balance of this source
"""
return (self.amount_allocated - self.amount_debited +
self.amount_refunded)
@property
def amount_available_for_refund(self):
"""
Return the amount available to be refunded
"""
return self.amount_debited - self.amount_refunded
@python_2_unicode_compatible
class AbstractSourceType(models.Model):
"""
A type of payment source.
This could be an external partner like PayPal or DataCash,
or an internal source such as a managed account.
"""
name = models.CharField(_("Name"), max_length=128)
code = AutoSlugField(
_("Code"), max_length=128, populate_from='name', unique=True,
help_text=_("This is used within forms to identify this source type"))
class Meta:
abstract = True
app_label = 'payment'
verbose_name = _("Source Type")
verbose_name_plural = _("Source Types")
def __str__(self):
return self.name
@python_2_unicode_compatible
class AbstractBankcard(models.Model):
"""
Model representing a user's bankcard. This is used for two purposes:
1. The bankcard form will return an instance of this model that can be
used with payment gateways. In this scenario, the instance will
have additional attributes (start_date, issue_number, ccv) that
payment gateways need but that we don't save.
2. To keep a record of a user's bankcards and allow them to be
re-used. This is normally done using the 'partner reference'.
.. warning::
Some of the fields of this model (name, expiry_date) are considered
"cardholder data" under PCI DSS v2. Hence, if you use this model and
store those fields then the requirements for PCI compliance will be
more stringent.
"""
user = models.ForeignKey(AUTH_USER_MODEL, related_name='bankcards',
verbose_name=_("User"))
card_type = models.CharField(_("Card Type"), max_length=128)
# Often you don't actually need the name on the bankcard
name = models.CharField(_("Name"), max_length=255, blank=True)
# We store an obfuscated version of the card number, just showing the last
# 4 digits.
number = models.CharField(_("Number"), max_length=32)
# We store a date even though only the month is visible. Bankcards are
# valid until the last day of the month.
expiry_date = models.DateField(_("Expiry Date"))
# For payment partners who are storing the full card details for us
partner_reference = models.CharField(
_("Partner Reference"), max_length=255, blank=True)
# Temporary data not persisted to the DB
start_date = None
issue_number = None
ccv = None
def __str__(self):
return _(u"%(card_type)s %(number)s (Expires: %(expiry)s)") % {
'card_type': self.card_type,
'number': self.number,
'expiry': self.expiry_month()}
def __init__(self, *args, **kwargs):
# Pop off the temporary data
self.start_date = kwargs.pop('start_date', None)
self.issue_number = kwargs.pop('issue_number', None)
self.ccv = kwargs.pop('ccv', None)
super(AbstractBankcard, self).__init__(*args, **kwargs)
# Initialise the card-type
if self.id is None:
self.card_type = bankcards.bankcard_type(self.number)
if self.card_type is None:
self.card_type = 'Unknown card type'
class Meta:
abstract = True
app_label = 'payment'
verbose_name = _("Bankcard")
verbose_name_plural = _("Bankcards")
def save(self, *args, **kwargs):
if not self.number.startswith('X'):
self.prepare_for_save()
super(AbstractBankcard, self).save(*args, **kwargs)
def prepare_for_save(self):
# This is the first time this card instance is being saved. We
# remove all sensitive data
self.number = u"XXXX-XXXX-XXXX-%s" % self.number[-4:]
self.start_date = self.issue_number = self.ccv = None
@property
def cvv(self):
return self.ccv
@property
def obfuscated_number(self):
return u'XXXX-XXXX-XXXX-%s' % self.number[-4:]
def start_month(self, format='%m/%y'):
return self.start_date.strftime(format)
def expiry_month(self, format='%m/%y'):
return self.expiry_date.strftime(format)
| bsd-3-clause |
dymkowsk/mantid | scripts/test/SANS/gui_logic/run_tab_presenter_test.py | 1 | 13302 |
from __future__ import (absolute_import, division, print_function)
import unittest
import sys
import mantid
from mantid.kernel import config
from mantid.kernel import PropertyManagerDataService
from sans.gui_logic.presenter.run_tab_presenter import RunTabPresenter
from sans.common.enums import (SANSFacility, ReductionDimensionality, SaveType, OutputMode, ISISReductionMode,
RangeStepType, FitType)
from sans.test_helper.user_file_test_helper import (create_user_file, sample_user_file)
from sans.test_helper.mock_objects import (create_mock_view)
from sans.test_helper.common import (remove_file, save_to_csv)
if sys.version_info.major == 3:
from unittest import mock
else:
import mock
BATCH_FILE_TEST_CONTENT_1 = "# MANTID_BATCH_FILE add more text here\n" \
"sample_sans,1,sample_trans,2,sample_direct_beam,3," \
"output_as,test_file,user_file,user_test_file\n" \
"sample_sans,1,can_sans,2,output_as,test_file2\n"
BATCH_FILE_TEST_CONTENT_2 = "# MANTID_BATCH_FILE add more text here\n" \
"sample_sans,SANS2D00022024,sample_trans,SANS2D00022048," \
"sample_direct_beam,SANS2D00022048,output_as,test_file\n" \
"sample_sans,SANS2D00022024,output_as,test_file2\n"
class RunTabPresenterTest(unittest.TestCase):
def setUp(self):
config.setFacility("ISIS")
config.setString("default.instrument", "SANS2D")
def test_that_will_load_user_file(self):
# Setup presenter and mock view
user_file_path = create_user_file(sample_user_file)
view, settings_diagnostic_tab, _ = create_mock_view(user_file_path)
presenter = RunTabPresenter(SANSFacility.ISIS)
presenter.set_view(view)
# Act
presenter.on_user_file_load()
# Assert
# Note that the event slices are not set in the user file
self.assertFalse(view.event_slices)
self.assertTrue(view.reduction_dimensionality is ReductionDimensionality.OneDim)
self.assertTrue(view.save_types[0] is SaveType.NXcanSAS)
self.assertTrue(view.zero_error_free)
self.assertTrue(view.use_optimizations)
self.assertTrue(view.reduction_mode is ISISReductionMode.LAB)
self.assertTrue(view.merge_scale == 1.)
self.assertTrue(view.merge_shift == 0.)
self.assertFalse(view.merge_scale_fit)
self.assertFalse(view.merge_shift_fit)
self.assertTrue(view.event_binning == "7000.0,500.0,60000.0")
self.assertTrue(view.wavelength_step_type is RangeStepType.Lin)
self.assertTrue(view.wavelength_min == 1.5)
self.assertTrue(view.wavelength_max == 12.5)
self.assertTrue(view.wavelength_step == 0.125)
self.assertTrue(view.absolute_scale == 0.074)
self.assertTrue(view.z_offset == 53.)
self.assertTrue(view.normalization_incident_monitor == 1)
self.assertTrue(view.normalization_interpolate)
self.assertTrue(view.transmission_incident_monitor == 1)
self.assertTrue(view.transmission_interpolate)
self.assertTrue(view.transmission_roi_files == "test2.xml")
self.assertTrue(view.transmission_mask_files == "test4.xml")
self.assertTrue(view.transmission_radius == 7.)
self.assertTrue(view.transmission_monitor == 4)
self.assertTrue(view.transmission_mn_shift == -70)
self.assertTrue(view.transmission_sample_use_fit)
self.assertTrue(view.transmission_sample_fit_type is FitType.Logarithmic)
self.assertTrue(view.transmission_sample_polynomial_order == 2)
self.assertTrue(view.transmission_sample_wavelength_min == 1.5)
self.assertTrue(view.transmission_sample_wavelength_max == 12.5)
self.assertTrue(view.transmission_sample_use_wavelength)
self.assertFalse(view.pixel_adjustment_det_1)
self.assertFalse(view.pixel_adjustment_det_2)
self.assertFalse(view.wavelength_adjustment_det_1)
self.assertFalse(view.wavelength_adjustment_det_2)
self.assertTrue(view.q_1d_min_or_rebin_string == "0.001,0.001,0.0126,-0.08,0.2")
self.assertTrue(view.q_xy_max == 0.05)
self.assertTrue(view.q_xy_step == 0.001)
self.assertTrue(view.q_xy_step_type == RangeStepType.Lin)
self.assertTrue(view.gravity_on_off)
self.assertTrue(view.use_q_resolution)
self.assertTrue(view.q_resolution_sample_a == 14.)
self.assertTrue(view.q_resolution_source_a == 13.)
self.assertTrue(view.q_resolution_delta_r == 11.)
self.assertTrue(view.q_resolution_collimation_length == 12.)
self.assertTrue(view.q_resolution_moderator_file == "moderator_rkh_file.txt")
self.assertFalse(view.phi_limit_use_mirror)
self.assertTrue(view.radius_limit_min == 12.)
self.assertTrue(view.radius_limit_min == 12.)
self.assertTrue(view.radius_limit_max == 15.)
# Assert certain function calls
self.assertTrue(view.get_user_file_path.call_count == 3)
self.assertTrue(view.get_batch_file_path.call_count == 2) # called twice for the sub presenter updates (masking table and settings diagnostic tab) # noqa
self.assertTrue(view.get_cell.call_count == 60)
self.assertTrue(view.get_number_of_rows.call_count == 6)
# clean up
remove_file(user_file_path)
def test_fails_silently_when_user_file_does_not_exist(self):
view, _, _ = create_mock_view("non_existent_user_file")
presenter = RunTabPresenter(SANSFacility.ISIS)
presenter.set_view(view)
try:
presenter.on_user_file_load()
has_raised = False
except: # noqa
has_raised = True
self.assertFalse(has_raised)
def test_that_loads_batch_file_and_places_it_into_table(self):
# Arrange
batch_file_path, user_file_path, presenter, view = self._get_files_and_mock_presenter(BATCH_FILE_TEST_CONTENT_1)
# Act
presenter.on_batch_file_load()
# Assert
self.assertTrue(view.add_row.call_count == 2)
expected_first_row = "SampleScatter:1,ssp:,SampleTrans:2,stp:,SampleDirect:3,sdp:," \
"CanScatter:,csp:,CanTrans:,ctp:,CanDirect:,cdp:,OutputName:test_file"
expected_second_row = "SampleScatter:1,ssp:,SampleTrans:,stp:,SampleDirect:,sdp:," \
"CanScatter:2,csp:,CanTrans:,ctp:,CanDirect:,cdp:,OutputName:test_file2"
calls = [mock.call(expected_first_row), mock.call(expected_second_row)]
view.add_row.assert_has_calls(calls)
# Clean up
self._remove_files(user_file_path=user_file_path, batch_file_path=batch_file_path)
def test_fails_silently_when_batch_file_does_not_exist(self):
presenter = RunTabPresenter(SANSFacility.ISIS)
user_file_path = create_user_file(sample_user_file)
view, settings_diagnostic_tab, masking_table = create_mock_view(user_file_path, "non_existent_batch_file")
presenter.set_view(view)
try:
presenter.on_batch_file_load()
has_raised = False
except: # noqa
has_raised = True
self.assertFalse(has_raised)
# Clean up
self._remove_files(user_file_path=user_file_path)
def test_that_gets_states_from_view(self):
# Arrange
batch_file_path, user_file_path, presenter, _ = self._get_files_and_mock_presenter(BATCH_FILE_TEST_CONTENT_2)
presenter.on_user_file_load()
presenter.on_batch_file_load()
# Act
states = presenter.get_states()
# Assert
self.assertTrue(len(states) == 2)
for _, state in states.items():
try:
state.validate()
has_raised = False
except: # noqa
has_raised = True
self.assertFalse(has_raised)
# Check state 0
state0 = states[0]
self.assertTrue(state0.data.sample_scatter == "SANS2D00022024")
self.assertTrue(state0.data.sample_transmission == "SANS2D00022048")
self.assertTrue(state0.data.sample_direct == "SANS2D00022048")
self.assertTrue(state0.data.can_scatter is None)
self.assertTrue(state0.data.can_transmission is None)
self.assertTrue(state0.data.can_direct is None)
# Check state 1
state1 = states[1]
self.assertTrue(state1.data.sample_scatter == "SANS2D00022024")
self.assertTrue(state1.data.sample_transmission is None)
self.assertTrue(state1.data.sample_direct is None)
self.assertTrue(state1.data.can_scatter is None)
self.assertTrue(state1.data.can_transmission is None)
self.assertTrue(state1.data.can_direct is None)
# Check some entries
self.assertTrue(state0.slice.start_time is None)
self.assertTrue(state0.slice.end_time is None)
self.assertTrue(state0.reduction.reduction_dimensionality is ReductionDimensionality.OneDim)
# Clean up
self._remove_files(user_file_path=user_file_path, batch_file_path=batch_file_path)
def test_that_can_get_state_for_index_if_index_exists(self):
# Arrange
batch_file_path, user_file_path, presenter, _ = self._get_files_and_mock_presenter(BATCH_FILE_TEST_CONTENT_2)
presenter.on_user_file_load()
presenter.on_batch_file_load()
# Act
state = presenter.get_state_for_row(1)
# Assert
self.assertTrue(state.data.sample_scatter == "SANS2D00022024")
self.assertTrue(state.data.sample_transmission is None)
self.assertTrue(state.data.sample_direct is None)
self.assertTrue(state.data.can_scatter is None)
self.assertTrue(state.data.can_transmission is None)
self.assertTrue(state.data.can_direct is None)
# Clean up
self._remove_files(user_file_path=user_file_path, batch_file_path=batch_file_path)
def test_that_returns_none_when_index_does_not_exist(self):
# Arrange
batch_file_path = save_to_csv(BATCH_FILE_TEST_CONTENT_2)
user_file_path = create_user_file(sample_user_file)
view, _, _ = create_mock_view(user_file_path, batch_file_path)
presenter = RunTabPresenter(SANSFacility.ISIS)
presenter.set_view(view)
presenter.on_user_file_load()
presenter.on_batch_file_load()
# Act
state = presenter.get_state_for_row(3)
# Assert
self.assertTrue(state is None)
# Clean up
remove_file(batch_file_path)
remove_file(user_file_path)
def test_that_populates_the_property_manager_data_service_when_processing_is_called(self):
# Arrange
self._clear_property_manager_data_service()
batch_file_path, user_file_path, presenter, _ = self._get_files_and_mock_presenter(BATCH_FILE_TEST_CONTENT_2)
# This is not the nicest of tests, but better to test this functionality than not
presenter.on_user_file_load()
presenter.on_batch_file_load()
# Act
presenter.on_processed_clicked()
# Assert
# We should have two states in the PropertyManagerDataService
self.assertTrue(len(PropertyManagerDataService.getObjectNames()) == 2)
# clean up
self._remove_files(user_file_path=user_file_path, batch_file_path=batch_file_path)
self._clear_property_manager_data_service()
def test_that_can_add_new_masks(self):
# Arrange
self._clear_property_manager_data_service()
batch_file_path, user_file_path, presenter, _ = self._get_files_and_mock_presenter(BATCH_FILE_TEST_CONTENT_2)
presenter.on_user_file_load()
presenter.on_batch_file_load()
# Act
presenter.on_mask_file_add()
# Assert
state = presenter.get_state_for_row(0)
mask_info = state.mask
mask_files = mask_info.mask_files
self.assertTrue(mask_files == [user_file_path])
# clean up
self._remove_files(user_file_path=user_file_path, batch_file_path=batch_file_path)
@staticmethod
def _clear_property_manager_data_service():
for element in PropertyManagerDataService.getObjectNames():
if PropertyManagerDataService.doesExist(element):
PropertyManagerDataService.remove(element)
@staticmethod
def _get_files_and_mock_presenter(content):
batch_file_path = save_to_csv(content)
user_file_path = create_user_file(sample_user_file)
view, _, _ = create_mock_view(user_file_path, batch_file_path)
# We just use the sample_user_file since it exists.
view.get_mask_file = mock.MagicMock(return_value=user_file_path)
presenter = RunTabPresenter(SANSFacility.ISIS)
presenter.set_view(view)
return batch_file_path, user_file_path, presenter, view
@staticmethod
def _remove_files(user_file_path=None, batch_file_path=None):
if user_file_path:
remove_file(user_file_path)
if batch_file_path:
remove_file(batch_file_path)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
leppa/home-assistant | homeassistant/components/roku/media_player.py | 2 | 6134 | """Support for the Roku media player."""
import logging
import requests.exceptions
from roku import Roku
from homeassistant.components.media_player import MediaPlayerDevice
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MOVIE,
SUPPORT_NEXT_TRACK,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
STATE_HOME,
STATE_IDLE,
STATE_PLAYING,
STATE_STANDBY,
)
DEFAULT_PORT = 8060
_LOGGER = logging.getLogger(__name__)
SUPPORT_ROKU = (
SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
)
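# Devices are only set up through discovery; async_setup_platform bails out
# when no discovery_info is supplied.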
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Roku platform."""
if not discovery_info:
return
host = discovery_info[CONF_HOST]
async_add_entities([RokuDevice(host)], True)
class RokuDevice(MediaPlayerDevice):
"""Representation of a Roku device on the network."""
def __init__(self, host):
"""Initialize the Roku device."""
self.roku = Roku(host)
self.ip_address = host
self.channels = []
self.current_app = None
self._device_info = {}
self._power_state = "Unknown"
def update(self):
"""Retrieve latest state."""
try:
self._device_info = self.roku.device_info
self._power_state = self.roku.power_state
self.ip_address = self.roku.host
self.channels = self.get_source_list()
if self.roku.current_app is not None:
self.current_app = self.roku.current_app
else:
self.current_app = None
except (requests.exceptions.ConnectionError, requests.exceptions.ReadTimeout):
pass
def get_source_list(self):
"""Get the list of applications to be used as sources."""
return ["Home"] + sorted(channel.name for channel in self.roku.apps)
@property
def should_poll(self):
"""Device should be polled."""
return True
@property
def name(self):
"""Return the name of the device."""
if self._device_info.user_device_name:
return self._device_info.user_device_name
return f"Roku {self._device_info.serial_num}"
@property
def state(self):
"""Return the state of the device."""
if self._power_state == "Off":
return STATE_STANDBY
if self.current_app is None:
return None
if self.current_app.name == "Power Saver" or self.current_app.is_screensaver:
return STATE_IDLE
if self.current_app.name == "Roku":
return STATE_HOME
if self.current_app.name is not None:
return STATE_PLAYING
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_ROKU
@property
def unique_id(self):
"""Return a unique, HASS-friendly identifier for this entity."""
return self._device_info.serial_num
@property
def media_content_type(self):
"""Content type of current playing media."""
if self.current_app is None:
return None
if self.current_app.name == "Power Saver":
return None
if self.current_app.name == "Roku":
return None
return MEDIA_TYPE_MOVIE
@property
def media_image_url(self):
"""Image url of current playing media."""
if self.current_app is None:
return None
if self.current_app.name == "Roku":
return None
if self.current_app.name == "Power Saver":
return None
if self.current_app.id is None:
return None
return "http://{0}:{1}/query/icon/{2}".format(
self.ip_address, DEFAULT_PORT, self.current_app.id
)
@property
def app_name(self):
"""Name of the current running app."""
if self.current_app is not None:
return self.current_app.name
@property
def app_id(self):
"""Return the ID of the current running app."""
if self.current_app is not None:
return self.current_app.id
@property
def source(self):
"""Return the current input source."""
if self.current_app is not None:
return self.current_app.name
@property
def source_list(self):
"""List of available input sources."""
return self.channels
def turn_on(self):
"""Turn on the Roku."""
self.roku.poweron()
def turn_off(self):
"""Turn off the Roku."""
self.roku.poweroff()
def media_play_pause(self):
"""Send play/pause command."""
if self.current_app is not None:
self.roku.play()
def media_previous_track(self):
"""Send previous track command."""
if self.current_app is not None:
self.roku.reverse()
def media_next_track(self):
"""Send next track command."""
if self.current_app is not None:
self.roku.forward()
def mute_volume(self, mute):
"""Mute the volume."""
if self.current_app is not None:
self.roku.volume_mute()
def volume_up(self):
"""Volume up media player."""
if self.current_app is not None:
self.roku.volume_up()
def volume_down(self):
"""Volume down media player."""
if self.current_app is not None:
self.roku.volume_down()
def select_source(self, source):
"""Select input source."""
if self.current_app is not None:
if source == "Home":
self.roku.home()
else:
channel = self.roku[source]
channel.launch()
| apache-2.0 |
yanlookwatchsee/odoo_management_script | dpsync.py | 1 | 1203 | #!/usr/bin/python
import subprocess as sp
import os
from dropbox import client, rest, session
class DpClient:
def login(self):
self.DP_PASS_FILE = '/home/ubuntu/.dropboxpass'
with open(self.DP_PASS_FILE) as f:
l = f.read().split()
self.app_key, self.app_secret = l[0], l[1]
self.flow = client.DropboxOAuth2FlowNoRedirect(self.app_key, self.app_secret)
authorize_url = self.flow.start()
print "1. Go to: " + authorize_url
print "2. Click \"Allow\" (you might have to log in first)."
print "3. Copy the authorization code."
code = raw_input("Enter the authorization code here: ").strip()
access_token, user_id = self.flow.finish(code)
with open(self.TOKEN_FILE, 'w') as f:
f.write(access_token)
	def __init__(self):
		self.TOKEN_FILE = '/etc/odoo/.dp_token_file'
		try:
			# Reuse the cached access token if one has been stored previously.
			with open(self.TOKEN_FILE, 'r') as f:
				access_token = f.read().strip()
		except IOError:
			# No cached token yet: run the interactive OAuth2 flow (which
			# writes TOKEN_FILE) and read the fresh token back.
			self.login()
			with open(self.TOKEN_FILE, 'r') as f:
				access_token = f.read().strip()
		self.api_client = client.DropboxClient(access_token)
def put(self, path):
with open(path, 'rb') as from_file:
self.api_client.put_file(path.split('/')[-1], from_file)
def delete(self, path):
self.api_client.file_delete(path)
if __name__ == '__main__':
c = DpClient()
| apache-2.0 |
vinayan3/clpricehistory | django/conf/__init__.py | 146 | 6707 | """
Settings and configuration for Django.
Values will be read from the module specified by the DJANGO_SETTINGS_MODULE environment
variable, and then from django.conf.global_settings; see the global settings file for
a list of all possible variables.
"""
import os
import re
import time # Needed for Windows
import warnings
from django.conf import global_settings
from django.utils.functional import LazyObject
from django.utils import importlib
ENVIRONMENT_VARIABLE = "DJANGO_SETTINGS_MODULE"
class LazySettings(LazyObject):
"""
A lazy proxy for either global Django settings or a custom settings object.
The user can manually configure settings prior to using them. Otherwise,
Django uses the settings module pointed to by DJANGO_SETTINGS_MODULE.
"""
def _setup(self):
"""
Load the settings module pointed to by the environment variable. This
is used the first time we need any settings at all, if the user has not
previously configured the settings manually.
"""
try:
settings_module = os.environ[ENVIRONMENT_VARIABLE]
if not settings_module: # If it's set but is an empty string.
raise KeyError
except KeyError:
# NOTE: This is arguably an EnvironmentError, but that causes
# problems with Python's interactive help.
raise ImportError("Settings cannot be imported, because environment variable %s is undefined." % ENVIRONMENT_VARIABLE)
self._wrapped = Settings(settings_module)
def configure(self, default_settings=global_settings, **options):
"""
Called to manually configure the settings. The 'default_settings'
parameter sets where to retrieve any unspecified values from (its
argument must support attribute access (__getattr__)).
"""
if self._wrapped != None:
raise RuntimeError('Settings already configured.')
holder = UserSettingsHolder(default_settings)
for name, value in options.items():
setattr(holder, name, value)
self._wrapped = holder
def configured(self):
"""
Returns True if the settings have already been configured.
"""
return bool(self._wrapped)
configured = property(configured)
class BaseSettings(object):
"""
Common logic for settings whether set by a module or by the user.
"""
def __setattr__(self, name, value):
if name in ("MEDIA_URL", "STATIC_URL") and value and not value.endswith('/'):
warnings.warn('If set, %s must end with a slash' % name,
PendingDeprecationWarning)
object.__setattr__(self, name, value)
class Settings(BaseSettings):
def __init__(self, settings_module):
# update this dict from global settings (but only for ALL_CAPS settings)
for setting in dir(global_settings):
if setting == setting.upper():
setattr(self, setting, getattr(global_settings, setting))
# store the settings module in case someone later cares
self.SETTINGS_MODULE = settings_module
try:
mod = importlib.import_module(self.SETTINGS_MODULE)
except ImportError, e:
raise ImportError("Could not import settings '%s' (Is it on sys.path?): %s" % (self.SETTINGS_MODULE, e))
# Settings that should be converted into tuples if they're mistakenly entered
# as strings.
tuple_settings = ("INSTALLED_APPS", "TEMPLATE_DIRS")
for setting in dir(mod):
if setting == setting.upper():
setting_value = getattr(mod, setting)
if setting in tuple_settings and type(setting_value) == str:
setting_value = (setting_value,) # In case the user forgot the comma.
setattr(self, setting, setting_value)
# Expand entries in INSTALLED_APPS like "django.contrib.*" to a list
# of all those apps.
new_installed_apps = []
for app in self.INSTALLED_APPS:
if app.endswith('.*'):
app_mod = importlib.import_module(app[:-2])
appdir = os.path.dirname(app_mod.__file__)
app_subdirs = os.listdir(appdir)
app_subdirs.sort()
name_pattern = re.compile(r'[a-zA-Z]\w*')
for d in app_subdirs:
if name_pattern.match(d) and os.path.isdir(os.path.join(appdir, d)):
new_installed_apps.append('%s.%s' % (app[:-2], d))
else:
new_installed_apps.append(app)
self.INSTALLED_APPS = new_installed_apps
if hasattr(time, 'tzset') and self.TIME_ZONE:
# When we can, attempt to validate the timezone. If we can't find
# this file, no check happens and it's harmless.
zoneinfo_root = '/usr/share/zoneinfo'
if (os.path.exists(zoneinfo_root) and not
os.path.exists(os.path.join(zoneinfo_root, *(self.TIME_ZONE.split('/'))))):
raise ValueError("Incorrect timezone setting: %s" % self.TIME_ZONE)
# Move the time zone info into os.environ. See ticket #2315 for why
# we don't do this unconditionally (breaks Windows).
os.environ['TZ'] = self.TIME_ZONE
time.tzset()
# Settings are configured, so we can set up the logger if required
if self.LOGGING_CONFIG:
# First find the logging configuration function ...
logging_config_path, logging_config_func_name = self.LOGGING_CONFIG.rsplit('.', 1)
logging_config_module = importlib.import_module(logging_config_path)
logging_config_func = getattr(logging_config_module, logging_config_func_name)
# ... then invoke it with the logging settings
logging_config_func(self.LOGGING)
class UserSettingsHolder(BaseSettings):
"""
Holder for user configured settings.
"""
# SETTINGS_MODULE doesn't make much sense in the manually configured
# (standalone) case.
SETTINGS_MODULE = None
def __init__(self, default_settings):
"""
Requests for configuration variables not in this class are satisfied
from the module specified in default_settings (if possible).
"""
self.default_settings = default_settings
def __getattr__(self, name):
return getattr(self.default_settings, name)
def __dir__(self):
return self.__dict__.keys() + dir(self.default_settings)
# For Python < 2.6:
__members__ = property(lambda self: self.__dir__())
settings = LazySettings()
| bsd-3-clause |
Webstr-framework/webstr | webstr/core/config.py | 1 | 1347 | """
Central configuration module of webstr selenium tests.
This module provides configuration options along with default values and
function to redefine values.
"""
# Copyright 2016 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
SELENIUM_LOG_LEVEL = logging.INFO
SCHEME = 'https'
PORT = 443
BROWSER = 'Firefox'
BROWSER_VERSION = ''
BROWSER_PLATFORM = 'ANY'
SELENIUM_SERVER = None
SELENIUM_PORT = 4444
BROWSER_WIDTH = 1280
BROWSER_HEIGHT = 1024
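# The constants above act as defaults; callers can override individual values
# at runtime through update_value() below.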
def update_value(key_name, value, force=False):
"""
Update single value of this config module.
"""
this_module = sys.modules[__name__]
key_name = key_name.upper()
# raise AttributeError if we try to define new value (unless force is used)
if not force:
getattr(this_module, key_name)
setattr(this_module, key_name, value)
| apache-2.0 |
madscientist42/gnome15 | src/plugins/voip-teamspeak3/ts3/__init__.py | 8 | 7766 | # Gnome15 - Suite of tools for the Logitech G series keyboards and headsets
# Copyright (C) 2012 Brett Smith <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from telnetlib import Telnet
from threading import Thread
from threading import RLock
from message import MessageFactory
from message import Command
# Logging
import logging
logger = logging.getLogger(__name__)
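# Block until a complete, parseable line arrives from the client query
# connection; blank lines and lines that do not parse into a Message are skipped.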
def _receive_message(client):
while True:
incoming_message = client.read_until('\n', 10).strip()
if incoming_message is not None and incoming_message.strip():
logger.info("Received: %s", incoming_message)
message = MessageFactory.get_message(incoming_message)
if message:
return message
class TS3CommandException(Exception):
def __init__(self, code, message):
Exception.__init__(self, message)
self.code = code
class TS3():
class ReceiveThread(Thread):
def __init__(self, client):
Thread.__init__(self)
self._client = client
self.setDaemon(True)
self.setName("TS3ReceiveThread")
self._reply_handler = None
self._error_handler = None
self._stop = False
def stop(self):
self._stop = True
def run(self):
try:
while True:
try:
if self._stop:
raise EOFError()
msg = _receive_message(self._client)
except TS3CommandException as e:
logger.debug("Error while receving message", exc_info = e)
self._error_handler(e)
else:
self._reply_handler(msg)
except Exception as e:
logger.debug("Error in main loop", exc_info = e)
self._error_handler(e)
def __init__(self, hostname="127.0.0.1", port=25639, timeout=10):
self.timeout = timeout
self.hostname = hostname
self.port = port
self._event_client = None
self._event_thread = None
self._command_client = None
self._lock = RLock()
self.schandlerid = None
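    # Point both telnet connections at a different server tab: unregister event
    # notifications first, issue 'use' on each connection, then re-register.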
def change_server(self, schandlerid):
if self._event_client is not None:
self._write_command(self._event_client, Command(
'clientnotifyunregister')
)
self.schandlerid = schandlerid
self._send_command(self._command_client, Command(
'use',
schandlerid=self.schandlerid)
)
if self._event_client is not None:
self._send_command(self._event_client, Command(
'use',
schandlerid=self.schandlerid)
)
self._send_command(self._event_client, Command(
'clientnotifyregister',
schandlerid=self.schandlerid,
event=self._event_type
)
)
def close(self):
if self._event_thread is not None:
self._event_thread.stop()
self._command_client.close()
self._command_client = None
if self._event_client is not None:
self._event_client.close()
self._event_client = None
def start(self):
self._create_command_client()
def send_event_command(self, command):
try:
self._lock.acquire()
if self._event_client is not None:
self._write_command(self._event_client, command)
finally:
self._lock.release()
def send_command(self, command):
try:
self._lock.acquire()
if self._command_client is None:
self.start()
return self._send_command(self._command_client, command)
finally:
self._lock.release()
def subscribe(self, reply_handler, type='any', error_handler = None):
"""
Shortcut method to subscribe to all messages received from the client.
Keyword arguments:
reply_handler -- function called with Message as argument
error_handler -- function called with TSCommandException as argument
type -- type of event to subscribe to
"""
try:
self._lock.acquire()
if self._event_client is not None:
raise Exception("Already subscribed")
self._event_type = type
self._create_event_client()
self._event_thread._reply_handler = reply_handler
self._event_thread._error_handler = error_handler
self._write_command(self._event_client, Command(
'clientnotifyregister',
schandlerid=self.schandlerid,
event=type
)
)
finally:
self._lock.release()
"""
Private
"""
def _send_command(self, client, command):
try:
self._lock.acquire()
self._write_command(client, command)
r_reply = None
while True:
reply = _receive_message(client)
if reply.command == 'error':
msg = reply.args['msg']
if msg != 'ok':
raise TS3CommandException(int(reply.args['id']), msg)
else:
break
else:
if r_reply is None:
r_reply = reply
else:
raise TS3CommandException(9999, "Multiple replies")
return r_reply
finally:
self._lock.release()
def _write_command(self, client, command):
logger.info("Sending command: %s", command.output)
client.write("%s\n" % command.output)
def _create_command_client(self):
self._command_client = Telnet(host=self.hostname, port=self.port)
self.schandlerid = int(_receive_message(self._command_client).args['schandlerid'])
def _create_event_client(self):
self._event_client = Telnet(host=self.hostname, port=self.port)
_receive_message(self._event_client)
self._event_thread = self.ReceiveThread(self._event_client)
self._event_thread.start() | gpl-3.0 |
brijeshkesariya/odoo | addons/l10n_vn/__openerp__.py | 342 | 1697 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module is Copyright (c) 2009-2013 General Solutions (http://gscom.vn) All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Vietnam Chart of Accounts",
"version" : "1.0",
"author" : "General Solutions",
'website': 'http://gscom.vn',
"category" : "Localization/Account Charts",
"description": """
This is the module to manage the accounting chart for Vietnam in OpenERP.
=========================================================================
This module applies to companies based on the Vietnamese Accounting Standard (VAS).
**Credits:** General Solutions.
""",
"depends" : ["account","base_vat","base_iban"],
"data" : ["account_tax_code.xml","account_chart.xml","account_tax.xml","l10n_vn_wizard.xml"],
"demo" : [],
'auto_install': False,
"installable": True,
}
| agpl-3.0 |
avneesh91/django | django/contrib/gis/gdal/datasource.py | 32 | 4577 | """
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
              # Returns the value of the field; OFTIntegers return ints,
# OFTReal returns floats, all else returns string.
val = field.value
"""
from ctypes import byref
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils.encoding import force_bytes, force_text
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
"Wraps an OGR Data Source object."
destructor = capi.destroy_ds
def __init__(self, ds_input, ds_driver=False, write=False, encoding='utf-8'):
# The write flag.
if write:
self._write = 1
else:
self._write = 0
# See also http://trac.osgeo.org/gdal/wiki/rfc23_ogr_unicode
self.encoding = encoding
Driver.ensure_registered()
if isinstance(ds_input, str):
# The data source driver is a void pointer.
ds_driver = Driver.ptr_type()
try:
# OGROpen will auto-detect the data source type.
ds = capi.open_ds(force_bytes(ds_input), self._write, byref(ds_driver))
except GDALException:
# Making the error message more clear rather than something
# like "Invalid pointer returned from OGROpen".
raise GDALException('Could not open the datasource at "%s"' % ds_input)
elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
ds = ds_input
else:
raise GDALException('Invalid data source input type: %s' % type(ds_input))
if ds:
self.ptr = ds
self.driver = Driver(ds_driver)
else:
# Raise an exception if the returned pointer is NULL
raise GDALException('Invalid data source file "%s"' % ds_input)
def __iter__(self):
"Allows for iteration over the layers in a data source."
for i in range(self.layer_count):
yield self[i]
def __getitem__(self, index):
"Allows use of the index [] operator to get a layer at the index."
if isinstance(index, str):
layer = capi.get_layer_by_name(self.ptr, force_bytes(index))
if not layer:
raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
elif isinstance(index, int):
if index < 0 or index >= self.layer_count:
raise OGRIndexError('index out of range')
layer = capi.get_layer(self._ptr, index)
else:
raise TypeError('Invalid index type: %s' % type(index))
return Layer(layer, self)
def __len__(self):
"Return the number of layers within the data source."
return self.layer_count
def __str__(self):
"Return OGR GetName and Driver for the Data Source."
return '%s (%s)' % (self.name, self.driver)
@property
def layer_count(self):
"Return the number of layers in the data source."
return capi.get_layer_count(self._ptr)
@property
def name(self):
"Return the name of the data source."
name = capi.get_ds_name(self._ptr)
return force_text(name, self.encoding, strings_only=True)
| bsd-3-clause |
idea4bsd/idea4bsd | python/lib/Lib/site-packages/django/views/csrf.py | 289 | 3834 | from django.http import HttpResponseForbidden
from django.template import Context, Template
from django.conf import settings
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>Forbidden <span>(403)</span></h1>
<p>CSRF verification failed. Request aborted.</p>
{% if no_referer %}
<p>You are seeing this message because this HTTPS site requires a 'Referer
header' to be sent by your Web browser, but none was sent. This header is
required for security reasons, to ensure that your browser is not being
hijacked by third parties.</p>
<p>If you have configured your browser to disable 'Referer' headers, please
re-enable them, at least for this site, or for HTTPS connections, or for
'same-origin' requests.</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href='http://docs.djangoproject.com/en/dev/ref/contrib/csrf/#ref-contrib-csrf'>Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>The view function uses <a
href='http://docs.djangoproject.com/en/dev/ref/templates/api/#subclassing-context-requestcontext'><code>RequestContext</code></a>
for the template, instead of <code>Context</code>.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>More information is available with DEBUG=True.</small></p>
</div>
{% endif %}
</body>
</html>
"""
def csrf_failure(request, reason=""):
"""
Default view used when request fails CSRF protection
"""
from django.middleware.csrf import REASON_NO_REFERER
    t = Template(CSRF_FAILURE_TEMPLATE)
c = Context({'DEBUG': settings.DEBUG,
'reason': reason,
'no_referer': reason == REASON_NO_REFERER
})
return HttpResponseForbidden(t.render(c), mimetype='text/html')
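# Example (hypothetical): a project can replace this default view with its own
# by pointing the CSRF_FAILURE_VIEW setting at a custom callable, e.g. in
# settings.py:
#
#   CSRF_FAILURE_VIEW = 'myapp.views.csrf_failure'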
| apache-2.0 |
MQQiang/kbengine | kbe/res/scripts/common/Lib/idlelib/WindowList.py | 153 | 2472 | from tkinter import *
import sys
class WindowList:
def __init__(self):
self.dict = {}
self.callbacks = []
def add(self, window):
window.after_idle(self.call_callbacks)
self.dict[str(window)] = window
def delete(self, window):
try:
del self.dict[str(window)]
except KeyError:
# Sometimes, destroy() is called twice
pass
self.call_callbacks()
def add_windows_to_menu(self, menu):
list = []
for key in self.dict:
window = self.dict[key]
try:
title = window.get_title()
except TclError:
continue
list.append((title, key, window))
list.sort()
for title, key, window in list:
menu.add_command(label=title, command=window.wakeup)
def register_callback(self, callback):
self.callbacks.append(callback)
def unregister_callback(self, callback):
try:
self.callbacks.remove(callback)
except ValueError:
pass
def call_callbacks(self):
for callback in self.callbacks:
try:
callback()
except:
t, v, tb = sys.exc_info()
print("warning: callback failed in WindowList", t, ":", v)
registry = WindowList()
add_windows_to_menu = registry.add_windows_to_menu
register_callback = registry.register_callback
unregister_callback = registry.unregister_callback
class ListedToplevel(Toplevel):
def __init__(self, master, **kw):
Toplevel.__init__(self, master, kw)
registry.add(self)
self.focused_widget = self
def destroy(self):
registry.delete(self)
Toplevel.destroy(self)
# If this is Idle's last window then quit the mainloop
# (Needed for clean exit on Windows 98)
if not registry.dict:
self.quit()
def update_windowlist_registry(self, window):
registry.call_callbacks()
def get_title(self):
# Subclass can override
return self.wm_title()
def wakeup(self):
try:
if self.wm_state() == "iconic":
self.wm_withdraw()
self.wm_deiconify()
self.tkraise()
self.focused_widget.focus_set()
except TclError:
# This can happen when the window menu was torn off.
# Simply ignore it.
pass
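# Usage sketch (hypothetical; `menu` would be a tkinter Menu created by the
# embedding application): keep a "Windows" menu in sync with the registry.
#
#   def rebuild_windows_menu():
#       menu.delete(0, "end")
#       add_windows_to_menu(menu)
#   register_callback(rebuild_windows_menu)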
| lgpl-3.0 |
felipenaselva/felipe.repository | script.module.placenta/lib/resources/lib/sources/en/to_be_fixed/needsfixing/pubfilmonline.py | 1 | 4457 | # -*- coding: UTF-8 -*-
#######################################################################
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# @tantrumdev wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return. - Muad'Dib
# ----------------------------------------------------------------------------
#######################################################################
# Addon Name: Placenta
# Addon id: plugin.video.placenta
# Addon Provider: Mr.Blamo
import re,traceback,urllib,urlparse,json
from resources.lib.modules import cfscrape
from resources.lib.modules import cleantitle
from resources.lib.modules import client
from resources.lib.modules import directstream
from resources.lib.modules import jsunpack
from resources.lib.modules import log_utils
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['en']
self.domains = ['pubfilmonline.net']
self.base_link = 'http://pubfilmonline.net/'
self.post_link = '/wp-admin/admin-ajax.php'
self.search_link = '/?s=%s'
self.scraper = cfscrape.create_scraper()
def movie(self, imdb, title, localtitle, aliases, year):
try:
url = '%s/movies/%s-%s/' % (self.base_link, cleantitle.geturl(title),year)
r = self.scraper.get(url).content
if '<h2>ERROR <span>404</span></h2>' in r:
url = '%s/movies/%s/' % (self.base_link, cleantitle.geturl(title))
r = self.scraper.get(url).content
if '<h2>ERROR <span>404</span></h2>' in r: return
return url
except:
failure = traceback.format_exc()
log_utils.log('PubFilmOnline - Exception: \n' + str(failure))
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
url = {'imdb': imdb, 'tvdb': tvdb, 'tvshowtitle': tvshowtitle, 'year': year}
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('PubFilmOnline - Exception: \n' + str(failure))
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if url == None: return
url = urlparse.parse_qs(url)
url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
url = urllib.urlencode(url)
return url
except:
failure = traceback.format_exc()
log_utils.log('PubFilmOnline - Exception: \n' + str(failure))
return
def sources(self, url, hostDict, hostprDict):
try:
sources = []
if url == None: return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
if 'tvshowtitle' in data:
url = '%s/episodes/%s-%01dx%01d/' % (self.base_link, cleantitle.geturl(data['tvshowtitle']), int(data['season']), int(data['episode']))
year = re.findall('(\d{4})', data['premiered'])[0]
r = self.scraper.get(url).content
y = client.parseDOM(r, 'span', attrs = {'class': 'date'})[0]
y = re.findall('(\d{4})', y)[0]
if not y == year: raise Exception()
else:
r = self.scraper.get(url).content
result = re.findall('''['"]file['"]:['"]([^'"]+)['"],['"]label['"]:['"]([^'"]+)''', r)
for i in result:
url = i[0].replace('\/', '/')
sources.append({'source': 'gvideo', 'quality': source_utils.label_to_quality(i[1]), 'language': 'en', 'url': url, 'direct': True, 'debridonly': False})
return sources
except:
failure = traceback.format_exc()
log_utils.log('PubFilmOnline - Exception: \n' + str(failure))
return sources
def resolve(self, url):
if 'google' in url:
return directstream.googlepass(url)
else:
return url
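# Usage sketch (hypothetical; hostDict/hostprDict are host lists normally
# supplied by the add-on's resolver framework):
#
#   s = source()
#   movie_url = s.movie('tt0133093', 'The Matrix', 'The Matrix', [], '1999')
#   links = s.sources(movie_url, hostDict, hostprDict)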
| gpl-2.0 |
ryfeus/lambda-packs | Tensorflow/source/tensorflow/python/estimator/training.py | 2 | 31332 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes and functions related to train_and_evaluate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import os
import time
import six
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import exporter as exporter_lib
from tensorflow.python.estimator import run_config as run_config_lib
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import server_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.util import compat
_MAX_DELAY_SECS = 60
_DELAY_SECS_PER_WORKER = 5
_TF_CONFIG_ENV = 'TF_CONFIG'
_ENVIRONMENT_KEY = 'environment'
_ENVIRONMENT_GOOGLE_VALUE = 'google'
def _validate_input_fn(input_fn):
"""Validates the `input_fn`."""
if not callable(input_fn):
raise TypeError(
'`input_fn` must be callable, given: {}'.format(input_fn))
def _validate_hooks(hooks):
"""Validates the `hooks`."""
hooks = tuple(hooks or [])
for hook in hooks:
if not isinstance(hook, session_run_hook.SessionRunHook):
raise TypeError(
'All hooks must be `SessionRunHook` instances, given: {}'.format(
hook))
return hooks
def _validate_exporters(exporters):
"""Validates `exporters` and returns them as a tuple."""
if not exporters:
return ()
if isinstance(exporters, exporter_lib.Exporter):
exporters = [exporters]
unique_names = [] # `Exporter`s should have unique names.
try:
for exporter in exporters:
if not isinstance(exporter, exporter_lib.Exporter):
# Error message will be printed out by the outer try/except.
raise TypeError
if not exporter.name:
full_list_of_names = [e.name for e in exporters]
raise ValueError('An Exporter cannot have a name that is `None` or'
' empty. All exporter names:'
' {}'.format(full_list_of_names))
if not isinstance(exporter.name, six.string_types):
raise ValueError('An Exporter must have a string name. Given: '
'{}'.format(type(exporter.name)))
if exporter.name in unique_names:
full_list_of_names = [e.name for e in exporters]
raise ValueError(
'`exporters` must have unique names. Such a name cannot be `None`.'
' All exporter names: {}'.format(full_list_of_names))
unique_names.append(exporter.name)
except TypeError:
# Two possibilities:
# - `exporters` is neither `Exporter` nor iterable. Python has
# raised a `TypeError` when iterating over `exporters`.
# - an `exporter` was None or not of type `Exporter`, so we raised a
# `TypeError`.
raise TypeError('`exporters` must be an Exporter,'
' an iterable of Exporter, or `None`,'
' found %s.' % exporters)
return tuple(exporters)
def _is_google_env():
"""Detects whether current environment is google."""
tf_config = json.loads(os.environ.get(_TF_CONFIG_ENV) or '{}')
if not tf_config:
logging.warn('TF_CONFIG should not be empty in distributed environment.')
return tf_config.get(_ENVIRONMENT_KEY) == _ENVIRONMENT_GOOGLE_VALUE
class TrainSpec(
collections.namedtuple('TrainSpec', ['input_fn', 'max_steps', 'hooks'])):
"""Configuration for the "train" part for the `train_and_evaluate` call.
`TrainSpec` determines the input data for the training, as well as the
duration. Optional hooks run at various stages of training.
"""
def __new__(cls,
input_fn,
max_steps=None,
hooks=None):
"""Creates a validated `TrainSpec` instance.
Args:
input_fn: Training input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
max_steps: Int. Positive number of total steps for which to train model.
If `None`, train forever. The training `input_fn` is not expected to
generate `OutOfRangeError` or `StopIteration` exceptions. See the
`train_and_evaluate` stop condition section for details.
hooks: Iterable of `tf.train.SessionRunHook` objects to run
on all workers (including chief) during training.
Returns:
A validated `TrainSpec` object.
Raises:
ValueError: If any of the input arguments is invalid.
TypeError: If any of the arguments is not of the expected type.
"""
# Validate input_fn.
_validate_input_fn(input_fn)
# Validate max_steps.
if max_steps is not None and max_steps <= 0:
raise ValueError(
'Must specify max_steps > 0, given: {}'.format(max_steps))
# Validate hooks.
hooks = _validate_hooks(hooks)
return super(TrainSpec, cls).__new__(
cls,
input_fn=input_fn,
max_steps=max_steps,
hooks=hooks)
class EvalSpec(
collections.namedtuple('EvalSpec', [
'input_fn', 'steps', 'name', 'hooks', 'exporters',
'start_delay_secs', 'throttle_secs'
])):
"""Configuration for the "eval" part for the `train_and_evaluate` call.
`EvalSpec` combines details of evaluation of the trained model as well as its
export. Evaluation consists of computing metrics to judge the performance of
the trained model. Export writes out the trained model on to external
storage.
"""
def __new__(cls,
input_fn,
steps=100,
name=None,
hooks=None,
exporters=None,
start_delay_secs=120,
throttle_secs=600):
"""Creates a validated `EvalSpec` instance.
Args:
input_fn: Evaluation input function returning a tuple of:
features - `Tensor` or dictionary of string feature name to `Tensor`.
labels - `Tensor` or dictionary of `Tensor` with labels.
steps: Int. Positive number of steps for which to evaluate model. If
`None`, evaluates until `input_fn` raises an end-of-input exception.
See `Estimator.evaluate` for details.
name: String. Name of the evaluation if user needs to run multiple
evaluations on different data sets. Metrics for different evaluations
are saved in separate folders, and appear separately in tensorboard.
      hooks: Iterable of `tf.train.SessionRunHook` objects to run
        during evaluation.
exporters: Iterable of `Exporter`s, or a single one, or `None`.
`exporters` will be invoked after each evaluation.
start_delay_secs: Int. Start evaluating after waiting for this many
seconds.
throttle_secs: Int. Do not re-evaluate unless the last evaluation was
started at least this many seconds ago. Of course, evaluation does not
occur if no new checkpoints are available, hence, this is the minimum.
Returns:
A validated `EvalSpec` object.
Raises:
ValueError: If any of the input arguments is invalid.
TypeError: If any of the arguments is not of the expected type.
"""
# Validate input_fn.
_validate_input_fn(input_fn)
# Validate steps.
if steps is not None and steps <= 0:
raise ValueError('Must specify steps > 0, given: {}'.format(steps))
# Validate name.
if name is not None and not isinstance(name, six.string_types):
raise TypeError('`name` must be string, given: {}'.format(name))
# Validate hooks.
hooks = _validate_hooks(hooks)
# Validate exporters.
exporters = _validate_exporters(exporters)
# Validate start_delay_secs.
if start_delay_secs < 0:
raise ValueError('Must specify start_delay_secs >= 0, given: {}'.format(
start_delay_secs))
# Validate throttle_secs.
if throttle_secs < 0:
raise ValueError(
'Must specify throttle_secs >= 0, given: {}'.format(throttle_secs))
return super(EvalSpec, cls).__new__(
cls,
input_fn=input_fn,
steps=steps,
name=name,
hooks=hooks,
exporters=exporters,
start_delay_secs=start_delay_secs,
throttle_secs=throttle_secs)
def train_and_evaluate(estimator, train_spec, eval_spec):
"""Train and evaluate the `estimator`.
This utility function trains, evaluates, and (optionally) exports the model by
using the given `estimator`. All training related specification is held in
`train_spec`, including training `input_fn` and training max steps, etc. All
evaluation and export related specification is held in `eval_spec`, including
evaluation `input_fn`, steps, etc.
This utility function provides consistent behavior for both local
(non-distributed) and distributed configurations. Currently, the only
supported distributed training configuration is between-graph replication.
Overfitting: In order to avoid overfitting, it is recommended to set up the
training `input_fn` to shuffle the training data properly. It is also
recommended to train the model a little longer, say multiple epochs, before
performing evaluation, as the input pipeline starts from scratch for each
training. It is particularly important for local training and evaluation.
Stop condition: In order to support both distributed and non-distributed
configuration reliably, the only supported stop condition for model
training is `train_spec.max_steps`. If `train_spec.max_steps` is `None`, the
model is trained forever. *Use with care* if model stop condition is
different. For example, assume that the model is expected to be trained with
one epoch of training data, and the training `input_fn` is configured to throw
`OutOfRangeError` after going through one epoch, which stops the
`Estimator.train`. For a three-training-worker distributed configuration, each
training worker is likely to go through the whole epoch independently. So, the
model will be trained with three epochs of training data instead of one epoch.
Example of local (non-distributed) training:
```python
# Set up feature columns.
  categorical_feature_a = categorical_column_with_hash_bucket(...)
  categorical_feature_a_emb = embedding_column(
      categorical_column=categorical_feature_a, ...)
... # other feature columns
estimator = DNNClassifier(
      feature_columns=[categorical_feature_a_emb, ...],
hidden_units=[1024, 512, 256])
# Or set up the model directory
# estimator = DNNClassifier(
# config=tf.estimator.RunConfig(
# model_dir='/my_model', save_summary_steps=100),
  #     feature_columns=[categorical_feature_a_emb, ...],
# hidden_units=[1024, 512, 256])
# Input pipeline for train and evaluate.
  def train_input_fn():  # returns x, y
# please shuffle the data.
pass
  def eval_input_fn():  # returns x, y
pass
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=1000)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
```
Example of distributed training:
Regarding the example of distributed training, the code above can be used
without a change (Please do make sure that the `RunConfig.model_dir` for all
workers is set to the same directory, i.e., a shared file system all workers
can read and write). The only extra work to do is setting the environment
variable `TF_CONFIG` properly for each worker correspondingly.
Also see: https://www.tensorflow.org/deploy/distributed
Setting environment variable depends on the platform. For example, on Linux,
it can be done as follows (`$` is the shell prompt):
```
$ TF_CONFIG='<replace_with_real_content>' python train_model.py
```
For the content in `TF_CONFIG`, assume that the training cluster spec looks
like:
```
cluster = {"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]}
```
Example of `TF_CONFIG` for chief training worker (must have one and only one):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "chief", "index": 0}
}'
```
Note that the chief worker also does the model training job, similar to other
non-chief training workers (see next paragraph). In addition to the model
training, it manages some extra work, e.g., checkpoint saving and restoring,
writing summaries, etc.
Example of `TF_CONFIG` for non-chief training worker (optional, could be
multiple):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "worker", "index": 0}
}'
```
where the `task.index` should be set as 0, 1, 2, in this example, respectively
for non-chief training workers.
Example of `TF_CONFIG` for parameter server, aka ps (could be multiple):
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "ps", "index": 0}
}'
```
where the `task.index` should be set as 0 and 1, in this example, respectively
for parameter servers.
Example of `TF_CONFIG` for evaluator task. Evaluator is a special task that is
not part of the training cluster. There could be only one. It is used for
model evaluation.
```
# This should be a JSON string, which is set as environment variable. Usually
# the cluster manager handles that.
TF_CONFIG='{
"cluster": {
"chief": ["host0:2222"],
"worker": ["host1:2222", "host2:2222", "host3:2222"],
"ps": ["host4:2222", "host5:2222"]
},
"task": {"type": "evaluator", "index": 0}
}'
```
Args:
estimator: An `Estimator` instance to train and evaluate.
    train_spec: A `TrainSpec` instance to specify the training specification.
    eval_spec: An `EvalSpec` instance to specify the evaluation and export
      specification.
Raises:
ValueError: if environment variable `TF_CONFIG` is incorrectly set.
"""
if not isinstance(estimator, estimator_lib.Estimator):
raise TypeError('`estimator` must have type `tf.estimator.Estimator`, '
'given {}'.format(type(estimator)))
config = estimator.config
executor = _TrainingExecutor(estimator=estimator, train_spec=train_spec,
eval_spec=eval_spec)
if (not config.cluster_spec and
config.task_type != run_config_lib.TaskType.EVALUATOR):
logging.info('Running training and evaluation locally (non-distributed).')
executor.run_local()
return
# Distributed case.
if not config.task_type:
# TODO(xiejw): Improve the error message about how to set the TF_CONFIG
# correctly.
raise ValueError(
'`estimator.config` must have task_type set. This usually means '
'TF_CONFIG environment is not set correctly.')
if config.task_type == 'local':
raise ValueError(
'`task.type` in TF_CONFIG cannot be `local`. Leaving `cluster` and '
'`task` properties in TF_CONFIG absent triggers train and evaluate '
'`Estimator` locally (non-distributed).')
if (config.task_type == run_config_lib.TaskType.EVALUATOR and
config.task_id > 0):
raise ValueError(
'For distributed training, there can only be one `evaluator` task '
'(with task id 0). Given task id {}'.format(config.task_id))
# For task type foo, call executor.run_foo.
available_tasks = [x for x in dir(executor) if x.startswith('run_')
and x != 'run_local'
and callable(getattr(executor, x))]
task_to_run = 'run_' + config.task_type
if task_to_run not in available_tasks:
raise ValueError(
'Task type {} is not supported. Supported task types are {}'.format(
config.task_type, [x[len('run_'):] for x in available_tasks]))
getattr(executor, task_to_run)()
return
class _StopAtSecsHook(session_run_hook.SessionRunHook):
"""Stops given secs after begin is called."""
def __init__(self, stop_after_secs):
self._stop_after_secs = stop_after_secs
self._start_time = None
def begin(self):
self._start_time = time.time()
def after_run(self, run_context, run_values):
del run_values
if time.time() - self._start_time >= self._stop_after_secs:
run_context.request_stop()
class _TrainingExecutor(object):
"""The executor to run `Estimator` training and evaluation.
This implementation supports both distributed and non-distributed (aka local)
training and evaluation based on the setting in `tf.estimator.RunConfig`.
"""
def __init__(self, estimator, train_spec, eval_spec):
if not isinstance(estimator, estimator_lib.Estimator):
raise TypeError('`estimator` must have type `tf.estimator.Estimator`.')
self._estimator = estimator
if not isinstance(train_spec, TrainSpec):
raise TypeError('`train_spec` must have type `tf.estimator.TrainSpec`.')
self._train_spec = train_spec
if not isinstance(eval_spec, EvalSpec):
raise TypeError('`eval_spec` must have type `tf.estimator.EvalSpec`.')
self._eval_spec = eval_spec
@property
def estimator(self):
return self._estimator
def run_chief(self):
"""Runs task chief."""
# TODO(xiejw): To allow execution framework to add train hooks.
return self._start_distributed_training()
def run_worker(self):
"""Runs task (training) worker."""
# TODO(xiejw): To allow execution framework to add train hooks.
return self._start_distributed_training()
def run_master(self):
"""Runs task master."""
class NewCheckpointListener(
basic_session_run_hooks.CheckpointSaverListener):
def __init__(self, evaluator, eval_throttle_secs):
self._evaluator = evaluator
self._eval_throttle_secs = eval_throttle_secs
def begin(self):
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=self._eval_throttle_secs)
def after_save(self, session, global_step_value):
del session # unused; required by signature.
if self._timer.should_trigger_for_step(global_step_value):
self._timer.update_last_triggered_step(global_step_value)
self._evaluator.evaluate_and_export()
else:
logging.info(
'Skip the current checkpoint eval due to throttle secs '
'({} secs).'.format(self._eval_throttle_secs))
# Final export signal: For any eval result with global_step >= train
# max_steps, the evaluator will send the final export signal. There is a
# small chance that the Estimator.train stopping logic sees a different
# global_step value (due to global step race condition and the fact the
    # saver sees a larger value for checkpoint saving), which does not end
# the training. When the training ends, a new checkpoint is generated, which
# triggers the listener again. So, it could be the case the final export is
# triggered twice.
#
# But here, throttle_secs will skip the next intermediate checkpoint and,
# so, the double final export chance is very small.
evaluator = _TrainingExecutor._Evaluator(
self._estimator, self._eval_spec, self._train_spec.max_steps)
# When the underlying `Estimator` object saves a new checkpoint, we would
# like this callback to be called so that evaluation and export can trigger.
saving_listeners = [
NewCheckpointListener(evaluator, self._eval_spec.throttle_secs)
]
self._start_distributed_training(saving_listeners=saving_listeners)
if not evaluator.is_final_export_triggered:
logging.info('Training has already ended. But the last eval is skipped '
'due to eval throttle_secs. Now evaluating the final '
'checkpoint.')
evaluator.evaluate_and_export()
def run_evaluator(self):
"""Runs task evaluator."""
# TODO(xiejw): To allow execution framework to add continuous eval listener.
return self._start_continuous_evaluation()
def run_ps(self):
"""Runs task parameter server (in training cluster spec)."""
config = self._estimator.config
server = self._start_std_server(config)
server.join()
def run_local(self):
"""Runs training and evaluation locally (non-distributed)."""
def _should_stop_local_train(global_step):
if self._train_spec.max_steps is None:
return False
if global_step >= self._train_spec.max_steps:
return True
return False
if self._eval_spec.throttle_secs <= 0:
raise ValueError('eval_spec.throttle_secs should be positive, given: {}.'
                       ' It is used to determine how long each training '
                       'iteration should run when training and evaluating '
'locally.'.format(
self._eval_spec.throttle_secs))
stop_hook = _StopAtSecsHook(self._eval_spec.throttle_secs)
train_hooks = list(self._train_spec.hooks) + [stop_hook]
    logging.info('Starting train and evaluate loop. Evaluation will happen '
'after {} secs (eval_spec.throttle_secs) or training is '
'finished.'.format(self._eval_spec.throttle_secs))
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
while True:
self._estimator.train(
input_fn=self._train_spec.input_fn,
max_steps=self._train_spec.max_steps,
hooks=train_hooks)
# Final export signal: For any eval result with global_step >= train
# max_steps, the evaluator will send the final export signal. The
# _should_stop_local_train will then end the while True as the stopping
# condition is satisfied (both checks use the same global_step value,
# i.e., no race condition)
metrics = evaluator.evaluate_and_export()
if not metrics:
# This is unexpected. Training should always end with a new checkpoint.
raise RuntimeError('There was no new checkpoint after the training.')
if _should_stop_local_train(metrics[ops.GraphKeys.GLOBAL_STEP]):
break
def _start_std_server(self, config):
"""Creates, starts, and returns a server_lib.Server."""
if (not config.cluster_spec or not config.task_type or not config.master or
config.task_id is None):
raise RuntimeError('Could not start server; be sure to specify '
'cluster_spec, task_type, master, and task in '
'RunConfig or set the TF_CONFIG environment variable.')
server = server_lib.Server(
config.cluster_spec,
job_name=config.task_type,
task_index=config.task_id,
config=config_pb2.ConfigProto(log_device_placement=False),
start=False)
server.start()
return server
def _start_distributed_training(self, saving_listeners=None):
"""Calls `Estimator` train in a distributed setting."""
config = self._estimator.config
# Start in-process TensorFlow server if needed. It's important to start the
# server before we (optionally) sleep. Otherwise, the servers will wait to
# connect to each other before starting to train.
if not _is_google_env():
self._start_std_server(config)
    # Delay the worker start. For asynchronous training, this usually helps the
    # model converge faster. The chief starts training immediately, so a worker
# with task id x (0-based) should wait (x+1) * _DELAY_SECS_PER_WORKER.
start_delay_secs = 0
if config.task_type == run_config_lib.TaskType.WORKER:
# TODO(xiejw): Replace the hard code logic (task_id + 1) with unique id in
# training cluster.
start_delay_secs = min(_MAX_DELAY_SECS,
(config.task_id + 1) * _DELAY_SECS_PER_WORKER)
if start_delay_secs > 0:
logging.info('Waiting %d secs before starting training.',
start_delay_secs)
time.sleep(start_delay_secs)
self._estimator.train(input_fn=self._train_spec.input_fn,
max_steps=self._train_spec.max_steps,
hooks=self._train_spec.hooks,
saving_listeners=saving_listeners)
def _start_continuous_evaluation(self):
"""Repeatedly calls `Estimator` evaluate and export until training ends."""
start_delay_secs = self._eval_spec.start_delay_secs
if start_delay_secs:
logging.info('Waiting %f secs before starting eval.', start_delay_secs)
time.sleep(start_delay_secs)
latest_eval_result = None
evaluator = _TrainingExecutor._Evaluator(self._estimator, self._eval_spec,
self._train_spec.max_steps)
while True:
if latest_eval_result:
global_step = latest_eval_result.get(ops.GraphKeys.GLOBAL_STEP)
if (global_step and self._train_spec.max_steps and
global_step >= self._train_spec.max_steps):
logging.info(
'Exiting evaluation, global_step=%s >= train max_steps=%s',
global_step,
self._train_spec.max_steps)
return
# Final export signal: For any eval result with global_step >= train
# max_steps, the evaluator will send the final export signal. The next
# iteration of while loop will end the continuous eval as the stopping
# condition is satisfied (both checks use the same global_step value,
# i.e., no race condition)
start = time.time()
latest_eval_result = evaluator.evaluate_and_export()
# Throttle if necessary.
elapsed_time = time.time() - start
difference = self._eval_spec.throttle_secs - elapsed_time
if difference > 0:
logging.info('Waiting %f secs before starting next eval run.',
difference)
time.sleep(difference)
class _Evaluator(object):
"""A helper class to call `Estimator.evaluate` and export model."""
def __init__(self, estimator, eval_spec, max_training_steps):
self._estimator = estimator
self._eval_spec = eval_spec
self._is_final_export_triggered = False
self._previous_ckpt_path = None
self._last_warning_time = 0
self._max_training_steps = max_training_steps
@property
def is_final_export_triggered(self):
return self._is_final_export_triggered
def evaluate_and_export(self):
"""Evaluate and (maybe) export the current model.
Returns:
Evaluation results. Returns `None` if current round of evaluation is
skipped.
Raises:
RuntimeError: for any unexpected internal error.
TypeError: if evaluation result has wrong type.
"""
latest_ckpt_path = self._estimator.latest_checkpoint()
if not latest_ckpt_path:
self._log_err_msg('Estimator is not trained yet. Will start an '
'evaluation when a checkpoint is ready.')
return None
if latest_ckpt_path == self._previous_ckpt_path:
self._log_err_msg(
'No new checkpoint ready for evaluation. Skip the current '
            'evaluation pass as evaluation results are expected to be the same '
'for the same checkpoint.')
return None
eval_result = self._estimator.evaluate(
input_fn=self._eval_spec.input_fn,
steps=self._eval_spec.steps,
name=self._eval_spec.name,
checkpoint_path=latest_ckpt_path,
hooks=self._eval_spec.hooks)
if not eval_result:
raise RuntimeError(
'Internal error: `Estimator.evaluate` should never return empty '
'result.')
if not isinstance(eval_result, dict):
raise TypeError(
'`Estimator.evaluate` should return dict. Given {}.'.format(
type(eval_result)))
if ops.GraphKeys.GLOBAL_STEP not in eval_result:
raise RuntimeError(
'Internal error: `Estimator.evaluate` result should have '
'`global_step` in result. Given {}'.format(eval_result))
is_the_final_export = (eval_result[ops.GraphKeys.GLOBAL_STEP] >=
self._max_training_steps
if self._max_training_steps else False)
self._export_eval_result(eval_result, latest_ckpt_path,
is_the_final_export)
if is_the_final_export:
logging.debug('Calling exporter with the `is_the_final_export=True`.')
self._is_final_export_triggered = True
self._last_warning_time = 0
self._previous_ckpt_path = latest_ckpt_path
return eval_result
def _log_err_msg(self, message):
"""Prints warning `message` every 10 mins."""
current_time = time.time()
if current_time - self._last_warning_time > 600:
logging.warning(message)
self._last_warning_time = current_time
def _export_eval_result(self, eval_result, checkpoint_path,
is_the_final_export):
"""Export `eval_result` according to exporters in `EvalSpec`."""
export_dir_base = os.path.join(
compat.as_str_any(self._estimator.model_dir),
compat.as_str_any('export'))
for exporter in self._eval_spec.exporters:
exporter.export(
estimator=self._estimator,
export_path=os.path.join(
compat.as_str_any(export_dir_base),
compat.as_str_any(exporter.name)),
checkpoint_path=checkpoint_path,
eval_result=eval_result,
is_the_final_export=is_the_final_export)
| mit |
s20121035/rk3288_android5.1_repo | external/chromium_org/native_client_sdk/src/tools/tests/fix_deps_test.py | 104 | 2807 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import tempfile
import unittest
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
PARENT_DIR = os.path.dirname(SCRIPT_DIR)
DATA_DIR = os.path.join(SCRIPT_DIR, 'data')
CHROME_SRC = os.path.dirname(os.path.dirname(os.path.dirname(PARENT_DIR)))
MOCK_DIR = os.path.join(CHROME_SRC, "third_party", "pymock")
# For the mock library
sys.path.append(MOCK_DIR)
sys.path.append(PARENT_DIR)
import fix_deps
import mock
class TestFixDeps(unittest.TestCase):
def setUp(self):
self.tempfile = None
def tearDown(self):
if self.tempfile:
os.remove(self.tempfile)
def testRequiresFile(self):
with mock.patch('sys.stderr'):
self.assertRaises(SystemExit, fix_deps.main, [])
def testInvalidOption(self):
with mock.patch('sys.stderr'):
self.assertRaises(SystemExit, fix_deps.main, ['--foo', 'bar'])
def testMissingFile(self):
with mock.patch('sys.stderr'):
self.assertRaises(fix_deps.Error, fix_deps.main, ['nonexistent.file'])
def testAddsDeps(self):
self.tempfile = tempfile.mktemp("_sdktest")
with open(self.tempfile, 'w') as out:
out.write('foo.o: foo.c foo.h bar.h\n')
fix_deps.FixupDepFile(self.tempfile)
with open(self.tempfile) as infile:
contents = infile.read()
lines = contents.splitlines()
self.assertEqual(len(lines), 5)
self.assertTrue('foo.c:' in lines)
self.assertTrue('foo.h:' in lines)
self.assertTrue('bar.h:' in lines)
def testSpacesInFilenames(self):
self.tempfile = tempfile.mktemp("_sdktest")
with open(self.tempfile, 'w') as out:
out.write('foo.o: foo\\ bar.h\n')
fix_deps.FixupDepFile(self.tempfile)
with open(self.tempfile) as infile:
contents = infile.read()
lines = contents.splitlines()
self.assertEqual(len(lines), 3)
self.assertEqual(lines[2], 'foo\\ bar.h:')
def testColonInFilename(self):
self.tempfile = tempfile.mktemp("_sdktest")
with open(self.tempfile, 'w') as out:
out.write('foo.o: c:foo.c\\\n c:bar.h\n')
fix_deps.FixupDepFile(self.tempfile)
with open(self.tempfile) as infile:
contents = infile.read()
lines = contents.splitlines()
self.assertEqual(len(lines), 5)
self.assertEqual(lines[3], 'c:foo.c:')
self.assertEqual(lines[4], 'c:bar.h:')
def testDoubleInvoke(self):
self.tempfile = tempfile.mktemp("_sdktest")
with open(self.tempfile, 'w') as out:
out.write('foo.o: foo\\ bar.h\n')
fix_deps.FixupDepFile(self.tempfile)
self.assertRaises(fix_deps.Error, fix_deps.FixupDepFile, self.tempfile)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
CoderBotOrg/coderbotsrv | server/lib/cryptography/hazmat/bindings/commoncrypto/common_digest.py | 15 | 1598 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <CommonCrypto/CommonDigest.h>
"""
TYPES = """
typedef uint32_t CC_LONG;
typedef uint64_t CC_LONG64;
typedef struct CC_MD5state_st {
...;
} CC_MD5_CTX;
typedef struct CC_SHA1state_st {
...;
} CC_SHA1_CTX;
typedef struct CC_SHA256state_st {
...;
} CC_SHA256_CTX;
typedef struct CC_SHA512state_st {
...;
} CC_SHA512_CTX;
"""
FUNCTIONS = """
int CC_MD5_Init(CC_MD5_CTX *);
int CC_MD5_Update(CC_MD5_CTX *, const void *, CC_LONG);
int CC_MD5_Final(unsigned char *, CC_MD5_CTX *);
int CC_SHA1_Init(CC_SHA1_CTX *);
int CC_SHA1_Update(CC_SHA1_CTX *, const void *, CC_LONG);
int CC_SHA1_Final(unsigned char *, CC_SHA1_CTX *);
int CC_SHA224_Init(CC_SHA256_CTX *);
int CC_SHA224_Update(CC_SHA256_CTX *, const void *, CC_LONG);
int CC_SHA224_Final(unsigned char *, CC_SHA256_CTX *);
int CC_SHA256_Init(CC_SHA256_CTX *);
int CC_SHA256_Update(CC_SHA256_CTX *, const void *, CC_LONG);
int CC_SHA256_Final(unsigned char *, CC_SHA256_CTX *);
int CC_SHA384_Init(CC_SHA512_CTX *);
int CC_SHA384_Update(CC_SHA512_CTX *, const void *, CC_LONG);
int CC_SHA384_Final(unsigned char *, CC_SHA512_CTX *);
int CC_SHA512_Init(CC_SHA512_CTX *);
int CC_SHA512_Update(CC_SHA512_CTX *, const void *, CC_LONG);
int CC_SHA512_Final(unsigned char *, CC_SHA512_CTX *);
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
CONDITIONAL_NAMES = {}
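# These declarations are consumed by the cffi binding layer; a rough sketch of
# how the resulting symbols are usually reached (the Binding import path and
# attributes below are assumptions, not part of this file):
#
#   from cryptography.hazmat.bindings.commoncrypto.binding import Binding
#   b = Binding()
#   ctx = b.ffi.new("CC_SHA256_CTX *")
#   b.lib.CC_SHA256_Init(ctx)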
| gpl-3.0 |
0k/OpenUpgrade | addons/account/project/wizard/account_analytic_inverted_balance_report.py | 378 | 2045 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class account_analytic_inverted_balance(osv.osv_memory):
_name = 'account.analytic.inverted.balance'
_description = 'Account Analytic Inverted Balance'
_columns = {
'date1': fields.date('Start of period', required=True),
'date2': fields.date('End of period', required=True),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d')
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids)[0]
datas = {
'ids': context.get('active_ids', []),
'model': 'account.analytic.account',
'form': data
}
datas['form']['active_ids'] = context.get('active_ids', False)
return self.pool['report'].get_action(cr, uid, [], 'account.report_invertedanalyticbalance', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ianyh/heroku-buildpack-python-opencv | vendor/pip-1.5.4/pip/_vendor/requests/compat.py | 571 | 2556 | # -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------
# Syntax sugar.
_ver = sys.version_info
#: Python 2.x?
is_py2 = (_ver[0] == 2)
#: Python 3.x?
is_py3 = (_ver[0] == 3)
#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)
#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)
#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)
#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)
#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)
#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)
#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)
#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)
#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4) # I'm assuming this is not by choice.
# ---------
# Platforms
# ---------
# Syntax sugar.
_ver = sys.version.lower()
is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)
# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))
# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()
# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower()) # Complete guess.
is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess.
try:
import simplejson as json
except ImportError:
import json
# ---------
# Specifics
# ---------
if is_py2:
from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
from urllib2 import parse_http_list
import cookielib
from Cookie import Morsel
from StringIO import StringIO
from .packages.urllib3.packages.ordered_dict import OrderedDict
from httplib import IncompleteRead
builtin_str = str
bytes = str
str = unicode
basestring = basestring
numeric_types = (int, long, float)
elif is_py3:
from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
from urllib.request import parse_http_list, getproxies, proxy_bypass
from http import cookiejar as cookielib
from http.cookies import Morsel
from io import StringIO
from collections import OrderedDict
from http.client import IncompleteRead
builtin_str = str
str = str
bytes = bytes
basestring = (str, bytes)
numeric_types = (int, float)
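# Example (hypothetical): downstream modules import these aliases so a single
# code path runs on both Python 2 and 3, e.g.
#
#   from .compat import urlparse, builtin_str
#   host = urlparse('http://example.com/path').netloc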
| mit |
kgilmo/penning_artiq | artiq/frontend/artiq_client.py | 1 | 10078 | #!/usr/bin/env python3
import argparse
import logging
import time
import asyncio
import sys
from operator import itemgetter
from dateutil.parser import parse as parse_date
from prettytable import PrettyTable
from artiq.protocols.pc_rpc import Client
from artiq.protocols.sync_struct import Subscriber
from artiq.protocols import pyon
from artiq.tools import short_format
def clear_screen():
sys.stdout.write("\x1b[2J\x1b[H")
def get_argparser():
parser = argparse.ArgumentParser(description="ARTIQ CLI client")
parser.add_argument(
"-s", "--server", default="::1",
help="hostname or IP of the master to connect to")
parser.add_argument(
"--port", default=None, type=int,
help="TCP port to use to connect to the master")
subparsers = parser.add_subparsers(dest="action")
subparsers.required = True
parser_add = subparsers.add_parser("submit", help="submit an experiment")
parser_add.add_argument("-p", "--pipeline", default="main", type=str,
help="pipeline to run the experiment in "
"(default: %(default)s)")
parser_add.add_argument("-P", "--priority", default=0, type=int,
help="priority (higher value means sooner "
"scheduling, default: %(default)s)")
parser_add.add_argument("-t", "--timed", default=None, type=str,
help="set a due date for the experiment")
parser_add.add_argument("-f", "--flush", default=False, action="store_true",
help="flush the pipeline before preparing "
"the experiment")
parser_add.add_argument("-R", "--repository", default=False,
action="store_true",
help="use the experiment repository")
parser_add.add_argument("-r", "--revision", default=None,
help="use a specific repository revision "
"(defaults to head, ignored without -R)")
parser_add.add_argument("-c", "--class-name", default=None,
help="name of the class to run")
parser_add.add_argument("-v", "--verbose", default=0, action="count",
help="increase logging level of the experiment")
parser_add.add_argument("-q", "--quiet", default=0, action="count",
help="decrease logging level of the experiment")
parser_add.add_argument("file",
help="file containing the experiment to run")
parser_add.add_argument("arguments", nargs="*",
help="run arguments")
parser_delete = subparsers.add_parser("delete",
help="delete an experiment "
"from the schedule")
parser_delete.add_argument("-g", action="store_true",
help="request graceful termination")
parser_delete.add_argument("rid", type=int,
help="run identifier (RID)")
parser_set_dataset = subparsers.add_parser(
"set-dataset", help="add or modify a dataset")
parser_set_dataset.add_argument("name", help="name of the dataset")
parser_set_dataset.add_argument("value",
help="value in PYON format")
parser_set_dataset.add_argument("-p", "--persist", action="store_true",
help="make the dataset persistent")
parser_del_dataset = subparsers.add_parser(
"del-dataset", help="delete a dataset")
parser_del_dataset.add_argument("name", help="name of the dataset")
parser_show = subparsers.add_parser(
"show", help="show schedule, log, devices or datasets")
parser_show.add_argument(
"what",
help="select object to show: schedule/log/devices/datasets")
subparsers.add_parser(
"scan-devices", help="trigger a device database (re)scan")
parser_scan_repos = subparsers.add_parser(
"scan-repository", help="trigger a repository (re)scan")
parser_scan_repos.add_argument("revision", default=None, nargs="?",
help="use a specific repository revision "
"(defaults to head)")
return parser
def _parse_arguments(arguments):
d = {}
for argument in arguments:
name, value = argument.split("=")
d[name] = pyon.decode(value)
return d
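# Example (hedged; the argument names and values below are hypothetical):
#     >>> _parse_arguments(["npoints=100", "comment='test run'"])
#     {'npoints': 100, 'comment': 'test run'}
# assuming pyon.decode() accepts these Python-literal values.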
def _action_submit(remote, args):
try:
arguments = _parse_arguments(args.arguments)
    except Exception:
print("Failed to parse run arguments")
sys.exit(1)
expid = {
"log_level": logging.WARNING + args.quiet*10 - args.verbose*10,
"file": args.file,
"class_name": args.class_name,
"arguments": arguments,
}
if args.repository:
expid["repo_rev"] = args.revision
if args.timed is None:
due_date = None
else:
due_date = time.mktime(parse_date(args.timed).timetuple())
rid = remote.submit(args.pipeline, expid,
args.priority, due_date, args.flush)
print("RID: {}".format(rid))
def _action_delete(remote, args):
if args.g:
remote.request_termination(args.rid)
else:
remote.delete(args.rid)
def _action_set_dataset(remote, args):
remote.set(args.name, pyon.decode(args.value), args.persist)
def _action_del_dataset(remote, args):
remote.delete(args.name)
def _action_scan_devices(remote, args):
remote.scan()
def _action_scan_repository(remote, args):
remote.scan_async(args.revision)
def _show_schedule(schedule):
clear_screen()
if schedule:
l = sorted(schedule.items(),
key=lambda x: (-x[1]["priority"],
x[1]["due_date"] or 0,
x[0]))
table = PrettyTable(["RID", "Pipeline", " Status ", "Prio",
"Due date", "Revision", "File", "Class name"])
for rid, v in l:
row = [rid, v["pipeline"], v["status"], v["priority"]]
if v["due_date"] is None:
row.append("")
else:
row.append(time.strftime("%m/%d %H:%M:%S",
time.localtime(v["due_date"])))
expid = v["expid"]
if "repo_rev" in expid:
row.append(expid["repo_rev"])
else:
row.append("Outside repo.")
row.append(expid["file"])
if expid["class_name"] is None:
row.append("")
else:
row.append(expid["class_name"])
table.add_row(row)
print(table)
else:
print("Schedule is empty")
def _show_devices(devices):
clear_screen()
table = PrettyTable(["Name", "Description"])
table.align["Description"] = "l"
for k, v in sorted(devices.items(), key=itemgetter(0)):
table.add_row([k, pyon.encode(v, True)])
print(table)
def _show_datasets(datasets):
clear_screen()
table = PrettyTable(["Dataset", "Persistent", "Value"])
for k, (persist, value) in sorted(datasets.items(), key=itemgetter(0)):
table.add_row([k, "Y" if persist else "N", short_format(value)])
print(table)
def _run_subscriber(host, port, subscriber):
if port is None:
port = 3250
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(subscriber.connect(host, port))
try:
loop.run_until_complete(asyncio.wait_for(subscriber.receive_task,
None))
print("Connection to master lost")
finally:
loop.run_until_complete(subscriber.close())
finally:
loop.close()
def _show_dict(args, notifier_name, display_fun):
d = dict()
def init_d(x):
d.clear()
d.update(x)
return d
subscriber = Subscriber(notifier_name, init_d,
lambda mod: display_fun(d))
_run_subscriber(args.server, args.port, subscriber)
def _print_log_record(record):
level, source, t, message = record
t = time.strftime("%m/%d %H:%M:%S", time.localtime(t))
print(level, source, t, message)
class _LogPrinter:
def __init__(self, init):
for record in init:
_print_log_record(record)
def append(self, record):
_print_log_record(record)
def insert(self, i, record):
_print_log_record(record)
def pop(self, i=-1):
pass
def __delitem__(self, x):
pass
def __setitem__(self, k, v):
pass
def _show_log(args):
subscriber = Subscriber("log", _LogPrinter)
_run_subscriber(args.server, args.port, subscriber)
def main():
args = get_argparser().parse_args()
action = args.action.replace("-", "_")
if action == "show":
if args.what == "schedule":
_show_dict(args, "schedule", _show_schedule)
elif args.what == "log":
_show_log(args)
elif args.what == "devices":
_show_dict(args, "devices", _show_devices)
elif args.what == "datasets":
_show_dict(args, "datasets", _show_datasets)
else:
print("Unknown object to show, use -h to list valid names.")
sys.exit(1)
else:
port = 3251 if args.port is None else args.port
target_name = {
"submit": "master_schedule",
"delete": "master_schedule",
"set_dataset": "master_dataset_db",
"del_dataset": "master_dataset_db",
"scan_devices": "master_device_db",
"scan_repository": "master_repository"
}[action]
remote = Client(args.server, port, target_name)
try:
globals()["_action_" + action](remote, args)
finally:
remote.close_rpc()
if __name__ == "__main__":
main()
| gpl-3.0 |
nicky-ji/edx-nicky | common/djangoapps/track/migrations/0002_auto__add_field_trackinglog_host__chg_field_trackinglog_event_type__ch.py | 189 | 2486 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'TrackingLog.host'
db.add_column('track_trackinglog', 'host',
self.gf('django.db.models.fields.CharField')(default='', max_length=64, blank=True),
keep_default=False)
# Changing field 'TrackingLog.event_type'
db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=512))
# Changing field 'TrackingLog.page'
db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=512, null=True))
def backwards(self, orm):
# Deleting field 'TrackingLog.host'
db.delete_column('track_trackinglog', 'host')
# Changing field 'TrackingLog.event_type'
db.alter_column('track_trackinglog', 'event_type', self.gf('django.db.models.fields.CharField')(max_length=32))
# Changing field 'TrackingLog.page'
db.alter_column('track_trackinglog', 'page', self.gf('django.db.models.fields.CharField')(max_length=32, null=True))
models = {
'track.trackinglog': {
'Meta': {'object_name': 'TrackingLog'},
'agent': ('django.db.models.fields.CharField', [], {'max_length': '256', 'blank': 'True'}),
'dtcreated': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'event': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'event_source': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'event_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'host': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'page': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'})
}
}
complete_apps = ['track']
| agpl-3.0 |
luca76/QGIS | python/plugins/processing/script/scripts/Chainage.py | 2 | 1141 | ##[Example scripts]=group
##lines=vector
##distance=number 1
##startpoint=number 0
##endpoint=number 0
##output=output vector
from qgis.core import *
from PyQt4.QtCore import *
from processing.core.VectorWriter import VectorWriter
def create_points(feat):
geom = feat.geometry()
length = geom.length()
currentdistance = 0
if endpoint > 0:
length = endpoint
out = QgsFeature()
while startpoint + currentdistance <= length:
point = geom.interpolate(startpoint + currentdistance)
        out.setGeometry(point)
        attrs = feat.attributes()
        # record the distance used for this point before stepping to the next one
        attrs.append(currentdistance)
        out.setAttributes(attrs)
        writer.addFeature(out)
        currentdistance = currentdistance + distance
layer = processing.getObject(lines)
fields = layer.dataProvider().fields()
fields.append(QgsField('Distance', QVariant.Double))
writer = VectorWriter(output, None, fields, QGis.WKBPoint,
layer.crs())
feats = processing.features(layer)
nFeat = len(feats)
for i, feat in enumerate(feats):
progress.setPercentage(int(100 * i / nFeat))
create_points(feat)
del writer
| gpl-2.0 |
cheehieu/bitcoin | qa/rpc-tests/test_framework/netutil.py | 328 | 4562 | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
# Linux network utilities
import sys
import socket
import fcntl
import struct
import array
import os
import binascii
# Roughly based on http://voorloopnul.com/blog/a-python-netstat-in-less-than-100-lines-of-code/ by Ricardo Pascal
STATE_ESTABLISHED = '01'
STATE_SYN_SENT = '02'
STATE_SYN_RECV = '03'
STATE_FIN_WAIT1 = '04'
STATE_FIN_WAIT2 = '05'
STATE_TIME_WAIT = '06'
STATE_CLOSE = '07'
STATE_CLOSE_WAIT = '08'
STATE_LAST_ACK = '09'
STATE_LISTEN = '0A'
STATE_CLOSING = '0B'
def get_socket_inodes(pid):
'''
Get list of socket inodes for process pid.
'''
base = '/proc/%i/fd' % pid
inodes = []
for item in os.listdir(base):
target = os.readlink(os.path.join(base, item))
if target.startswith('socket:'):
inodes.append(int(target[8:-1]))
return inodes
def _remove_empty(array):
return [x for x in array if x !='']
def _convert_ip_port(array):
host,port = array.split(':')
# convert host from mangled-per-four-bytes form as used by kernel
host = binascii.unhexlify(host)
host_out = ''
for x in range(0, len(host)/4):
(val,) = struct.unpack('=I', host[x*4:(x+1)*4])
host_out += '%08x' % val
return host_out,int(port,16)
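# Example (hedged; assumes a little-endian host, as on typical x86 Linux):
#     >>> _convert_ip_port('0100007F:0050')
#     ('7f000001', 80)
# i.e. the kernel's per-four-byte mangled form of 127.0.0.1:80.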
def netstat(typ='tcp'):
'''
    Return a list with the status of TCP connections on Linux systems.
    To get the PID of every network process running on the system, this
    script must be run as superuser.
'''
with open('/proc/net/'+typ,'r') as f:
content = f.readlines()
content.pop(0)
result = []
for line in content:
line_array = _remove_empty(line.split(' ')) # Split lines and remove empty spaces.
tcp_id = line_array[0]
l_addr = _convert_ip_port(line_array[1])
r_addr = _convert_ip_port(line_array[2])
state = line_array[3]
inode = int(line_array[9]) # Need the inode to match with process pid.
nline = [tcp_id, l_addr, r_addr, state, inode]
result.append(nline)
return result
def get_bind_addrs(pid):
'''
Get bind addresses as (host,port) tuples for process pid.
'''
inodes = get_socket_inodes(pid)
bind_addrs = []
for conn in netstat('tcp') + netstat('tcp6'):
if conn[3] == STATE_LISTEN and conn[4] in inodes:
bind_addrs.append(conn[1])
return bind_addrs
# from: http://code.activestate.com/recipes/439093/
def all_interfaces():
'''
Return all interfaces that are up
'''
is_64bits = sys.maxsize > 2**32
struct_size = 40 if is_64bits else 32
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
max_possible = 8 # initial value
while True:
bytes = max_possible * struct_size
names = array.array('B', '\0' * bytes)
outbytes = struct.unpack('iL', fcntl.ioctl(
s.fileno(),
0x8912, # SIOCGIFCONF
struct.pack('iL', bytes, names.buffer_info()[0])
))[0]
if outbytes == bytes:
max_possible *= 2
else:
break
namestr = names.tostring()
return [(namestr[i:i+16].split('\0', 1)[0],
socket.inet_ntoa(namestr[i+20:i+24]))
for i in range(0, outbytes, struct_size)]
def addr_to_hex(addr):
'''
Convert string IPv4 or IPv6 address to binary address as returned by
get_bind_addrs.
Very naive implementation that certainly doesn't work for all IPv6 variants.
'''
if '.' in addr: # IPv4
addr = [int(x) for x in addr.split('.')]
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
addr = sub[0] + ([0] * nullbytes) + sub[1]
else:
raise ValueError('Could not parse address %s' % addr)
return binascii.hexlify(bytearray(addr))
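# Example (hedged):
#     >>> addr_to_hex('127.0.0.1')
#     '7f000001'
#     >>> addr_to_hex('::1')
#     '00000000000000000000000000000001'
# matching the address format returned by _convert_ip_port()/get_bind_addrs().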
| mit |
simongoffin/my_odoo_tutorial | openerp/workflow/__init__.py | 378 | 3793 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2014 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.workflow.service import WorkflowService
# The new API is in openerp.workflow.workflow_service
# OLD API of the Workflow
def clear_cache(cr, uid):
WorkflowService.clear_cache(cr.dbname)
def trg_write(uid, res_type, res_id, cr):
"""
    Reevaluates the specified workflow instance. Thus, if any condition for
    a transition has been changed in the backend, then running ``trg_write``
    will move the workflow over that transition.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).write()
def trg_trigger(uid, res_type, res_id, cr):
"""
Activate a trigger.
If a workflow instance is waiting for a trigger from another model, then this
trigger can be activated if its conditions are met.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).trigger()
def trg_delete(uid, res_type, res_id, cr):
"""
Delete a workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).delete()
def trg_create(uid, res_type, res_id, cr):
"""
Create a new workflow instance
:param res_type: the model name
:param res_id: the model instance id to own the created worfklow instance
:param cr: a database cursor
"""
return WorkflowService.new(cr, uid, res_type, res_id).create()
def trg_validate(uid, res_type, res_id, signal, cr):
"""
Fire a signal on a given workflow instance
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:signal: the signal name to be fired
:param cr: a database cursor
"""
assert isinstance(signal, basestring)
return WorkflowService.new(cr, uid, res_type, res_id).validate(signal)
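# Example (hedged; the model name, record id and signal are hypothetical):
#     trg_validate(uid, 'sale.order', order_id, 'order_confirm', cr)
# fires the 'order_confirm' signal on the workflow instance bound to that record.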
def trg_redirect(uid, res_type, res_id, new_rid, cr):
"""
Re-bind a workflow instance to another instance of the same model.
Make all workitems which are waiting for a (subflow) workflow instance
for the old resource point to the (first active) workflow instance for
the new resource.
:param res_type: the model name
:param res_id: the model instance id the workflow belongs to
:param new_rid: the model instance id to own the worfklow instance
:param cr: a database cursor
"""
assert isinstance(new_rid, (long, int))
return WorkflowService.new(cr, uid, res_type, res_id).redirect(new_rid)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
m2ware/ControlSocket | src/tinyxml/setversion.py | 33 | 2478 | # Python program to set the version.
##############################################
import re
import sys
def fileProcess( name, lineFunction ):
filestream = open( name, 'r' )
if filestream.closed:
print( "file " + name + " not open." )
return
output = ""
print( "--- Processing " + name + " ---------" )
while 1:
line = filestream.readline()
if not line: break
output += lineFunction( line )
filestream.close()
if not output: return # basic error checking
print( "Writing file " + name )
filestream = open( name, "w" );
filestream.write( output );
filestream.close()
def echoInput( line ):
return line
major = input( "Major: " )
minor = input( "Minor: " )
build = input( "Build: " )
print "Setting dox,tinyxml2.h"
print "Version: " + `major` + "." + `minor` + "." + `build`
#### Write the tinyxml.h ####
def engineRule( line ):
matchMajor = "static const int TIXML2_MAJOR_VERSION"
matchMinor = "static const int TIXML2_MINOR_VERSION"
matchBuild = "static const int TIXML2_PATCH_VERSION"
if line[0:len(matchMajor)] == matchMajor:
print "1)tinyxml2.h Major found"
return matchMajor + " = " + `major` + ";\n"
elif line[0:len(matchMinor)] == matchMinor:
print "2)tinyxml2.h Minor found"
return matchMinor + " = " + `minor` + ";\n"
elif line[0:len(matchBuild)] == matchBuild:
print "3)tinyxml2.h Build found"
return matchBuild + " = " + `build` + ";\n"
else:
return line;
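# Example (hedged): with major == 3, a source line such as
#     "static const int TIXML2_MAJOR_VERSION = 2;"
# is rewritten by engineRule to
#     "static const int TIXML2_MAJOR_VERSION = 3;"
# while non-matching lines pass through unchanged.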
fileProcess( "tinyxml2.h", engineRule )
#### Write the dox ####
def doxRule( line ):
match = "PROJECT_NUMBER"
if line[0:len( match )] == match:
print "dox project found"
return "PROJECT_NUMBER = " + `major` + "." + `minor` + "." + `build` + "\n"
else:
return line;
fileProcess( "dox", doxRule )
#### Write the CMakeLists.txt ####
def cmakeRule1( line ):
matchVersion = "set(GENERIC_LIB_VERSION"
if line[0:len(matchVersion)] == matchVersion:
print "1)tinyxml2.h Major found"
return matchVersion + " \"" + `major` + "." + `minor` + "." + `build` + "\")" + "\n"
else:
return line;
fileProcess( "CMakeLists.txt", cmakeRule1 )
def cmakeRule2( line ):
matchSoversion = "set(GENERIC_LIB_SOVERSION"
if line[0:len(matchSoversion)] == matchSoversion:
print "1)tinyxml2.h Major found"
return matchSoversion + " \"" + `major` + "\")" + "\n"
else:
return line;
fileProcess( "CMakeLists.txt", cmakeRule2 )
| mit |
crishoj/got-your-back | googleapiclient/errors.py | 57 | 3622 | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Errors for the library.
All exceptions defined by the library
should be defined in this file.
"""
from __future__ import absolute_import
__author__ = '[email protected] (Joe Gregorio)'
import json
from oauth2client import util
class Error(Exception):
"""Base error for this module."""
pass
class HttpError(Error):
"""HTTP data was invalid or unexpected."""
@util.positional(3)
def __init__(self, resp, content, uri=None):
self.resp = resp
if not isinstance(content, bytes):
raise TypeError("HTTP content should be bytes")
self.content = content
self.uri = uri
def _get_reason(self):
"""Calculate the reason for the error from the response content."""
reason = self.resp.reason
try:
data = json.loads(self.content.decode('utf-8'))
reason = data['error']['message']
except (ValueError, KeyError):
pass
if reason is None:
reason = ''
return reason
def __repr__(self):
if self.uri:
return '<HttpError %s when requesting %s returned "%s">' % (
self.resp.status, self.uri, self._get_reason().strip())
else:
return '<HttpError %s "%s">' % (self.resp.status, self._get_reason())
__str__ = __repr__
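# Example (hedged; the payload below is hypothetical): _get_reason() expects
# Google-style JSON error bodies such as
#     {"error": {"code": 404, "message": "File not found"}}
# and falls back to resp.reason when the body is not JSON or lacks those keys.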
class InvalidJsonError(Error):
"""The JSON returned could not be parsed."""
pass
class UnknownFileType(Error):
"""File type unknown or unexpected."""
pass
class UnknownLinkType(Error):
"""Link type unknown or unexpected."""
pass
class UnknownApiNameOrVersion(Error):
"""No API with that name and version exists."""
pass
class UnacceptableMimeTypeError(Error):
"""That is an unacceptable mimetype for this operation."""
pass
class MediaUploadSizeError(Error):
"""Media is larger than the method can accept."""
pass
class ResumableUploadError(HttpError):
"""Error occured during resumable upload."""
pass
class InvalidChunkSizeError(Error):
"""The given chunksize is not valid."""
pass
class InvalidNotificationError(Error):
"""The channel Notification is invalid."""
pass
class BatchError(HttpError):
"""Error occured during batch operations."""
@util.positional(2)
def __init__(self, reason, resp=None, content=None):
self.resp = resp
self.content = content
self.reason = reason
def __repr__(self):
return '<BatchError %s "%s">' % (self.resp.status, self.reason)
__str__ = __repr__
class UnexpectedMethodError(Error):
"""Exception raised by RequestMockBuilder on unexpected calls."""
@util.positional(1)
def __init__(self, methodId=None):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedMethodError, self).__init__(
'Received unexpected call %s' % methodId)
class UnexpectedBodyError(Error):
"""Exception raised by RequestMockBuilder on unexpected bodies."""
def __init__(self, expected, provided):
"""Constructor for an UnexpectedMethodError."""
super(UnexpectedBodyError, self).__init__(
'Expected: [%s] - Provided: [%s]' % (expected, provided))
| apache-2.0 |
CentroGeo/geonode | geonode/geoapps/api/serializers.py | 2 | 5333 | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2020 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import json
import logging
from django.contrib.auth import get_user_model
from dynamic_rest.fields.fields import DynamicRelationField
from dynamic_rest.serializers import DynamicModelSerializer
from geonode.base.api.serializers import ResourceBaseSerializer
from geonode.geoapps.models import GeoApp, GeoAppData
from rest_framework.serializers import ValidationError
logger = logging.getLogger(__name__)
class GeoAppDataField(DynamicRelationField):
def value_to_string(self, obj):
value = self.value_from_object(obj)
return self.get_prep_value(value)
class GeoAppDataSerializer(DynamicModelSerializer):
class Meta:
ref_name = 'GeoAppData'
model = GeoAppData
name = 'GeoAppData'
fields = ('pk', 'blob')
def to_internal_value(self, data):
return data
def to_representation(self, value):
data = GeoAppData.objects.filter(resource__id=value).first()
if data and data.blob:
if isinstance(data.blob, dict):
return data.blob
return json.loads(data.blob)
return {}
class GeoAppSerializer(ResourceBaseSerializer):
"""
- Deferred / not Embedded --> ?include[]=data
"""
data = GeoAppDataField(
GeoAppDataSerializer,
source='id',
many=False,
embed=False,
deferred=True)
def __init__(self, *args, **kwargs):
# Instantiate the superclass normally
super(GeoAppSerializer, self).__init__(*args, **kwargs)
class Meta:
model = GeoApp
name = 'geoapp'
fields = (
'pk', 'uuid',
'zoom', 'projection', 'center_x', 'center_y',
'urlsuffix', 'data'
)
def to_internal_value(self, data):
if isinstance(data, str):
data = json.loads(data)
if 'data' in data:
_data = data.pop('data')
if self.is_valid():
data['blob'] = _data
return data
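    # Example (hedged; field values are hypothetical): a payload such as
    #     {"name": "my-app", "owner": "admin", "data": {"center": [0, 0]}}
    # has its "data" member popped and re-attached as "blob", so create()/update()
    # can persist it in GeoAppData while the remaining keys map to GeoApp fields.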
def extra_update_checks(self, validated_data):
_user_profiles = {}
for _key, _value in validated_data.items():
if _key in ('owner', 'poc', 'metadata_owner'):
_user_profiles[_key] = _value
for _key, _value in _user_profiles.items():
validated_data.pop(_key)
_u = get_user_model().objects.filter(username=_value).first()
if _u:
validated_data[_key] = _u
else:
raise ValidationError(f"The specified '{_key}' does not exist!")
def extra_create_checks(self, validated_data):
if 'name' not in validated_data or \
'owner' not in validated_data:
raise ValidationError("No valid data: 'name' and 'owner' are mandatory fields!")
if self.Meta.model.objects.filter(name=validated_data['name']).count():
raise ValidationError("A GeoApp with the same 'name' already exists!")
self.extra_update_checks(validated_data)
def create(self, validated_data):
# perform sanity checks
self.extra_create_checks(validated_data)
# Extract JSON blob
_data = None
if 'blob' in validated_data:
_data = validated_data.pop('blob')
# Create a new instance
_instance = self.Meta.model.objects.create(**validated_data)
if _instance and _data:
try:
_geo_app, _created = GeoAppData.objects.get_or_create(resource=_instance)
_geo_app.blob = _data
_geo_app.save()
except Exception as e:
raise ValidationError(e)
_instance.save()
return _instance
def update(self, instance, validated_data):
# perform sanity checks
self.extra_update_checks(validated_data)
# Extract JSON blob
_data = None
if 'blob' in validated_data:
_data = validated_data.pop('blob')
try:
self.Meta.model.objects.filter(pk=instance.id).update(**validated_data)
instance.refresh_from_db()
except Exception as e:
raise ValidationError(e)
if instance and _data:
try:
_geo_app, _created = GeoAppData.objects.get_or_create(resource=instance)
_geo_app.blob = _data
_geo_app.save()
except Exception as e:
raise ValidationError(e)
instance.save()
return instance
| gpl-3.0 |
mathwuyue/sdn-d2d | tests/unit/module_load_test.py | 46 | 1777 | #!/usr/bin/env python
#
# Copyright 2011-2012 Andreas Wundsam
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" A simple nose based test unit test that discovers all modules in the pox directory and tries to load them """
import sys
from os import path
import os
import unittest
SCRIPT_DIR=path.dirname(path.abspath(__file__))
ROOT=path.abspath(path.join(SCRIPT_DIR,"../.."))
sys.path.append(os.path.dirname(__file__) + "/../..")
packages = {}
modules = []
for root, dirs, files in os.walk(ROOT):
assert root.startswith(ROOT)
root = root[len(ROOT)+1:]
if not root.startswith("pox"): continue
if not path.exists(path.join(root, "__init__.py")):
continue
modules.append(root.replace(path.sep,"."))
files = [f for f in files if f.endswith(".py") and not f.startswith("__init__") and f != "setup.py"]
#print root
for f in files:
packagename = root.replace(path.sep,".")
modules.append( packagename + "." + f[:-3])
def test_load_modules():
# This is a test /generator/. It yields a separate loading test for each module
# Nosetests is required
for module in modules:
yield load_module, module
def load_module(module):
loaded_module = __import__(module)
if __name__ == '__main__':
import nose
nose.main(defaultTest=__name__)
| apache-2.0 |
daoluan/decode-Django | Django-1.5.1/django/contrib/gis/admin/options.py | 112 | 5554 | from django.contrib.admin import ModelAdmin
from django.contrib.gis.admin.widgets import OpenLayersWidget
from django.contrib.gis.gdal import OGRGeomType
from django.contrib.gis.db import models
class GeoModelAdmin(ModelAdmin):
"""
The administration options class for Geographic models. Map settings
may be overloaded from their defaults to create custom maps.
"""
# The default map settings that may be overloaded -- still subject
# to API changes.
default_lon = 0
default_lat = 0
default_zoom = 4
display_wkt = False
display_srid = False
extra_js = []
num_zoom = 18
max_zoom = False
min_zoom = False
units = False
max_resolution = False
max_extent = False
modifiable = True
mouse_position = True
scale_text = True
layerswitcher = True
scrollable = True
map_width = 600
map_height = 400
map_srid = 4326
map_template = 'gis/admin/openlayers.html'
openlayers_url = 'http://openlayers.org/api/2.11/OpenLayers.js'
point_zoom = num_zoom - 6
wms_url = 'http://vmap0.tiles.osgeo.org/wms/vmap0'
wms_layer = 'basic'
wms_name = 'OpenLayers WMS'
wms_options = {'format': 'image/jpeg'}
debug = False
widget = OpenLayersWidget
@property
def media(self):
"Injects OpenLayers JavaScript into the admin."
media = super(GeoModelAdmin, self).media
media.add_js([self.openlayers_url])
media.add_js(self.extra_js)
return media
def formfield_for_dbfield(self, db_field, **kwargs):
"""
Overloaded from ModelAdmin so that an OpenLayersWidget is used
for viewing/editing GeometryFields.
"""
if isinstance(db_field, models.GeometryField):
request = kwargs.pop('request', None)
# Setting the widget with the newly defined widget.
kwargs['widget'] = self.get_map_widget(db_field)
return db_field.formfield(**kwargs)
else:
return super(GeoModelAdmin, self).formfield_for_dbfield(db_field, **kwargs)
def get_map_widget(self, db_field):
"""
Returns a subclass of the OpenLayersWidget (or whatever was specified
in the `widget` attribute) using the settings from the attributes set
in this class.
"""
is_collection = db_field.geom_type in ('MULTIPOINT', 'MULTILINESTRING', 'MULTIPOLYGON', 'GEOMETRYCOLLECTION')
if is_collection:
if db_field.geom_type == 'GEOMETRYCOLLECTION': collection_type = 'Any'
else: collection_type = OGRGeomType(db_field.geom_type.replace('MULTI', ''))
else:
collection_type = 'None'
class OLMap(self.widget):
template = self.map_template
geom_type = db_field.geom_type
wms_options = ''
if self.wms_options:
wms_options = ["%s: '%s'" % pair for pair in self.wms_options.items()]
wms_options = ', %s' % ', '.join(wms_options)
params = {'default_lon' : self.default_lon,
'default_lat' : self.default_lat,
'default_zoom' : self.default_zoom,
'display_wkt' : self.debug or self.display_wkt,
'geom_type' : OGRGeomType(db_field.geom_type),
'field_name' : db_field.name,
'is_collection' : is_collection,
'scrollable' : self.scrollable,
'layerswitcher' : self.layerswitcher,
'collection_type' : collection_type,
'is_linestring' : db_field.geom_type in ('LINESTRING', 'MULTILINESTRING'),
'is_polygon' : db_field.geom_type in ('POLYGON', 'MULTIPOLYGON'),
'is_point' : db_field.geom_type in ('POINT', 'MULTIPOINT'),
'num_zoom' : self.num_zoom,
'max_zoom' : self.max_zoom,
'min_zoom' : self.min_zoom,
                  'units' : self.units, # likely should get from object
'max_resolution' : self.max_resolution,
'max_extent' : self.max_extent,
'modifiable' : self.modifiable,
'mouse_position' : self.mouse_position,
'scale_text' : self.scale_text,
'map_width' : self.map_width,
'map_height' : self.map_height,
'point_zoom' : self.point_zoom,
'srid' : self.map_srid,
'display_srid' : self.display_srid,
'wms_url' : self.wms_url,
'wms_layer' : self.wms_layer,
'wms_name' : self.wms_name,
'wms_options' : wms_options,
'debug' : self.debug,
}
return OLMap
from django.contrib.gis import gdal
if gdal.HAS_GDAL:
# Use the official spherical mercator projection SRID on versions
# of GDAL that support it; otherwise, fallback to 900913.
if gdal.GDAL_VERSION >= (1, 7):
spherical_mercator_srid = 3857
else:
spherical_mercator_srid = 900913
class OSMGeoAdmin(GeoModelAdmin):
map_template = 'gis/admin/osm.html'
num_zoom = 20
map_srid = spherical_mercator_srid
max_extent = '-20037508,-20037508,20037508,20037508'
max_resolution = '156543.0339'
point_zoom = num_zoom - 6
units = 'm'
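    # Example (hedged; the model and import path follow common Django usage):
    #     from django.contrib.gis import admin
    #     admin.site.register(City, admin.OSMGeoAdmin)
    # registers a hypothetical City model with the OSM-backed map widget above.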
| gpl-2.0 |
godfather1103/WeiboRobot | python27/1.0/lib/test/test_curses.py | 44 | 9518 | #
# Test script for the curses module
#
# This script doesn't actually display anything very coherent, but it
# does call every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# init_color()
# Only called, not tested: getmouse(), ungetmouse()
#
import sys, tempfile, os
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import unittest
from test.test_support import requires, import_module
requires('curses')
curses = import_module('curses')
curses.panel = import_module('curses.panel')
# XXX: if newterm was supported we could use it instead of initscr and not exit
term = os.environ.get('TERM')
if not term or term == 'unknown':
raise unittest.SkipTest, "$TERM=%r, calling initscr() may cause exit" % term
if sys.platform == "cygwin":
raise unittest.SkipTest("cygwin's curses mostly just hangs")
def window_funcs(stdscr):
"Test the methods of windows"
win = curses.newwin(10,10)
win = curses.newwin(5,5, 5,5)
win2 = curses.newwin(15,15, 5,5)
for meth in [stdscr.addch, stdscr.addstr]:
for args in [('a'), ('a', curses.A_BOLD),
(4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
meth(*args)
for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
stdscr.getparyx, stdscr.getyx, stdscr.inch,
stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
stdscr.standout, stdscr.standend, stdscr.syncdown,
stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
meth()
stdscr.addnstr('1234', 3)
stdscr.addnstr('1234', 3, curses.A_BOLD)
stdscr.addnstr(4,4, '1234', 3)
stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
stdscr.attron(curses.A_BOLD)
stdscr.attroff(curses.A_BOLD)
stdscr.attrset(curses.A_BOLD)
stdscr.bkgd(' ')
stdscr.bkgd(' ', curses.A_REVERSE)
stdscr.bkgdset(' ')
stdscr.bkgdset(' ', curses.A_REVERSE)
win.border(65, 66, 67, 68,
69, 70, 71, 72)
win.border('|', '!', '-', '_',
'+', '\\', '#', '/')
try:
win.border(65, 66, 67, 68,
69, [], 71, 72)
except TypeError:
pass
else:
raise RuntimeError, "Expected win.border() to raise TypeError"
stdscr.clearok(1)
win4 = stdscr.derwin(2,2)
win4 = stdscr.derwin(1,1, 5,5)
win4.mvderwin(9,9)
stdscr.echochar('a')
stdscr.echochar('a', curses.A_BOLD)
stdscr.hline('-', 5)
stdscr.hline('-', 5, curses.A_BOLD)
stdscr.hline(1,1,'-', 5)
stdscr.hline(1,1,'-', 5, curses.A_BOLD)
stdscr.idcok(1)
stdscr.idlok(1)
stdscr.immedok(1)
stdscr.insch('c')
stdscr.insdelln(1)
stdscr.insnstr('abc', 3)
stdscr.insnstr('abc', 3, curses.A_BOLD)
stdscr.insnstr(5, 5, 'abc', 3)
stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
stdscr.insstr('def')
stdscr.insstr('def', curses.A_BOLD)
stdscr.insstr(5, 5, 'def')
stdscr.insstr(5, 5, 'def', curses.A_BOLD)
stdscr.is_linetouched(0)
stdscr.keypad(1)
stdscr.leaveok(1)
stdscr.move(3,3)
win.mvwin(2,2)
stdscr.nodelay(1)
stdscr.notimeout(1)
win2.overlay(win)
win2.overwrite(win)
win2.overlay(win, 1, 2, 3, 3, 2, 1)
win2.overwrite(win, 1, 2, 3, 3, 2, 1)
stdscr.redrawln(1,2)
stdscr.scrollok(1)
stdscr.scroll()
stdscr.scroll(2)
stdscr.scroll(-3)
stdscr.move(12, 2)
stdscr.setscrreg(10,15)
win3 = stdscr.subwin(10,10)
win3 = stdscr.subwin(10,10, 5,5)
stdscr.syncok(1)
stdscr.timeout(5)
stdscr.touchline(5,5)
stdscr.touchline(5,5,0)
stdscr.vline('a', 3)
stdscr.vline('a', 3, curses.A_STANDOUT)
stdscr.chgat(5, 2, 3, curses.A_BLINK)
stdscr.chgat(3, curses.A_BOLD)
stdscr.chgat(5, 8, curses.A_UNDERLINE)
stdscr.chgat(curses.A_BLINK)
stdscr.refresh()
stdscr.vline(1,1, 'a', 3)
stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
if hasattr(curses, 'resize'):
stdscr.resize()
if hasattr(curses, 'enclose'):
stdscr.enclose()
def module_funcs(stdscr):
"Test module-level functions"
for func in [curses.baudrate, curses.beep, curses.can_change_color,
curses.cbreak, curses.def_prog_mode, curses.doupdate,
curses.filter, curses.flash, curses.flushinp,
curses.has_colors, curses.has_ic, curses.has_il,
curses.isendwin, curses.killchar, curses.longname,
curses.nocbreak, curses.noecho, curses.nonl,
curses.noqiflush, curses.noraw,
curses.reset_prog_mode, curses.termattrs,
curses.termname, curses.erasechar, curses.getsyx]:
func()
# Functions that actually need arguments
if curses.tigetstr("cnorm"):
curses.curs_set(1)
curses.delay_output(1)
curses.echo() ; curses.echo(1)
f = tempfile.TemporaryFile()
stdscr.putwin(f)
f.seek(0)
curses.getwin(f)
f.close()
curses.halfdelay(1)
curses.intrflush(1)
curses.meta(1)
curses.napms(100)
curses.newpad(50,50)
win = curses.newwin(5,5)
win = curses.newwin(5,5, 1,1)
curses.nl() ; curses.nl(1)
curses.putp('abc')
curses.qiflush()
curses.raw() ; curses.raw(1)
curses.setsyx(5,5)
curses.tigetflag('hc')
curses.tigetnum('co')
curses.tigetstr('cr')
curses.tparm('cr')
curses.typeahead(sys.__stdin__.fileno())
curses.unctrl('a')
curses.ungetch('a')
curses.use_env(1)
# Functions only available on a few platforms
if curses.has_colors():
curses.start_color()
curses.init_pair(2, 1,1)
curses.color_content(1)
curses.color_pair(2)
curses.pair_content(curses.COLOR_PAIRS - 1)
curses.pair_number(0)
if hasattr(curses, 'use_default_colors'):
curses.use_default_colors()
if hasattr(curses, 'keyname'):
curses.keyname(13)
if hasattr(curses, 'has_key'):
curses.has_key(13)
if hasattr(curses, 'getmouse'):
(availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
# availmask indicates that mouse stuff not available.
if availmask != 0:
curses.mouseinterval(10)
# just verify these don't cause errors
curses.ungetmouse(0, 0, 0, 0, curses.BUTTON1_PRESSED)
m = curses.getmouse()
if hasattr(curses, 'is_term_resized'):
curses.is_term_resized(*stdscr.getmaxyx())
if hasattr(curses, 'resizeterm'):
curses.resizeterm(*stdscr.getmaxyx())
if hasattr(curses, 'resize_term'):
curses.resize_term(*stdscr.getmaxyx())
def unit_tests():
from curses import ascii
for ch, expected in [('a', 'a'), ('A', 'A'),
(';', ';'), (' ', ' '),
('\x7f', '^?'), ('\n', '^J'), ('\0', '^@'),
# Meta-bit characters
('\x8a', '!^J'), ('\xc1', '!A'),
]:
if ascii.unctrl(ch) != expected:
print 'curses.unctrl fails on character', repr(ch)
def test_userptr_without_set(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
# try to access userptr() before calling set_userptr() -- segfaults
try:
p.userptr()
raise RuntimeError, 'userptr should fail since not set'
except curses.panel.error:
pass
def test_userptr_memory_leak(stdscr):
w = curses.newwin(10, 10)
p = curses.panel.new_panel(w)
obj = object()
nrefs = sys.getrefcount(obj)
for i in range(100):
p.set_userptr(obj)
p.set_userptr(None)
if sys.getrefcount(obj) != nrefs:
raise RuntimeError, "set_userptr leaked references"
def test_userptr_segfault(stdscr):
panel = curses.panel.new_panel(stdscr)
class A:
def __del__(self):
panel.set_userptr(None)
panel.set_userptr(A())
panel.set_userptr(None)
def test_resize_term(stdscr):
if hasattr(curses, 'resizeterm'):
lines, cols = curses.LINES, curses.COLS
curses.resizeterm(lines - 1, cols + 1)
if curses.LINES != lines - 1 or curses.COLS != cols + 1:
raise RuntimeError, "Expected resizeterm to update LINES and COLS"
def test_issue6243(stdscr):
curses.ungetch(1025)
stdscr.getkey()
def main(stdscr):
curses.savetty()
try:
module_funcs(stdscr)
window_funcs(stdscr)
test_userptr_without_set(stdscr)
test_userptr_memory_leak(stdscr)
test_userptr_segfault(stdscr)
test_resize_term(stdscr)
test_issue6243(stdscr)
finally:
curses.resetty()
if __name__ == '__main__':
curses.wrapper(main)
unit_tests()
else:
if not sys.__stdout__.isatty():
raise unittest.SkipTest("sys.__stdout__ is not a tty")
# testing setupterm() inside initscr/endwin
# causes terminal breakage
curses.setupterm(fd=sys.__stdout__.fileno())
try:
stdscr = curses.initscr()
main(stdscr)
finally:
curses.endwin()
unit_tests()
| gpl-3.0 |
peterfroehlich/lightblue-0.4 | src/series60/_lightblue.py | 36 | 14171 | # Copyright (c) 2009 Bea Lam. All rights reserved.
#
# This file is part of LightBlue.
#
# LightBlue is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# LightBlue is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with LightBlue. If not, see <http://www.gnu.org/licenses/>.
import socket as _socket
import _lightbluecommon
# public attributes
__all__ = ("finddevices", "findservices", "finddevicename",
"gethostaddr", "gethostclass",
"socket",
"advertise", "stopadvertise",
"selectdevice", "selectservice")
# details of advertised services
__advertised = {}
def finddevices(getnames=True, length=10):
# originally this used DiscoverDevices in _lightblueutil extension, but
# that blocks the UI
import e32
inquiry = _DeviceInquiry()
inquiry.start(getnames, length)
timer = None
try:
while not inquiry.isdone():
# keep waiting
timer = e32.Ao_timer()
timer.after(0.1)
finally:
inquiry.stop()
if timer is not None: timer.cancel()
return inquiry.getfounddevices()
def findservices(addr=None, name=None, servicetype=None):
if servicetype is None:
funcs = (_socket.bt_discover, _socket.bt_obex_discover)
elif servicetype == _lightbluecommon.RFCOMM:
funcs = (_socket.bt_discover, )
elif servicetype == _lightbluecommon.OBEX:
funcs = (_socket.bt_obex_discover, )
else:
raise ValueError("servicetype must be RFCOMM, OBEX or None, was %s" % \
servicetype)
if addr is None:
devices = finddevices()
btaddrs = [d[0] for d in devices]
else:
btaddrs = [addr]
services = []
for addr in btaddrs:
for func in funcs:
try:
devaddr, servicesdict = func(addr)
except _socket.error, e:
#raise _lightbluecommon.BluetoothError(str(e))
print "[lightblue] cannot look up services for %s" % addr
continue
if name is not None:
for servicename in servicesdict.keys():
if servicename != name:
del servicesdict[servicename]
services.extend(_getservicetuples(devaddr, servicesdict))
return services
def finddevicename(address, usecache=True):
if not _lightbluecommon._isbtaddr(address):
raise ValueError("%s is not a valid bluetooth address" % str(address))
if address == gethostaddr():
return _gethostname()
try:
# lookupName() expects address without colon separators
import _lightblueutil
address_no_sep = address.replace(":", "").replace("-", "")
name = _lightblueutil.lookupName(address_no_sep, (not usecache))
except SymbianError, e:
raise _lightbluecommon.BluetoothError(
"Cannot find device name for %s: %s" % (address, str(e)))
return name
def gethostaddr():
import _lightblueutil
try:
addr = _lightblueutil.getLocalAddress()
except SymbianError, exc:
raise _lightbluecommon.BluetoothError(
"Cannot read local device address: " + str(exc))
return addr
def gethostclass():
import _lightblueutil
try:
cod = _lightblueutil.getLocalDeviceClass()
except SymbianError, exc:
raise _lightbluecommon.BluetoothError(
"Cannot read local device class: " + str(exc))
return cod
def _gethostname():
import _lightblueutil
try:
name = _lightblueutil.getLocalName()
except SymbianError, exc:
raise _lightbluecommon.BluetoothError(
"Cannot read local device name: " + str(exc))
return name
class _SocketWrapper(object):
def __init__(self, sock, connaddr=()):
self.__dict__["_sock"] = sock
self._setconnaddr(connaddr)
# must implement accept() to return _SocketWrapper objects
def accept(self):
conn, addr = self._sock.accept()
# modify returned address cos PyS60 accept() only returns address, not
# (addr, channel) tuple
addrtuple = (addr.upper(), self._connaddr[1])
return (_SocketWrapper(conn, addrtuple), addrtuple)
accept.__doc__ = _lightbluecommon._socketdocs["accept"]
def bind(self, addr):
# if port==0, find an available port
if addr[1] == 0:
addr = (addr[0], _getavailableport(self))
try:
self._sock.bind(addr)
except Exception, e:
raise _socket.error(str(e))
self._setconnaddr(addr)
bind.__doc__ = _lightbluecommon._socketdocs["bind"]
def close(self):
self._sock.close()
# try to stop advertising
try:
stopadvertise(self)
except:
pass
close.__doc__ = _lightbluecommon._socketdocs["close"]
def connect(self, addr):
self._sock.connect(addr)
self._setconnaddr(addr)
connect.__doc__ = _lightbluecommon._socketdocs["connect"]
def connect_ex(self, addr):
try:
self.connect(addr)
except _socket.error, e:
return e.args[0]
return 0
connect_ex.__doc__ = _lightbluecommon._socketdocs["connect_ex"]
# must implement dup() to return _SocketWrapper objects
def dup(self):
return _SocketWrapper(self._sock.dup())
dup.__doc__ = _lightbluecommon._socketdocs["dup"]
def listen(self, backlog):
self._sock.listen(backlog)
# when listen() is called, set a default security level since S60
# sockets are required to have a security level
# This should be changed later to allow to set security using
# setsockopt()
_socket.set_security(self._sock, _socket.AUTH)
listen.__doc__ = _lightbluecommon._socketdocs["listen"]
# PyS60 raises socket.error("Bad protocol") when this is called for stream
# sockets, but implement it here like recv() for consistency with Linux+Mac
def recvfrom(self, bufsize, flags=0):
return (self._sock.recv(bufsize, flags), None)
recvfrom.__doc__ = _lightbluecommon._socketdocs["recvfrom"]
# PyS60 raises socket.error("Bad protocol") when this is called for stream
# sockets, but implement it here like send() for consistency with Linux+Mac
def sendto(self, data, *extra):
if len(extra) == 1:
address = extra[0]
flags = 0
elif len(extra) == 2:
flags, address = extra
else:
raise TypeError("sendto takes at most 3 arguments (%d given)" % \
(len(extra) + 1))
return self._sock.send(data, flags)
sendto.__doc__ = _lightbluecommon._socketdocs["sendto"]
# sendall should return None on success but PyS60 seems to have it return
# bytes sent like send
def sendall(self, data, flags=0):
self.send(data, flags)
return None
sendall.__doc__ = _lightbluecommon._socketdocs["sendall"]
# implement to return (remote-address, common-channel) like PyBluez
# (PyS60 implementation raises error when this method is called, saying
# it's not implemented - maybe cos a remote BT socket doesn't really have
# an outgoing channel like TCP sockets? But it seems handy to return the
# channel we're communicating over anyway i.e. the local RFCOMM channel)
def getpeername(self):
if not self._connaddr:
raise _socket.error(57, "Socket is not connected")
return self._connaddr
getpeername.__doc__ = _lightbluecommon._socketdocs["getpeername"]
# like getpeername(), PyS60 does not implement this method
def getsockname(self):
if not self._connaddr: # sock is neither bound nor connected
return ("00:00:00:00:00:00", 0)
return (gethostaddr(), self._connaddr[1])
getsockname.__doc__ = _lightbluecommon._socketdocs["getsockname"]
def fileno(self):
raise NotImplementedError
fileno.__doc__ = _lightbluecommon._socketdocs["fileno"]
def settimeout(self, timeout):
raise NotImplementedError
settimeout.__doc__ = _lightbluecommon._socketdocs["settimeout"]
def gettimeout(self):
return None
gettimeout.__doc__ = _lightbluecommon._socketdocs["gettimeout"]
def _setconnaddr(self, connaddr):
if len(connaddr) == 2:
connaddr = (connaddr[0].upper(), connaddr[1])
self.__dict__["_connaddr"] = connaddr
# wrap all other socket methods, to set LightBlue-specific docstrings
_othermethods = [_m for _m in _lightbluecommon._socketdocs.keys() \
if _m not in locals()] # methods other than those already defined
_methoddef = """def %s(self, *args, **kwargs):
return self._sock.%s(*args, **kwargs)
%s.__doc__ = _lightbluecommon._socketdocs['%s']\n"""
for _m in _othermethods:
exec _methoddef % (_m, _m, _m, _m)
del _m, _methoddef
def socket(proto=_lightbluecommon.RFCOMM):
if proto == _lightbluecommon.L2CAP:
raise NotImplementedError("L2CAP sockets not supported on this platform")
sock = _socket.socket(_socket.AF_BT, _socket.SOCK_STREAM,
_socket.BTPROTO_RFCOMM)
return _SocketWrapper(sock)
def _getavailableport(sock):
# can just use bt_rfcomm_get_available_server_channel since only RFCOMM is
# currently supported
return _socket.bt_rfcomm_get_available_server_channel(sock._sock)
def advertise(name, sock, servicetype):
if servicetype == _lightbluecommon.RFCOMM:
servicetype = _socket.RFCOMM
elif servicetype == _lightbluecommon.OBEX:
servicetype = _socket.OBEX
else:
raise ValueError("servicetype must be either RFCOMM or OBEX")
name = unicode(name)
# advertise the service
_socket.bt_advertise_service(name, sock._sock, True, servicetype)
# note details, for if advertising needs to be stopped later
__advertised[id(sock)] = (name, servicetype)
def stopadvertise(sock):
details = __advertised.get(id(sock))
if details is None:
raise _lightbluecommon.BluetoothError("no service advertised")
name, servicetype = details
_socket.bt_advertise_service(name, sock._sock, False, servicetype)
def selectdevice():
import _lightblueutil
try:
result = _lightblueutil.selectDevice()
except SymbianError, e:
raise _lightbluecommon.BluetoothError(str(e))
# underlying method returns class of device as tuple, not whole class
devinfo = (result[0], result[1], _lightbluecommon._joinclass(result[2]))
return devinfo
def selectservice():
device = selectdevice()
if device is None:
return None
import appuifw
services = findservices(addr=device[0])
choice = appuifw.popup_menu(
[unicode("%d: %s" % (s[1], s[2])) for s in services],
u"Choose service:")
if choice is None:
return None
return services[choice]
# Returns a list of (addr, channel, name) service tuples from a device
# address and a dictionary of {name: channel} mappings.
def _getservicetuples(devaddr, servicesdict):
return [(devaddr.upper(), channel, name) for name, channel in servicesdict.items()]
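# Example (hedged; the address and service name are hypothetical):
#     >>> _getservicetuples("00:0a:95:9d:68:16", {u"Serial Port": 1})
#     [('00:0A:95:9D:68:16', 1, u'Serial Port')]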
class _DeviceInquiry(object):
def __init__(self):
super(_DeviceInquiry, self).__init__()
self._founddevices = []
self._resolver = None
self._done = False
def start(self, getnames, length):
self._founddevices = []
self._done = False
import _lightblueutil
self._resolver = _lightblueutil.AoResolver()
self._resolver.open()
self._resolver.discover(self._founddevice, None, getnames)
def isdone(self):
return self._done
def stop(self):
if self.isdone():
return
if self._resolver:
self._resolver.cancel()
self._resolver.close()
self._done = True
def getfounddevices(self):
return self._founddevices[:]
def _founddevice(self, err, addr, name, devclass, param):
try:
if err == 0: # no err
#print "Found device", addr
# PDIS AoResolver returns addres without the colons
addr = addr[0:2] + ":" + addr[2:4] + ":" + addr[4:6] + ":" + \
addr[6:8] + ":" + addr[8:10] + ":" + addr[10:12]
devinfo = (addr.encode("utf-8").upper(),
name,
_lightbluecommon._joinclass(devclass))
self._founddevices.append(devinfo)
# keep looking for devices
self._resolver.next()
else:
if err == -25: # KErrEof (no more devices)
# finished discovery
self._resolver.close()
self._done = True
else:
print "[lightblue] device discovery error (%d)" % err
except Exception, e:
# catch all exceptions, the app will crash if exception is raised
# during callback
print "Error during _founddevice() callback: "+ str(e)
| gpl-3.0 |
fatcloud/PyCV-time | challanges/shape-challenge/find_arrows/find_arrows.py | 3 | 2877 |
import cv2
import numpy as np
from webcam_gui import webcam_gui
def angle(p1, p2, p3):
v1 = p1 - p2
v2 = p3 - p2
v1 = v1.astype(float)
v2 = v2.astype(float)
v1 = v1 / np.sqrt(np.dot(v1, v1))
v2 = v2 / np.sqrt(np.dot(v2, v2))
return np.degrees(np.arccos(np.dot(v1, v2)))
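# Example (hedged): a right-angle corner gives 90 degrees.
#     >>> angle(np.array([1, 0]), np.array([0, 0]), np.array([0, 1]))
#     90.0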
def isArrow(heptagon):
hull = cv2.convexHull(heptagon, returnPoints = False)
if len(hull) > 2:
defects = cv2.convexityDefects(heptagon, hull)
if defects is None or len(defects) != 2:
return False
farpoints = [d[0][2] for d in defects]
if not np.abs(farpoints[0] - farpoints[1]) in [3, 4]:
return False
for defect in defects:
s, e, f, d = defect[0]
# print defects
# s, e, f, d = defect[0]
ps = heptagon[s, 0]
pe = heptagon[e, 0]
pd = heptagon[f, 0]
if angle(ps, pd, pe) < 120:
return True
return False
def tip(arrow):
hull = cv2.convexHull(arrow, returnPoints = False)
defects = cv2.convexityDefects(arrow, hull)
farpoints = [d[0][2] for d in defects]
if np.abs(farpoints[0] - farpoints[1]) == 4:
return arrow[sum(farpoints) / 2, 0]
else:
return arrow[0, 0]
def imgproc(frame):
# convert color to gray scale and show it
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
cv2.imshow('gray', gray)
blur = cv2.blur(gray, (5,5))
edge = cv2.Canny(blur, 10, 100)
edge = cv2.blur(edge, (2,2))
cv2.imshow('blured edge', edge)
# convert image to black and white and show it
thresh1, thresh = cv2.threshold(edge, 60, 120, cv2.THRESH_BINARY)
cv2.imshow('thresh', thresh)
# find contours!
contours, hry = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# draw all the contours
cpframe = frame.copy()
cv2.drawContours(cpframe, contours, -1, (0,255,0), 3)
cv2.imshow('cpframe', cpframe)
# ================== TODO ===================
# Modify these code to suit your need
contours = [ctr for ctr in contours if cv2.contourArea(ctr) > 100]
contours = [cv2.approxPolyDP(ctr, 5, True) for ctr in contours]
heptagons = [ctr for ctr in contours if len(ctr) == 7]
arrows = [hepta for hepta in heptagons if isArrow(hepta)]
#tips = [ tip(a) for a in arrows ]
#contours = [ctr for ctr in contours if cv2.isContourConvex(ctr)]
# ============================================
# draw on the frame
#cv2.drawContours(frame, heptagons, -1, (0,255,0), 3)
cv2.drawContours(frame, arrows, -1, (255, 0, 0), -1)
# draw tips
#for t in tips:
# cv2.circle(frame, tuple(t), 5, (0, 0, 255), -1)
return frame
if __name__ == "__main__":
webcam_gui(imgproc, 1)
| mit |
odoo-brazil/PySPED | pysped/nfe/leiaute/conscad_101.py | 4 | 12584 | # -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Library General Public License as
# published by the Free Software Foundation, either version 2.1 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Library General Public License,
# publicada pela Free Software Foundation, em sua versão 2.1 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Library General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Library General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import division, print_function, unicode_literals
from pysped.xml_sped import (ABERTURA, NAMESPACE_NFE, TagCaracter,
TagData, TagDataHoraUTC, TagDecimal, TagInteiro,
XMLNFe)
from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_1 as ESQUEMA_ATUAL
import os
DIRNAME = os.path.dirname(__file__)
class InfConsEnviado(XMLNFe):
def __init__(self):
super(InfConsEnviado, self).__init__()
self.xServ = TagCaracter(nome='xServ', codigo='GP04', tamanho=[8, 8] , raiz='//ConsCad', valor='CONS-CAD')
self.UF = TagCaracter(nome='UF' , codigo='GP05', tamanho=[2, 2] , raiz='//ConsCad')
self.IE = TagCaracter(nome='IE' , codigo='GP06', tamanho=[2, 14] , raiz='//ConsCad', obrigatorio=False)
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='GP07', tamanho=[3, 14], raiz='//ConsCad', obrigatorio=False)
self.CPF = TagCaracter(nome='CPF' , codigo='GP08', tamanho=[3, 11], raiz='//ConsCad', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<infCons>'
xml += self.xServ.xml
xml += self.UF.xml
xml += self.IE.xml
xml += self.CNPJ.xml
xml += self.CPF.xml
xml += '</infCons>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xServ.xml = arquivo
self.UF.xml = arquivo
self.IE.xml = arquivo
self.CNPJ.xml = arquivo
self.CPF.xml = arquivo
xml = property(get_xml, set_xml)
class ConsCad(XMLNFe):
def __init__(self):
super(ConsCad, self).__init__()
self.versao = TagDecimal(nome='ConsCad', codigo='GP01', propriedade='versao', namespace=NAMESPACE_NFE, valor='1.01', raiz='/')
self.infCons = InfConsEnviado()
self.caminho_esquema = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'consCad_v1.01.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.infCons.xml
xml += '</ConsCad>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.infCons.xml = arquivo
xml = property(get_xml, set_xml)
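# --- Editor's illustrative sketch (not part of the original module) ---
# Minimal example of building a cadastre-consultation request with the class
# above; the UF and CNPJ values are hypothetical.
#
#     cons = ConsCad()
#     cons.infCons.UF.valor = 'SP'
#     cons.infCons.CNPJ.valor = '12345678000199'
#     print(cons.xml)  # serialised <ConsCad> request, ready for the webservice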
class Ender(XMLNFe):
def __init__(self):
super(Ender, self).__init__()
self.xLgr = TagCaracter(nome='xLgr' , codigo='GR23', tamanho=[1, 255] , raiz='//infCad/ender', obrigatorio=False)
self.nro = TagCaracter(nome='nro' , codigo='GR24', tamanho=[1, 60] , raiz='//infCad/ender', obrigatorio=False)
self.xCpl = TagCaracter(nome='xCpl' , codigo='GR25', tamanho=[1, 60] , raiz='//infCad/ender', obrigatorio=False)
self.xBairro = TagCaracter(nome='xBairro', codigo='GR26', tamanho=[1, 60] , raiz='//infCad/ender', obrigatorio=False)
self.cMun = TagInteiro(nome='cMun' , codigo='GR27', tamanho=[7, 7] , raiz='//infCad/ender', obrigatorio=False)
self.xMun = TagCaracter(nome='xMun' , codigo='GR28', tamanho=[1, 60] , raiz='//infCad/ender', obrigatorio=False)
self.CEP = TagInteiro(nome='CEP' , codigo='GR29', tamanho=[7, 8] , raiz='//infCad/ender', obrigatorio=False)
def get_xml(self):
xml = XMLNFe.get_xml(self)
if self.xLgr.valor or self.nro.valor or self.xCpl.valor or self.xBairro.valor or self.cMun.valor or self.xMun.valor or self.CEP.valor:
xml += '<ender>'
xml += self.xLgr.xml
xml += self.nro.xml
xml += self.xCpl.xml
xml += self.xBairro.xml
xml += self.cMun.xml
xml += self.xMun.xml
xml += self.CEP.xml
xml += '</ender>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.xLgr.xml = arquivo
self.nro.xml = arquivo
self.xCpl.xml = arquivo
self.xBairro.xml = arquivo
self.cMun.xml = arquivo
self.xMun.xml = arquivo
self.CEP.xml = arquivo
xml = property(get_xml, set_xml)
class InfCadRecebido(XMLNFe):
def __init__(self):
super(InfCadRecebido, self).__init__()
self.IE = TagCaracter(nome='IE' , codigo='GR08' , tamanho=[2, 14], raiz='//infCad', obrigatorio=False)
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='GR09' , tamanho=[3, 14], raiz='//infCad', obrigatorio=False)
self.CPF = TagCaracter(nome='CPF' , codigo='GR10' , tamanho=[3, 11], raiz='//infCad', obrigatorio=False)
self.UF = TagCaracter(nome='UF' , codigo='GR11' , tamanho=[2, 2] , raiz='//infCad')
self.cSit = TagInteiro(nome='cSit' , codigo='GR12' , tamanho=[1, 1] , raiz='//infCad')
self.xNome = TagCaracter(nome='xNome' , codigo='GR13' , tamanho=[1, 60], raiz='//infCad', obrigatorio=False)
self.xFant = TagCaracter(nome='xFant' , codigo='GR13a', tamanho=[1, 60], raiz='//infCad', obrigatorio=False)
self.xRegApur = TagCaracter(nome='xRegApur', codigo='GR14' , tamanho=[1, 60], raiz='//infCad', obrigatorio=False)
self.CNAE = TagInteiro(nome='CNAE' , codigo='GR15' , tamanho=[6, 7] , raiz='//infCad', obrigatorio=False)
self.dIniAtiv = TagData(nome='dIniAtiv' , codigo='GR16' , raiz='//infCad', obrigatorio=False)
self.dUltSit = TagData(nome='dUltSit' , codigo='GR17' , raiz='//infCad', obrigatorio=False)
self.dBaixa = TagData(nome='dBaixa' , codigo='GR18' , raiz='//infCad', obrigatorio=False)
self.IEUnica = TagCaracter(nome='IEUnica' , codigo='GR20' , tamanho=[2, 14], raiz='//infCad', obrigatorio=False)
self.IEAtual = TagCaracter(nome='IEAtual' , codigo='GR21' , tamanho=[2, 14], raiz='//infCad', obrigatorio=False)
self.ender = Ender()
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<infCad>'
xml += self.IE.xml
xml += self.CNPJ.xml
xml += self.CPF.xml
xml += self.UF.xml
xml += self.cSit.xml
xml += self.xNome.xml
xml += self.xFant.xml
xml += self.xRegApur.xml
xml += self.CNAE.xml
xml += self.dIniAtiv.xml
xml += self.dUltSit.xml
xml += self.dBaixa.xml
xml += self.IEUnica.xml
xml += self.IEAtual.xml
xml += self.ender.xml
xml += '</infCad>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.IE.xml = arquivo
self.CNPJ.xml = arquivo
self.CPF.xml = arquivo
self.UF.xml = arquivo
self.cSit.xml = arquivo
self.xNome.xml = arquivo
self.xFant.xml = arquivo
self.xRegApur.xml = arquivo
self.CNAE.xml = arquivo
self.dIniAtiv.xml = arquivo
self.dUltSit.xml = arquivo
self.dBaixa.xml = arquivo
self.IEUnica.xml = arquivo
self.IEAtual.xml = arquivo
self.ender.xml = arquivo
xml = property(get_xml, set_xml)
class InfConsRecebido(XMLNFe):
def __init__(self):
super(InfConsRecebido, self).__init__()
self.verAplic = TagCaracter(nome='verAplic', codigo='GR04' , tamanho=[1, 20] , raiz='//retConsCad/infCons')
self.cStat = TagInteiro(nome='cStat' , codigo='GR05' , tamanho=[3, 3, 3], raiz='//retConsCad/infCons')
self.xMotivo = TagCaracter(nome='xMotivo' , codigo='GR06' , tamanho=[1, 255] , raiz='//retConsCad/infCons')
self.UF = TagCaracter(nome='UF' , codigo='GR06a', tamanho=[2, 2] , raiz='//retConsCad/infCons')
self.IE = TagCaracter(nome='IE' , codigo='GR06b', tamanho=[2, 14] , raiz='//retConsCad/infCons', obrigatorio=False)
self.CNPJ = TagCaracter(nome='CNPJ' , codigo='GR06c', tamanho=[3, 14] , raiz='//retConsCad/infCons', obrigatorio=False)
self.CPF = TagCaracter(nome='CPF' , codigo='GR06d', tamanho=[3, 11] , raiz='//retConsCad/infCons', obrigatorio=False)
self.dhCons = TagDataHoraUTC(nome='dhCons' , codigo='GR06e', raiz='//retConsCad/infCons')
self.cUF = TagInteiro(nome='cUF' , codigo='GR06f', tamanho=[2, 2, 2], raiz='//retConsCad/infCons')
self.infCad = []
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += '<infCons>'
xml += self.verAplic.xml
xml += self.cStat.xml
xml += self.xMotivo.xml
xml += self.UF.xml
xml += self.IE.xml
xml += self.CNPJ.xml
xml += self.CPF.xml
xml += self.dhCons.xml
xml += self.cUF.xml
if len(self.infCad) > 0:
for ic in self.infCad:
xml += ic.xml
xml += '</infCons>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.verAplic.xml = arquivo
self.cStat.xml = arquivo
self.xMotivo.xml = arquivo
self.UF.xml = arquivo
self.IE.xml = arquivo
self.CNPJ.xml = arquivo
self.CPF.xml = arquivo
self.dhCons.xml = arquivo
self.cUF.xml = arquivo
if self._le_nohs('//retConsCad/infCons/infCad') is not None:
self.infCad = self.le_grupo('//retConsCad/infCons/infCad', InfCadRecebido)
#self.infCad = []
#cadastros = self._le_nohs('//retConsCad/infCons/infCad')
#if len(cadastros) > 0:
#for c in cadastros:
#nc = InfCadRecebido()
#nc.xml = c
#self.infCad.append(nc)
xml = property(get_xml, set_xml)
class RetConsCad(XMLNFe):
def __init__(self):
super(RetConsCad, self).__init__()
self.versao = TagDecimal(nome='retConsCad', codigo='GR01', propriedade='versao', namespace=NAMESPACE_NFE, valor='1.01', raiz='/')
self.infCons = InfConsRecebido()
self.caminho_esquema = os.path.join(DIRNAME, 'schema', ESQUEMA_ATUAL + '/')
self.arquivo_esquema = 'retConsCad_v1.01.xsd'
def get_xml(self):
xml = XMLNFe.get_xml(self)
xml += ABERTURA
xml += self.versao.xml
xml += self.infCons.xml
xml += '</retConsCad>'
return xml
def set_xml(self, arquivo):
if self._le_xml(arquivo):
self.versao.xml = arquivo
self.infCons.xml = arquivo
xml = property(get_xml, set_xml)
| lgpl-2.1 |
fplll/strategizer | strategize.py | 1 | 11203 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
u"""
Find BKZ reduction strategies using timing experiments.
.. moduleauthor:: Martin R. Albrecht <[email protected]>
.. moduleauthor:: Léo Ducas <[email protected]>
.. moduleauthor:: Marc Stevens <[email protected]>
"""
# We use multiprocessing to parallelize
from __future__ import absolute_import
from multiprocessing import Queue, Pipe, Process, active_children
from fpylll import IntegerMatrix, GSO, FPLLL, BKZ
from fpylll.tools.bkz_stats import BKZTreeTracer
from fpylll.fplll.bkz_param import Strategy, dump_strategies_json
from strategizer.bkz import CallbackBKZ
from strategizer.bkz import CallbackBKZParam as Param
from strategizer.config import logging, git_revision
from strategizer.util import chunk_iterator
from strategizer.strategizers import (
PruningStrategizer,
OneTourPreprocStrategizerFactory,
TwoTourPreprocStrategizerFactory,
FourTourPreprocStrategizerFactory,
ProgressivePreprocStrategizerFactory,
)
logger = logging.getLogger(__name__)
def find_best(state, fudge=1.01):
"""
Given an ordered tuple of tuples, return the minimal one, where
minimal is determined by first entry.
:param state:
:param fudge:
.. note :: The fudge factor means that we have a bias towards earlier entries.
"""
best = state[0]
for s in state:
if best[0] > fudge * s[0]:
best = s
return best
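# Editor's illustrative note: with the default fudge=1.01 an earlier entry wins
# unless a later one is more than about 1% better. Assuming entries whose first
# element is the total time:
#
#     find_best([(10.0, 'a'), (9.95, 'b'), (8.0, 'c')])  # -> (8.0, 'c'), since 10.0 > 1.01 * 8.0
#     find_best([(10.0, 'a'), (9.95, 'b')])              # -> (10.0, 'a'), since 10.0 <= 1.01 * 9.95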
def worker_process(seed, params, queue=None):
"""
This function is called to collect statistics.
    :param seed: seed used to generate the random lattice basis
:param params: BKZ parameters
:param queue: queue used for communication
"""
FPLLL.set_random_seed(seed)
FPLLL.set_threads(params["threads"])
A = IntegerMatrix.random(params.block_size, "qary", q=33554393, k=params.block_size // 2, int_type="long")
M = GSO.Mat(A)
bkz = CallbackBKZ(M) # suppresses initial LLL call
tracer = BKZTreeTracer(bkz, start_clocks=True)
with tracer.context(("tour", 0)):
bkz.svp_reduction(0, params.block_size, params, tracer)
M.update_gso()
tracer.exit()
try:
# close connection
params.strategies[params.block_size].connection.send(None)
except AttributeError:
pass
if queue:
queue.put(tracer.trace)
else:
return tracer.trace
def callback_roundtrip(alive, k, connections, data):
"""
    Send ``data`` on ``connections`` for process ids in ``alive``, ``k`` at a time.
:param alive:
:param k:
:param connections:
:param data:
"""
callback = [None] * len(connections)
for chunk in chunk_iterator(alive, k):
for i in chunk:
connections[i].send(data)
for i in chunk:
try:
callback[i] = connections[i].recv()
except EOFError:
callback[i] = None
connections[i].close()
return callback
def discover_strategy(block_size, Strategizer, strategies, jobs=1, nsamples=50, threads=1):
"""Discover a strategy using ``Strategizer``
:param block_size: block size to try
:param Strategizer: strategizer to use
:param strategies: strategies for smaller block sizes
:param jobs: number of jobs to run in parallel
:param nsamples: number of lattice bases to consider
:param threads: number of threads to use per job
"""
connections = []
processes = []
k = jobs
m = nsamples
strategizer = Strategizer(block_size)
# everybody is alive in the beginning
alive = range(m)
return_queue = Queue()
for i in range(m):
manager, worker = Pipe()
connections.append((manager, worker))
strategies_ = list(strategies)
strategies_.append(Strategizer.Strategy(block_size, worker))
# note: success probability, rerandomisation density etc. can be adapted here
param = Param(block_size=block_size, strategies=strategies_, flags=BKZ.GH_BND)
param["threads"] = threads
process = Process(target=worker_process, args=(2 ** 16 * block_size + i, param, return_queue))
processes.append(process)
callback = [None] * m
for chunk in chunk_iterator(alive, k):
for i in chunk:
process = processes[i]
process.start()
manager, worker = connections[i]
worker.close()
connections[i] = manager
# wait for `k` responses
for i in chunk:
callback[i] = connections[i].recv()
assert all(callback) # everybody wants preprocessing parameters
preproc_params = strategizer(callback)
callback = callback_roundtrip(alive, k, connections, preproc_params)
assert all(callback) # everybody wants pruning parameters
pruning_params = strategizer(callback)
callback = callback_roundtrip(alive, k, connections, pruning_params)
assert not any(callback) # no more questions
strategy = Strategy(
block_size=block_size, preprocessing_block_sizes=preproc_params, pruning_parameters=pruning_params
)
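    # calling active_children() has the side effect of joining (reaping) any
    # worker processes that have already finished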
active_children()
stats = []
for i in range(m):
stats.append(return_queue.get())
return strategy, tuple(stats), tuple(strategizer.queries)
def strategize(
max_block_size,
existing_strategies=None,
min_block_size=3,
jobs=1,
threads=1,
nsamples=50,
pruner_method="hybrid",
StrategizerFactory=ProgressivePreprocStrategizerFactory,
dump_filename=None,
):
"""
    Find strategies for block sizes up to ``max_block_size``, using *one* preprocessing block size + pruning.
    :param max_block_size: maximum block size to consider
    :param StrategizerFactory: factory used to construct the preprocessing strategizers
:param existing_strategies: extend these previously computed strategies
:param min_block_size: start at this block size
:param jobs: run this many jobs in parallel
:param threads: number of FPLLL threads to use per job
:param nsamples: start using this many samples
:param dump_filename: write strategies to this filename
"""
if dump_filename is None:
dump_filename = "default-strategies-%s.json" % git_revision
if existing_strategies is not None:
strategies = existing_strategies
times = [None] * len(strategies)
else:
strategies = []
times = []
for i in range(len(strategies), min_block_size):
strategies.append(Strategy(i, [], []))
times.append(None)
strategizer = PruningStrategizer
for block_size in range(min_block_size, max_block_size + 1):
logger.info("= block size: %3d, samples: %3d =", block_size, nsamples)
state = []
try:
p = max(strategies[-1].preprocessing_block_sizes[-1], 2)
except (IndexError,):
p = 2
prev_best_total_time = None
while p < block_size:
if p >= 4:
strategizer_p = type("PreprocStrategizer-%d" % p, (strategizer, StrategizerFactory(p)), {})
else:
strategizer_p = strategizer
strategy, stats, queries = discover_strategy(
block_size, strategizer_p, strategies, jobs=jobs, nsamples=nsamples, threads=threads,
)
stats = [stat for stat in stats if stat is not None]
total_time = [float(stat.data["cputime"]) for stat in stats]
total_walltime = [float(stat.data["walltime"]) for stat in stats]
svp_time = [float(stat.find("enumeration").data["cputime"]) for stat in stats]
preproc_time = [float(stat.find("preprocessing").data["cputime"]) for stat in stats]
total_time = sum(total_time) / len(total_time)
total_walltime = sum(total_walltime) / len(total_walltime)
svp_time = sum(svp_time) / len(svp_time)
preproc_time = sum(preproc_time) / len(preproc_time)
state.append((total_time, total_walltime, strategy, stats, strategizer, queries))
logger.info(
"t: %10.4fs, w: %10.4fs, p: %10.4fs, s: %10.4fs, %s",
total_time,
total_walltime,
preproc_time,
svp_time,
strategy,
)
if prev_best_total_time and 1.3 * prev_best_total_time < total_time:
break
p += 2
if not prev_best_total_time or prev_best_total_time > total_time:
prev_best_total_time = total_time
best = find_best(state)
total_time, total_walltime, strategy, stats, strategizer, queries = best
strategies.append(strategy)
dump_strategies_json(dump_filename, strategies)
times.append((total_time, stats, queries))
logger.info("")
logger.info(
"block size: %3d, cpu: %10.4fs, wall: %10.4fs, strategy: %s",
block_size,
total_time,
total_walltime,
strategy,
)
logger.info("")
if total_time > 0.1 and nsamples > max(2 * jobs, 8):
nsamples //= 2
return strategies, times
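# Editor's illustrative note: besides the CLI entry point below, the function
# can be driven directly from Python; the numbers here are only an example.
#
#     strategies, times = strategize(max_block_size=40, jobs=4, nsamples=16)
#     # `times` holds (total_time, stats, queries) per block size, with None
#     # placeholders for the trivial block sizes below min_block_size.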
StrategizerFactoryDictionnary = {
"ProgressivePreproc": ProgressivePreprocStrategizerFactory,
"OneTourPreproc": OneTourPreprocStrategizerFactory,
"TwoTourPreproc": TwoTourPreprocStrategizerFactory,
"FourTourPreproc": FourTourPreprocStrategizerFactory,
}
if __name__ == "__main__":
import argparse
import logging
import os
parser = argparse.ArgumentParser(description="Preprocessing Search")
parser.add_argument("-j", "--jobs", help="number of jobs to run in parallel", type=int, default=1)
parser.add_argument("-t", "--threads", help="number of FPLLL threads to use per job", type=int, default=1)
parser.add_argument("-s", "--samples", help="number of samples to try", type=int, default=16)
parser.add_argument("-l", "--min-block-size", help="minimal block size to consider", type=int, default=3)
    parser.add_argument("-u", "--max-block-size", help="maximal block size to consider", type=int, default=50)
parser.add_argument("-f", "--filename", help="json file to store strategies to", type=str, default=None)
parser.add_argument(
"-S",
"--strategizer",
help="Strategizer : {ProgressivePreproc,OneTourPreproc,TwoTourPreproc,FourTourPreproc}",
type=str,
default="OneTourPreproc",
)
args = parser.parse_args()
log_name = os.path.join("default-strategies-%s.log" % (git_revision))
if args.filename:
if not args.filename.endswith(".json"):
raise ValueError("filename should be a json file")
log_name = args.filename.replace(".json", ".log")
extra = logging.FileHandler(log_name)
extra.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(name)s: %(message)s")
extra.setFormatter(formatter)
logging.getLogger("").addHandler(extra)
strategize(
jobs=args.jobs,
threads=args.threads,
nsamples=args.samples,
min_block_size=args.min_block_size,
max_block_size=args.max_block_size,
StrategizerFactory=StrategizerFactoryDictionnary[args.strategizer],
dump_filename=args.filename,
)
| gpl-3.0 |
mahendra-r/edx-platform | cms/djangoapps/contentstore/features/common.py | 42 | 12769 | # pylint: disable=missing-docstring
# pylint: disable=redefined-outer-name
import os
from lettuce import world, step
from nose.tools import assert_true, assert_in # pylint: disable=no-name-in-module
from django.conf import settings
from student.roles import CourseStaffRole, CourseInstructorRole, GlobalStaff
from student.models import get_user
from selenium.webdriver.common.keys import Keys
from logging import getLogger
from student.tests.factories import AdminFactory
from student import auth
logger = getLogger(__name__)
from terrain.browser import reset_data
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
@step('I (?:visit|access|open) the Studio homepage$')
def i_visit_the_studio_homepage(_step):
# To make this go to port 8001, put
# LETTUCE_SERVER_PORT = 8001
# in your settings.py file.
world.visit('/')
signin_css = 'a.action-signin'
assert world.is_css_present(signin_css)
@step('I am logged into Studio$')
def i_am_logged_into_studio(_step):
log_into_studio()
@step('I confirm the alert$')
def i_confirm_with_ok(_step):
world.browser.get_alert().accept()
@step(u'I press the "([^"]*)" delete icon$')
def i_press_the_category_delete_icon(_step, category):
if category == 'section':
css = 'a.action.delete-section-button'
elif category == 'subsection':
css = 'a.action.delete-subsection-button'
else:
assert False, 'Invalid category: %s' % category
world.css_click(css)
@step('I have opened a new course in Studio$')
def i_have_opened_a_new_course(_step):
open_new_course()
@step('I have populated a new course in Studio$')
def i_have_populated_a_new_course(_step):
world.clear_courses()
course = world.CourseFactory.create()
world.scenario_dict['COURSE'] = course
section = world.ItemFactory.create(parent_location=course.location)
world.ItemFactory.create(
parent_location=section.location,
category='sequential',
display_name='Subsection One',
)
user = create_studio_user(is_staff=False)
add_course_author(user, course)
log_into_studio()
world.css_click('a.course-link')
world.wait_for_js_to_load()
@step('(I select|s?he selects) the new course')
def select_new_course(_step, whom):
course_link_css = 'a.course-link'
world.css_click(course_link_css)
@step(u'I press the "([^"]*)" notification button$')
def press_the_notification_button(_step, name):
# Because the notification uses a CSS transition,
# Selenium will always report it as being visible.
# This makes it very difficult to successfully click
# the "Save" button at the UI level.
# Instead, we use JavaScript to reliably click
# the button.
btn_css = 'div#page-notification button.action-%s' % name.lower()
world.trigger_event(btn_css, event='focus')
world.browser.execute_script("$('{}').click()".format(btn_css))
world.wait_for_ajax_complete()
@step('I change the "(.*)" field to "(.*)"$')
def i_change_field_to_value(_step, field, value):
field_css = '#%s' % '-'.join([s.lower() for s in field.split()])
ele = world.css_find(field_css).first
ele.fill(value)
ele._element.send_keys(Keys.ENTER)
@step('I reset the database')
def reset_the_db(_step):
"""
When running Lettuce tests using examples (i.e. "Confirmation is
shown on save" in course-settings.feature), the normal hooks
aren't called between examples. reset_data should run before each
scenario to flush the test database. When this doesn't happen we
get errors due to trying to insert a non-unique entry. So instead,
we delete the database manually. This has the effect of removing
any users and courses that have been created during the test run.
"""
reset_data(None)
@step('I see a confirmation that my changes have been saved')
def i_see_a_confirmation(step):
confirmation_css = '#alert-confirmation'
assert world.is_css_present(confirmation_css)
def open_new_course():
world.clear_courses()
create_studio_user()
log_into_studio()
create_a_course()
def create_studio_user(
uname='robot',
email='[email protected]',
password='test',
is_staff=False):
studio_user = world.UserFactory(
username=uname,
email=email,
password=password,
is_staff=is_staff)
registration = world.RegistrationFactory(user=studio_user)
registration.register(studio_user)
registration.activate()
return studio_user
def fill_in_course_info(
name='Robot Super Course',
org='MITx',
num='101',
run='2013_Spring'):
world.css_fill('.new-course-name', name)
world.css_fill('.new-course-org', org)
world.css_fill('.new-course-number', num)
world.css_fill('.new-course-run', run)
def log_into_studio(
uname='robot',
email='[email protected]',
password='test',
name='Robot Studio'):
world.log_in(username=uname, password=password, email=email, name=name)
# Navigate to the studio dashboard
world.visit('/')
assert_in(uname, world.css_text('span.account-username', timeout=10))
def add_course_author(user, course):
"""
Add the user to the instructor group of the course
so they will have the permissions to see it in studio
"""
global_admin = AdminFactory()
for role in (CourseStaffRole, CourseInstructorRole):
auth.add_users(global_admin, role(course.id), user)
def create_a_course():
course = world.CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')
world.scenario_dict['COURSE'] = course
user = world.scenario_dict.get("USER")
if not user:
user = get_user('[email protected]')
add_course_author(user, course)
# Navigate to the studio dashboard
world.visit('/')
course_link_css = 'a.course-link'
world.css_click(course_link_css)
course_title_css = 'span.course-title'
assert_true(world.is_css_present(course_title_css))
def add_section():
world.css_click('.outline .button-new')
assert_true(world.is_css_present('.outline-section .xblock-field-value'))
def set_date_and_time(date_css, desired_date, time_css, desired_time, key=None):
set_element_value(date_css, desired_date, key)
world.wait_for_ajax_complete()
set_element_value(time_css, desired_time, key)
world.wait_for_ajax_complete()
def set_element_value(element_css, element_value, key=None):
element = world.css_find(element_css).first
element.fill(element_value)
# hit TAB or provided key to trigger save content
if key is not None:
element._element.send_keys(getattr(Keys, key)) # pylint: disable=protected-access
else:
element._element.send_keys(Keys.TAB) # pylint: disable=protected-access
@step('I have enabled the (.*) advanced module$')
def i_enabled_the_advanced_module(step, module):
step.given('I have opened a new course section in Studio')
world.css_click('.nav-course-settings')
world.css_click('.nav-course-settings-advanced a')
type_in_codemirror(0, '["%s"]' % module)
press_the_notification_button(step, 'Save')
@world.absorb
def create_unit_from_course_outline():
"""
Expands the section and clicks on the New Unit link.
The end result is the page where the user is editing the new unit.
"""
css_selectors = [
'.outline-subsection .expand-collapse', '.outline-subsection .button-new'
]
for selector in css_selectors:
world.css_click(selector)
world.wait_for_mathjax()
world.wait_for_xmodule()
world.wait_for_loading()
assert world.is_css_present('ul.new-component-type')
@world.absorb
def wait_for_loading():
"""
Waits for the loading indicator to be hidden.
"""
world.wait_for(lambda _driver: len(world.browser.find_by_css('div.ui-loading.is-hidden')) > 0)
@step('I have clicked the new unit button$')
@step(u'I am in Studio editing a new unit$')
def edit_new_unit(step):
step.given('I have populated a new course in Studio')
create_unit_from_course_outline()
@step('the save notification button is disabled')
def save_button_disabled(step):
button_css = '.action-save'
disabled = 'is-disabled'
assert world.css_has_class(button_css, disabled)
@step('the "([^"]*)" button is disabled')
def button_disabled(step, value):
button_css = 'input[value="%s"]' % value
assert world.css_has_class(button_css, 'is-disabled')
def _do_studio_prompt_action(intent, action):
"""
Wait for a studio prompt to appear and press the specified action button
See common/js/components/views/feedback_prompt.js for implementation
"""
assert intent in [
'warning',
'error',
'confirmation',
'announcement',
'step-required',
'help',
'mini',
]
assert action in ['primary', 'secondary']
world.wait_for_present('div.wrapper-prompt.is-shown#prompt-{}'.format(intent))
action_css = 'li.nav-item > button.action-{}'.format(action)
world.trigger_event(action_css, event='focus')
world.browser.execute_script("$('{}').click()".format(action_css))
world.wait_for_ajax_complete()
world.wait_for_present('div.wrapper-prompt.is-hiding#prompt-{}'.format(intent))
@world.absorb
def confirm_studio_prompt():
_do_studio_prompt_action('warning', 'primary')
@step('I confirm the prompt')
def confirm_the_prompt(step):
confirm_studio_prompt()
@step(u'I am shown a prompt$')
def i_am_shown_a_notification(step):
assert world.is_css_present('.wrapper-prompt')
def type_in_codemirror(index, text, find_prefix="$"):
script = """
var cm = {find_prefix}('div.CodeMirror:eq({index})').get(0).CodeMirror;
cm.getInputField().focus();
cm.setValue(arguments[0]);
cm.getInputField().blur();""".format(index=index, find_prefix=find_prefix)
world.browser.driver.execute_script(script, str(text))
world.wait_for_ajax_complete()
def get_codemirror_value(index=0, find_prefix="$"):
return world.browser.driver.execute_script(
"""
return {find_prefix}('div.CodeMirror:eq({index})').get(0).CodeMirror.getValue();
""".format(index=index, find_prefix=find_prefix)
)
def attach_file(filename, sub_path):
path = os.path.join(TEST_ROOT, sub_path, filename)
world.browser.execute_script("$('input.file-input').css('display', 'block')")
assert_true(os.path.exists(path))
world.browser.attach_file('file', os.path.abspath(path))
def upload_file(filename, sub_path=''):
# The file upload dialog is a faux modal, a div that takes over the display
attach_file(filename, sub_path)
modal_css = 'div.wrapper-modal-window-assetupload'
button_css = '{} .action-upload'.format(modal_css)
world.css_click(button_css)
# Clicking the Upload button triggers an AJAX POST.
world.wait_for_ajax_complete()
# The modal stays up with a "File uploaded succeeded" confirmation message, then goes away.
# It should take under 2 seconds, so wait up to 10.
# Note that is_css_not_present will return as soon as the element is gone.
assert world.is_css_not_present(modal_css, wait_time=10)
@step(u'"([^"]*)" logs in$')
def other_user_login(step, name):
step.given('I log out')
world.visit('/')
signin_css = 'a.action-signin'
world.is_css_present(signin_css)
world.css_click(signin_css)
def fill_login_form():
login_form = world.browser.find_by_css('form#login_form')
login_form.find_by_name('email').fill(name + '@edx.org')
login_form.find_by_name('password').fill("test")
login_form.find_by_name('submit').click()
world.retry_on_exception(fill_login_form)
assert_true(world.is_css_present('.new-course-button'))
world.scenario_dict['USER'] = get_user(name + '@edx.org')
@step(u'the user "([^"]*)" exists( as a course (admin|staff member|is_staff))?$')
def create_other_user(_step, name, has_extra_perms, role_name):
email = name + '@edx.org'
user = create_studio_user(uname=name, password="test", email=email)
if has_extra_perms:
if role_name == "is_staff":
GlobalStaff().add_users(user)
else:
if role_name == "admin":
# admins get staff privileges, as well
roles = (CourseStaffRole, CourseInstructorRole)
else:
roles = (CourseStaffRole,)
course_key = world.scenario_dict["COURSE"].id
global_admin = AdminFactory()
for role in roles:
auth.add_users(global_admin, role(course_key), user)
@step('I log out')
def log_out(_step):
world.visit('logout')
| agpl-3.0 |
fortes/Rfugee | lib/requests-2.7.0-py2.7.egg/requests/packages/urllib3/exceptions.py | 487 | 4374 |
## Base Exceptions
class HTTPError(Exception):
"Base exception used by this module."
pass
class HTTPWarning(Warning):
"Base warning used by this module."
pass
class PoolError(HTTPError):
"Base exception for errors caused within a pool."
def __init__(self, pool, message):
self.pool = pool
HTTPError.__init__(self, "%s: %s" % (pool, message))
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, None)
class RequestError(PoolError):
"Base exception for PoolErrors that have associated URLs."
def __init__(self, pool, url, message):
self.url = url
PoolError.__init__(self, pool, message)
def __reduce__(self):
# For pickling purposes.
return self.__class__, (None, self.url, None)
class SSLError(HTTPError):
"Raised when SSL certificate fails in an HTTPS connection."
pass
class ProxyError(HTTPError):
"Raised when the connection to a proxy fails."
pass
class DecodeError(HTTPError):
"Raised when automatic decoding based on Content-Type fails."
pass
class ProtocolError(HTTPError):
"Raised when something unexpected happens mid-request/response."
pass
#: Renamed to ProtocolError but aliased for backwards compatibility.
ConnectionError = ProtocolError
## Leaf Exceptions
class MaxRetryError(RequestError):
"""Raised when the maximum number of retries is exceeded.
:param pool: The connection pool
:type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`
:param string url: The requested Url
:param exceptions.Exception reason: The underlying error
"""
def __init__(self, pool, url, reason=None):
self.reason = reason
message = "Max retries exceeded with url: %s (Caused by %r)" % (
url, reason)
RequestError.__init__(self, pool, url, message)
class HostChangedError(RequestError):
"Raised when an existing pool gets a request for a foreign host."
def __init__(self, pool, url, retries=3):
message = "Tried to open a foreign host with url: %s" % url
RequestError.__init__(self, pool, url, message)
self.retries = retries
class TimeoutStateError(HTTPError):
""" Raised when passing an invalid state to a timeout """
pass
class TimeoutError(HTTPError):
""" Raised when a socket timeout error occurs.
Catching this error will catch both :exc:`ReadTimeoutErrors
<ReadTimeoutError>` and :exc:`ConnectTimeoutErrors <ConnectTimeoutError>`.
"""
pass
class ReadTimeoutError(TimeoutError, RequestError):
"Raised when a socket timeout occurs while receiving data from a server"
pass
# This timeout error does not have a URL attached and needs to inherit from the
# base HTTPError
class ConnectTimeoutError(TimeoutError):
"Raised when a socket timeout occurs while connecting to a server"
pass
class EmptyPoolError(PoolError):
"Raised when a pool runs out of connections and no more are allowed."
pass
class ClosedPoolError(PoolError):
"Raised when a request enters a pool after the pool has been closed."
pass
class LocationValueError(ValueError, HTTPError):
"Raised when there is something wrong with a given URL input."
pass
class LocationParseError(LocationValueError):
"Raised when get_host or similar fails to parse the URL input."
def __init__(self, location):
message = "Failed to parse: %s" % location
HTTPError.__init__(self, message)
self.location = location
class ResponseError(HTTPError):
"Used as a container for an error reason supplied in a MaxRetryError."
GENERIC_ERROR = 'too many error responses'
SPECIFIC_ERROR = 'too many {status_code} error responses'
class SecurityWarning(HTTPWarning):
    "Warned when performing security-reducing actions"
pass
class InsecureRequestWarning(SecurityWarning):
"Warned when making an unverified HTTPS request."
pass
class SystemTimeWarning(SecurityWarning):
"Warned when system time is suspected to be wrong"
pass
class InsecurePlatformWarning(SecurityWarning):
"Warned when certain SSL configuration is not available on a platform."
pass
class ResponseNotChunked(ProtocolError, ValueError):
"Response needs to be chunked in order to read it as chunks."
pass
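# --- Editor's illustrative sketch (not part of the original module) ---
# How callers typically distinguish these exception types; `pool` stands for
# some HTTPConnectionPool instance and the URL is hypothetical.
#
#     try:
#         pool.urlopen('GET', '/')
#     except MaxRetryError as exc:
#         print('gave up on %s: %r' % (exc.url, exc.reason))
#     except (ConnectTimeoutError, ReadTimeoutError):
#         print('timed out')
#     except ProtocolError:
#         print('connection error mid-request/response')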
| mit |
krzysz00/rust | src/etc/check-sanitycheck.py | 37 | 1805 | #!/usr/bin/env python
#
# Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
# file at the top-level directory of this distribution and at
# http://rust-lang.org/COPYRIGHT.
#
# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
# option. This file may not be copied, modified, or distributed
# except according to those terms.
import os
import subprocess
import sys
import functools
STATUS = 0
def error_unless_permitted(env_var, message):
global STATUS
if not os.getenv(env_var):
sys.stderr.write(message)
STATUS = 1
def only_on(platforms):
def decorator(func):
@functools.wraps(func)
def inner():
if any(map(lambda x: sys.platform.startswith(x), platforms)):
func()
return inner
return decorator
@only_on(['linux', 'darwin', 'freebsd', 'openbsd'])
def check_rlimit_core():
import resource
soft, hard = resource.getrlimit(resource.RLIMIT_CORE)
if soft > 0:
error_unless_permitted('ALLOW_NONZERO_RLIMIT_CORE', """\
RLIMIT_CORE is set to a nonzero value (%d). During the debuginfo tests, the test
suite will segfault many rustc's, creating many potentially large core files.
Set ALLOW_NONZERO_RLIMIT_CORE to ignore this warning.
""" % (soft))
@only_on(['win32'])
def check_console_code_page():
if '65001' not in subprocess.check_output(['cmd', '/c', 'chcp']):
sys.stderr.write('Warning: the console output code page is not UTF-8, \
some tests may fail. Use `cmd /c "chcp 65001"` to setup UTF-8 code page.\n')
def main():
check_console_code_page()
check_rlimit_core()
if __name__ == '__main__':
main()
sys.exit(STATUS)
| apache-2.0 |
mahak/nova | nova/tests/functional/test_login.py | 25 | 1128 | # Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from nova.tests.functional import integrated_helpers
LOG = logging.getLogger(__name__)
class LoginTest(integrated_helpers._IntegratedTestBase):
api_major_version = 'v2'
def test_login(self):
# Simple check - we list flavors - so we know we're logged in.
flavors = self.api.get_flavors()
for flavor in flavors:
LOG.debug("flavor: %s", flavor)
class LoginTestV21(LoginTest):
api_major_version = 'v2.1'
| apache-2.0 |
msabramo/PyHamcrest | tests/hamcrest_unit_test/collection/is_empty_test.py | 2 | 1798 | from __future__ import absolute_import
from hamcrest.library.collection.is_empty import *
from hamcrest_unit_test.matcher_test import MatcherTest
from .sequencemixin import GeneratorForm, SequenceForm
__author__ = "Chris Rose"
__copyright__ = "Copyright 2012 hamcrest.org"
__license__ = "BSD, see License.txt"
class LengthHaver(object):
def __init__(self, len_):
self._len = len_
def __len__(self):
return self._len
class EmptyCollectionTest(MatcherTest):
def testReturnsTrueForEmptyStandardCollections(self):
matcher = empty()
self.assert_matches('empty tuple', matcher, ())
self.assert_matches('empty list', matcher, [])
        self.assert_matches('empty dictionary', matcher, {})
def testReturnsTrueForEmptyCollectionLike(self):
matcher = empty()
self.assert_matches('empty protocol object', matcher, LengthHaver(0))
def testReturnsFalseForNonEmptyStandardCollections(self):
matcher = empty()
self.assert_does_not_match('non-empty tuple', matcher, (1,))
self.assert_does_not_match('non-empty list', matcher, [1])
        self.assert_does_not_match('non-empty dictionary', matcher, {1:2})
def testReturnsFalseForNonEmptyCollectionLike(self):
matcher = empty()
self.assert_does_not_match('non-empty protocol object', matcher, LengthHaver(1))
def testHasReadableDescription(self):
self.assert_description("an empty collection", empty())
def testSuccessfulMatchDoesNotGenerateMismatchDescription(self):
self.assert_no_mismatch_description(empty(), [])
def testDescribeMismatch(self):
self.assert_mismatch_description("has 3 item(s)", empty(), [1,2,3])
self.assert_mismatch_description("does not support length", empty(), 1)
| bsd-3-clause |
nthiep/global-ssh-server | lib/python2.7/site-packages/rest_framework/pagination.py | 8 | 26998 | # coding: utf-8
"""
Pagination serializers determine the structure of the output that should
be used for paginated responses.
"""
from __future__ import unicode_literals
from base64 import b64encode, b64decode
from collections import namedtuple
from django.core.paginator import InvalidPage, Paginator as DjangoPaginator
from django.template import Context, loader
from django.utils import six
from django.utils.six.moves.urllib import parse as urlparse
from django.utils.translation import ugettext_lazy as _
from rest_framework.compat import OrderedDict
from rest_framework.exceptions import NotFound
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_framework.utils.urls import (
replace_query_param, remove_query_param
)
import warnings
def _positive_int(integer_string, strict=False, cutoff=None):
"""
Cast a string to a strictly positive integer.
"""
ret = int(integer_string)
if ret < 0 or (ret == 0 and strict):
raise ValueError()
if cutoff:
ret = min(ret, cutoff)
return ret
def _divide_with_ceil(a, b):
"""
    Returns 'a' divided by 'b', with any remainder rounded up.
"""
if a % b:
return (a // b) + 1
return a // b
def _get_count(queryset):
"""
Determine an object count, supporting either querysets or regular lists.
"""
try:
return queryset.count()
except (AttributeError, TypeError):
return len(queryset)
def _get_displayed_page_numbers(current, final):
"""
This utility function determines a list of page numbers to display.
This gives us a nice contextually relevant set of page numbers.
For example:
current=14, final=16 -> [1, None, 13, 14, 15, 16]
This implementation gives one page to each side of the cursor,
or two pages to the side when the cursor is at the edge, then
    ensures that any breaks between non-continuous page numbers never
    remove only a single page.
    For an alternative implementation which gives two pages to each side of
the cursor, eg. as in GitHub issue list pagination, see:
https://gist.github.com/tomchristie/321140cebb1c4a558b15
"""
assert current >= 1
assert final >= current
if final <= 5:
return list(range(1, final + 1))
# We always include the first two pages, last two pages, and
# two pages either side of the current page.
included = set((
1,
current - 1, current, current + 1,
final
))
# If the break would only exclude a single page number then we
# may as well include the page number instead of the break.
if current <= 4:
included.add(2)
included.add(3)
if current >= final - 3:
included.add(final - 1)
included.add(final - 2)
# Now sort the page numbers and drop anything outside the limits.
included = [
idx for idx in sorted(list(included))
if idx > 0 and idx <= final
]
# Finally insert any `...` breaks
if current > 4:
included.insert(1, None)
if current < final - 3:
included.insert(len(included) - 1, None)
return included
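# Editor's illustrative examples of the behaviour described above:
#
#     _get_displayed_page_numbers(1, 4)    # -> [1, 2, 3, 4]
#     _get_displayed_page_numbers(1, 10)   # -> [1, 2, 3, None, 10]
#     _get_displayed_page_numbers(6, 10)   # -> [1, None, 5, 6, 7, None, 10]
#     _get_displayed_page_numbers(10, 10)  # -> [1, None, 8, 9, 10]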
def _get_page_links(page_numbers, current, url_func):
"""
Given a list of page numbers and `None` page breaks,
return a list of `PageLink` objects.
"""
page_links = []
for page_number in page_numbers:
if page_number is None:
page_link = PAGE_BREAK
else:
page_link = PageLink(
url=url_func(page_number),
number=page_number,
is_active=(page_number == current),
is_break=False
)
page_links.append(page_link)
return page_links
def _decode_cursor(encoded):
"""
Given a string representing an encoded cursor, return a `Cursor` instance.
"""
# The offset in the cursor is used in situations where we have a
# nearly-unique index. (Eg millisecond precision creation timestamps)
# We guard against malicious users attempting to cause expensive database
# queries, by having a hard cap on the maximum possible size of the offset.
OFFSET_CUTOFF = 1000
try:
querystring = b64decode(encoded.encode('ascii')).decode('ascii')
tokens = urlparse.parse_qs(querystring, keep_blank_values=True)
offset = tokens.get('o', ['0'])[0]
offset = _positive_int(offset, cutoff=OFFSET_CUTOFF)
reverse = tokens.get('r', ['0'])[0]
reverse = bool(int(reverse))
position = tokens.get('p', [None])[0]
except (TypeError, ValueError):
return None
return Cursor(offset=offset, reverse=reverse, position=position)
def _encode_cursor(cursor):
"""
Given a Cursor instance, return an encoded string representation.
"""
tokens = {}
if cursor.offset != 0:
tokens['o'] = str(cursor.offset)
if cursor.reverse:
tokens['r'] = '1'
if cursor.position is not None:
tokens['p'] = cursor.position
querystring = urlparse.urlencode(tokens, doseq=True)
return b64encode(querystring.encode('ascii')).decode('ascii')
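# Editor's illustrative round trip, with hypothetical values:
#
#     cursor = Cursor(offset=2, reverse=True, position='2015-01-01')
#     token = _encode_cursor(cursor)   # base64 of 'o=2&r=1&p=2015-01-01' (key order may vary)
#     _decode_cursor(token) == cursor  # -> True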
def _reverse_ordering(ordering_tuple):
"""
Given an order_by tuple such as `('-created', 'uuid')` reverse the
ordering and return a new tuple, eg. `('created', '-uuid')`.
"""
def invert(x):
return x[1:] if (x.startswith('-')) else '-' + x
return tuple([invert(item) for item in ordering_tuple])
Cursor = namedtuple('Cursor', ['offset', 'reverse', 'position'])
PageLink = namedtuple('PageLink', ['url', 'number', 'is_active', 'is_break'])
PAGE_BREAK = PageLink(url=None, number=None, is_active=False, is_break=True)
class BasePagination(object):
display_page_controls = False
def paginate_queryset(self, queryset, request, view=None): # pragma: no cover
raise NotImplementedError('paginate_queryset() must be implemented.')
def get_paginated_response(self, data): # pragma: no cover
raise NotImplementedError('get_paginated_response() must be implemented.')
def to_html(self): # pragma: no cover
raise NotImplementedError('to_html() must be implemented to display page controls.')
class PageNumberPagination(BasePagination):
"""
A simple page number based style that supports page numbers as
query parameters. For example:
http://api.example.org/accounts/?page=4
http://api.example.org/accounts/?page=4&page_size=100
"""
# The default page size.
# Defaults to `None`, meaning pagination is disabled.
page_size = api_settings.PAGE_SIZE
# Client can control the page using this query parameter.
page_query_param = 'page'
# Client can control the page size using this query parameter.
# Default is 'None'. Set to eg 'page_size' to enable usage.
page_size_query_param = None
# Set to an integer to limit the maximum page size the client may request.
# Only relevant if 'page_size_query_param' has also been set.
max_page_size = None
last_page_strings = ('last',)
template = 'rest_framework/pagination/numbers.html'
invalid_page_message = _('Invalid page "{page_number}": {message}.')
def _handle_backwards_compat(self, view):
"""
Prior to version 3.1, pagination was handled in the view, and the
attributes were set there. The attributes should now be set on
the pagination class, but the old style is still pending deprecation.
"""
assert not (
getattr(view, 'pagination_serializer_class', None) or
getattr(api_settings, 'DEFAULT_PAGINATION_SERIALIZER_CLASS', None)
), (
"The pagination_serializer_class attribute and "
"DEFAULT_PAGINATION_SERIALIZER_CLASS setting have been removed as "
"part of the 3.1 pagination API improvement. See the pagination "
"documentation for details on the new API."
)
for (settings_key, attr_name) in (
('PAGINATE_BY', 'page_size'),
('PAGINATE_BY_PARAM', 'page_size_query_param'),
('MAX_PAGINATE_BY', 'max_page_size')
):
value = getattr(api_settings, settings_key, None)
if value is not None:
setattr(self, attr_name, value)
warnings.warn(
"The `%s` settings key is pending deprecation. "
"Use the `%s` attribute on the pagination class instead." % (
settings_key, attr_name
),
PendingDeprecationWarning,
)
for (view_attr, attr_name) in (
('paginate_by', 'page_size'),
('page_query_param', 'page_query_param'),
('paginate_by_param', 'page_size_query_param'),
('max_paginate_by', 'max_page_size')
):
value = getattr(view, view_attr, None)
if value is not None:
setattr(self, attr_name, value)
warnings.warn(
"The `%s` view attribute is pending deprecation. "
"Use the `%s` attribute on the pagination class instead." % (
view_attr, attr_name
),
PendingDeprecationWarning,
)
def paginate_queryset(self, queryset, request, view=None):
"""
Paginate a queryset if required, either returning a
page object, or `None` if pagination is not configured for this view.
"""
self._handle_backwards_compat(view)
page_size = self.get_page_size(request)
if not page_size:
return None
paginator = DjangoPaginator(queryset, page_size)
page_number = request.query_params.get(self.page_query_param, 1)
if page_number in self.last_page_strings:
page_number = paginator.num_pages
try:
self.page = paginator.page(page_number)
except InvalidPage as exc:
msg = self.invalid_page_message.format(
page_number=page_number, message=six.text_type(exc)
)
raise NotFound(msg)
if paginator.count > 1 and self.template is not None:
# The browsable API should display pagination controls.
self.display_page_controls = True
self.request = request
return list(self.page)
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.page.paginator.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_page_size(self, request):
if self.page_size_query_param:
try:
return _positive_int(
request.query_params[self.page_size_query_param],
strict=True,
cutoff=self.max_page_size
)
except (KeyError, ValueError):
pass
return self.page_size
def get_next_link(self):
if not self.page.has_next():
return None
url = self.request.build_absolute_uri()
page_number = self.page.next_page_number()
return replace_query_param(url, self.page_query_param, page_number)
def get_previous_link(self):
if not self.page.has_previous():
return None
url = self.request.build_absolute_uri()
page_number = self.page.previous_page_number()
if page_number == 1:
return remove_query_param(url, self.page_query_param)
return replace_query_param(url, self.page_query_param, page_number)
def get_html_context(self):
base_url = self.request.build_absolute_uri()
def page_number_to_url(page_number):
if page_number == 1:
return remove_query_param(base_url, self.page_query_param)
else:
return replace_query_param(base_url, self.page_query_param, page_number)
current = self.page.number
final = self.page.paginator.num_pages
page_numbers = _get_displayed_page_numbers(current, final)
page_links = _get_page_links(page_numbers, current, page_number_to_url)
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link(),
'page_links': page_links
}
def to_html(self):
template = loader.get_template(self.template)
context = Context(self.get_html_context())
return template.render(context)
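# Editor's illustrative configuration sketch: a project would typically expose
# the optional query parameters by subclassing, e.g.
#
#     class StandardResultsPagination(PageNumberPagination):
#         page_size = 25
#         page_size_query_param = 'page_size'
#         max_page_size = 100
#
# and then set `pagination_class = StandardResultsPagination` on a view (or use
# the DEFAULT_PAGINATION_CLASS setting). The numbers above are only examples.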
class LimitOffsetPagination(BasePagination):
"""
A limit/offset based style. For example:
http://api.example.org/accounts/?limit=100
http://api.example.org/accounts/?offset=400&limit=100
"""
default_limit = api_settings.PAGE_SIZE
limit_query_param = 'limit'
offset_query_param = 'offset'
max_limit = None
template = 'rest_framework/pagination/numbers.html'
def paginate_queryset(self, queryset, request, view=None):
self.limit = self.get_limit(request)
if self.limit is None:
return None
self.offset = self.get_offset(request)
self.count = _get_count(queryset)
self.request = request
if self.count > self.limit and self.template is not None:
self.display_page_controls = True
return list(queryset[self.offset:self.offset + self.limit])
def get_paginated_response(self, data):
return Response(OrderedDict([
('count', self.count),
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_limit(self, request):
if self.limit_query_param:
try:
return _positive_int(
request.query_params[self.limit_query_param],
cutoff=self.max_limit
)
except (KeyError, ValueError):
pass
return self.default_limit
def get_offset(self, request):
try:
return _positive_int(
request.query_params[self.offset_query_param],
)
except (KeyError, ValueError):
return 0
def get_next_link(self):
if self.offset + self.limit >= self.count:
return None
url = self.request.build_absolute_uri()
offset = self.offset + self.limit
return replace_query_param(url, self.offset_query_param, offset)
def get_previous_link(self):
if self.offset <= 0:
return None
url = self.request.build_absolute_uri()
if self.offset - self.limit <= 0:
return remove_query_param(url, self.offset_query_param)
offset = self.offset - self.limit
return replace_query_param(url, self.offset_query_param, offset)
def get_html_context(self):
base_url = self.request.build_absolute_uri()
current = _divide_with_ceil(self.offset, self.limit) + 1
# The number of pages is a little bit fiddly.
        # We need to add the number of pages from the current offset to the end
        # to the number of pages up to the current offset.
# When offset is not strictly divisible by the limit then we may
# end up introducing an extra page as an artifact.
final = (
_divide_with_ceil(self.count - self.offset, self.limit) +
_divide_with_ceil(self.offset, self.limit)
)
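        # Editor's worked example: with count=20, limit=10, offset=5 the two
        # terms are ceil(15/10)=2 and ceil(5/10)=1, so final=3 (5 items, then
        # 10, then 5); that is one more page than ceil(20/10)=2, which is the
        # artifact mentioned above.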
def page_number_to_url(page_number):
if page_number == 1:
return remove_query_param(base_url, self.offset_query_param)
else:
offset = self.offset + ((page_number - current) * self.limit)
return replace_query_param(base_url, self.offset_query_param, offset)
page_numbers = _get_displayed_page_numbers(current, final)
page_links = _get_page_links(page_numbers, current, page_number_to_url)
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link(),
'page_links': page_links
}
def to_html(self):
template = loader.get_template(self.template)
context = Context(self.get_html_context())
return template.render(context)
class CursorPagination(BasePagination):
"""
    The cursor pagination implementation is necessarily complex.
For an overview of the position/offset style we use, see this post:
http://cramer.io/2011/03/08/building-cursors-for-the-disqus-api/
"""
cursor_query_param = 'cursor'
page_size = api_settings.PAGE_SIZE
invalid_cursor_message = _('Invalid cursor')
ordering = '-created'
template = 'rest_framework/pagination/previous_and_next.html'
def paginate_queryset(self, queryset, request, view=None):
if self.page_size is None:
return None
self.base_url = request.build_absolute_uri()
self.ordering = self.get_ordering(request, queryset, view)
# Determine if we have a cursor, and if so then decode it.
encoded = request.query_params.get(self.cursor_query_param)
if encoded is None:
self.cursor = None
(offset, reverse, current_position) = (0, False, None)
else:
self.cursor = _decode_cursor(encoded)
if self.cursor is None:
raise NotFound(self.invalid_cursor_message)
(offset, reverse, current_position) = self.cursor
# Cursor pagination always enforces an ordering.
if reverse:
queryset = queryset.order_by(*_reverse_ordering(self.ordering))
else:
queryset = queryset.order_by(*self.ordering)
# If we have a cursor with a fixed position then filter by that.
if current_position is not None:
order = self.ordering[0]
is_reversed = order.startswith('-')
order_attr = order.lstrip('-')
# Test for: (cursor reversed) XOR (queryset reversed)
if self.cursor.reverse != is_reversed:
kwargs = {order_attr + '__lt': current_position}
else:
kwargs = {order_attr + '__gt': current_position}
queryset = queryset.filter(**kwargs)
# If we have an offset cursor then offset the entire page by that amount.
# We also always fetch an extra item in order to determine if there is a
# page following on from this one.
results = list(queryset[offset:offset + self.page_size + 1])
self.page = list(results[:self.page_size])
# Determine the position of the final item following the page.
if len(results) > len(self.page):
has_following_postion = True
following_position = self._get_position_from_instance(results[-1], self.ordering)
else:
has_following_postion = False
following_position = None
# If we have a reverse queryset, then the query ordering was in reverse
# so we need to reverse the items again before returning them to the user.
if reverse:
self.page = list(reversed(self.page))
if reverse:
# Determine next and previous positions for reverse cursors.
self.has_next = (current_position is not None) or (offset > 0)
self.has_previous = has_following_postion
if self.has_next:
self.next_position = current_position
if self.has_previous:
self.previous_position = following_position
else:
# Determine next and previous positions for forward cursors.
self.has_next = has_following_postion
self.has_previous = (current_position is not None) or (offset > 0)
if self.has_next:
self.next_position = following_position
if self.has_previous:
self.previous_position = current_position
# Display page controls in the browsable API if there is more
# than one page.
if (self.has_previous or self.has_next) and self.template is not None:
self.display_page_controls = True
return self.page
def get_next_link(self):
if not self.has_next:
return None
if self.cursor and self.cursor.reverse and self.cursor.offset != 0:
# If we're reversing direction and we have an offset cursor
# then we cannot use the first position we find as a marker.
compare = self._get_position_from_instance(self.page[-1], self.ordering)
else:
compare = self.next_position
offset = 0
for item in reversed(self.page):
position = self._get_position_from_instance(item, self.ordering)
if position != compare:
# The item in this position and the item following it
# have different positions. We can use this position as
# our marker.
break
            # The item in this position has the same position as the item
            # following it, so we can't use it as a marker position; increment
            # the offset and keep seeking to the previous item.
compare = position
offset += 1
else:
# There were no unique positions in the page.
if not self.has_previous:
# We are on the first page.
# Our cursor will have an offset equal to the page size,
# but no position to filter against yet.
offset = self.page_size
position = None
elif self.cursor.reverse:
# The change in direction will introduce a paging artifact,
# where we end up skipping forward a few extra items.
offset = 0
position = self.previous_position
else:
# Use the position from the existing cursor and increment
# it's offset by the page size.
offset = self.cursor.offset + self.page_size
position = self.previous_position
cursor = Cursor(offset=offset, reverse=False, position=position)
encoded = _encode_cursor(cursor)
return replace_query_param(self.base_url, self.cursor_query_param, encoded)
def get_previous_link(self):
if not self.has_previous:
return None
if self.cursor and not self.cursor.reverse and self.cursor.offset != 0:
# If we're reversing direction and we have an offset cursor
# then we cannot use the first position we find as a marker.
compare = self._get_position_from_instance(self.page[0], self.ordering)
else:
compare = self.previous_position
offset = 0
for item in self.page:
position = self._get_position_from_instance(item, self.ordering)
if position != compare:
# The item in this position and the item following it
# have different positions. We can use this position as
# our marker.
break
            # The item in this position has the same position as the item
            # following it, so we can't use it as a marker position; increment
            # the offset and keep seeking to the previous item.
compare = position
offset += 1
else:
# There were no unique positions in the page.
if not self.has_next:
# We are on the final page.
# Our cursor will have an offset equal to the page size,
# but no position to filter against yet.
offset = self.page_size
position = None
elif self.cursor.reverse:
# Use the position from the existing cursor and increment
# it's offset by the page size.
offset = self.cursor.offset + self.page_size
position = self.next_position
else:
# The change in direction will introduce a paging artifact,
# where we end up skipping back a few extra items.
offset = 0
position = self.next_position
cursor = Cursor(offset=offset, reverse=True, position=position)
encoded = _encode_cursor(cursor)
return replace_query_param(self.base_url, self.cursor_query_param, encoded)
def get_ordering(self, request, queryset, view):
"""
Return a tuple of strings, that may be used in an `order_by` method.
"""
ordering_filters = [
filter_cls for filter_cls in getattr(view, 'filter_backends', [])
if hasattr(filter_cls, 'get_ordering')
]
if ordering_filters:
# If a filter exists on the view that implements `get_ordering`
# then we defer to that filter to determine the ordering.
filter_cls = ordering_filters[0]
filter_instance = filter_cls()
ordering = filter_instance.get_ordering(request, queryset, view)
assert ordering is not None, (
'Using cursor pagination, but filter class {filter_cls} '
'returned a `None` ordering.'.format(
filter_cls=filter_cls.__name__
)
)
else:
# The default case is to check for an `ordering` attribute
# on this pagination instance.
ordering = self.ordering
assert ordering is not None, (
'Using cursor pagination, but no ordering attribute was declared '
'on the pagination class.'
)
assert isinstance(ordering, (six.string_types, list, tuple)), (
'Invalid ordering. Expected string or tuple, but got {type}'.format(
type=type(ordering).__name__
)
)
if isinstance(ordering, six.string_types):
return (ordering,)
return tuple(ordering)
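    # For example (hypothetical field names): declaring ordering = '-created'
    # on a pagination subclass makes get_ordering() return ('-created',), while
    # ordering = ('-created', 'id') is returned unchanged as a tuple.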
def _get_position_from_instance(self, instance, ordering):
attr = getattr(instance, ordering[0].lstrip('-'))
return six.text_type(attr)
def get_paginated_response(self, data):
return Response(OrderedDict([
('next', self.get_next_link()),
('previous', self.get_previous_link()),
('results', data)
]))
def get_html_context(self):
return {
'previous_url': self.get_previous_link(),
'next_url': self.get_next_link()
}
def to_html(self):
template = loader.get_template(self.template)
context = Context(self.get_html_context())
return template.render(context)
| agpl-3.0 |
agjohnson/readthedocs.org | readthedocs/rtd_tests/tests/test_sync_versions.py | 2 | 8267 | import json
from django.test import TestCase
from builds.models import Version
from builds.constants import STABLE
from projects.models import Project
class TestSyncVersions(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.pip = Project.objects.get(slug='pip')
Version.objects.create(project=self.pip, identifier='origin/master',
verbose_name='master', slug='master',
active=True, machine=True)
Version.objects.create(project=self.pip, identifier='to_delete',
verbose_name='to_delete', slug='to_delete',
active=False)
def test_proper_url_no_slash(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
]}
r = self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
json_data = json.loads(r.content)
self.assertEqual(json_data['deleted_versions'], ['to_delete'])
self.assertEqual(json_data['added_versions'], ['to_add'])
def test_stable_versions(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8',
'verbose_name': '0.8',
},
]
}
self.assertRaises(
Version.DoesNotExist,
Version.objects.get,
slug=STABLE
)
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
def test_update_stable_version(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8',
'verbose_name': '0.8',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '0.9')
version_post_data = {
'tags': [
{
'identifier': '1.0.0',
'verbose_name': '1.0.0',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
version_post_data = {
'tags': [
{
'identifier': '0.7',
'verbose_name': '0.7',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertTrue(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
def test_update_inactive_stable_version(self):
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertEqual(version_stable.identifier, '0.9')
version_stable.active = False
version_stable.save()
version_post_data['tags'].append({
'identifier': '1.0.0',
'verbose_name': '1.0.0',
})
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_stable = Version.objects.get(slug=STABLE)
self.assertFalse(version_stable.active)
self.assertEqual(version_stable.identifier, '1.0.0')
def test_new_tag_update_active(self):
Version.objects.create(project=self.pip, identifier='0.8.3',
verbose_name='0.8.3', slug='0-8-3',
active=True)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_9 = Version.objects.get(slug='0.9')
self.assertTrue(version_9.active)
def test_new_tag_update_inactive(self):
Version.objects.create(project=self.pip, identifier='0.8.3',
verbose_name='0.8.3', slug='0-8-3',
active=False)
version_post_data = {
'branches': [
{
'identifier': 'origin/master',
'verbose_name': 'master',
},
{
'identifier': 'origin/to_add',
'verbose_name': 'to_add',
},
],
'tags': [
{
'identifier': '0.9',
'verbose_name': '0.9',
},
{
'identifier': '0.8.3',
'verbose_name': '0.8.3',
},
]
}
self.client.post(
'/api/v2/project/%s/sync_versions/' % self.pip.pk,
data=json.dumps(version_post_data),
content_type='application/json',
)
version_9 = Version.objects.get(slug='0.9')
self.assertTrue(version_9.active is False)
| mit |
zhhf/charging | charging/plugins/cisco/network_plugin.py | 5 | 6800 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Sumit Naiksatam, Cisco Systems, Inc.
import logging
import webob.exc as wexc
from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import base
from neutron.db import db_base_plugin_v2
from neutron.openstack.common import importutils
from neutron.plugins.cisco.common import cisco_exceptions as cexc
from neutron.plugins.cisco.common import config
from neutron.plugins.cisco.db import network_db_v2 as cdb
from neutron.plugins.cisco import extensions
LOG = logging.getLogger(__name__)
class PluginV2(db_base_plugin_v2.NeutronDbPluginV2):
"""Meta-Plugin with v2 API support for multiple sub-plugins."""
supported_extension_aliases = ["credential", "Cisco qos"]
_methods_to_delegate = ['create_network',
'delete_network', 'update_network', 'get_network',
'get_networks',
'create_port', 'delete_port',
'update_port', 'get_port', 'get_ports',
'create_subnet',
'delete_subnet', 'update_subnet',
'get_subnet', 'get_subnets', ]
CISCO_FAULT_MAP = {
cexc.CredentialAlreadyExists: wexc.HTTPBadRequest,
cexc.CredentialNameNotFound: wexc.HTTPNotFound,
cexc.CredentialNotFound: wexc.HTTPNotFound,
cexc.NetworkSegmentIDNotFound: wexc.HTTPNotFound,
cexc.NetworkVlanBindingAlreadyExists: wexc.HTTPBadRequest,
cexc.NexusComputeHostNotConfigured: wexc.HTTPNotFound,
cexc.NexusConfigFailed: wexc.HTTPBadRequest,
cexc.NexusConnectFailed: wexc.HTTPServiceUnavailable,
cexc.NexusPortBindingNotFound: wexc.HTTPNotFound,
cexc.NoMoreNics: wexc.HTTPBadRequest,
cexc.PortIdForNexusSvi: wexc.HTTPBadRequest,
cexc.PortVnicBindingAlreadyExists: wexc.HTTPBadRequest,
cexc.PortVnicNotFound: wexc.HTTPNotFound,
cexc.QosNameAlreadyExists: wexc.HTTPBadRequest,
cexc.QosNotFound: wexc.HTTPNotFound,
cexc.SubnetNotSpecified: wexc.HTTPBadRequest,
cexc.VlanIDNotAvailable: wexc.HTTPNotFound,
cexc.VlanIDNotFound: wexc.HTTPNotFound,
}
def __init__(self):
"""Load the model class."""
self._model = importutils.import_object(config.CISCO.model_class)
native_bulk_attr_name = ("_%s__native_bulk_support"
% self._model.__class__.__name__)
self.__native_bulk_support = getattr(self._model,
native_bulk_attr_name, False)
if hasattr(self._model, "supported_extension_aliases"):
self.supported_extension_aliases.extend(
self._model.supported_extension_aliases)
neutron_extensions.append_api_extensions_path(extensions.__path__)
# Extend the fault map
self._extend_fault_map()
LOG.debug(_("Plugin initialization complete"))
def __getattribute__(self, name):
"""Delegate core API calls to the model class.
Core API calls are delegated directly to the configured model class.
Note: Bulking calls will be handled by this class, and turned into
non-bulking calls to be considered for delegation.
"""
methods = object.__getattribute__(self, "_methods_to_delegate")
if name in methods:
return getattr(object.__getattribute__(self, "_model"),
name)
else:
return object.__getattribute__(self, name)
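    # For example, a call such as plugin.create_network(context, network) is
    # intercepted here and forwarded to the configured model class, while names
    # outside _methods_to_delegate resolve on this class as usual.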
def __getattr__(self, name):
"""Delegate calls to the extensions.
This delegates the calls to the extensions explicitly implemented by
the model.
"""
if hasattr(self._model, name):
return getattr(self._model, name)
else:
# Must make sure we re-raise the error that led us here, since
            # otherwise getattr() and even hasattr() don't work correctly.
raise AttributeError(
_("'%(model)s' object has no attribute '%(name)s'") %
{'model': self._model, 'name': name})
def _extend_fault_map(self):
"""Extend the Neutron Fault Map for Cisco exceptions.
Map exceptions which are specific to the Cisco Plugin
to standard HTTP exceptions.
"""
base.FAULT_MAP.update(self.CISCO_FAULT_MAP)
"""
Extension API implementation
"""
def get_all_qoss(self, tenant_id):
"""Get all QoS levels."""
LOG.debug(_("get_all_qoss() called"))
qoslist = cdb.get_all_qoss(tenant_id)
return qoslist
def get_qos_details(self, tenant_id, qos_id):
"""Get QoS Details."""
LOG.debug(_("get_qos_details() called"))
return cdb.get_qos(tenant_id, qos_id)
def create_qos(self, tenant_id, qos_name, qos_desc):
"""Create a QoS level."""
LOG.debug(_("create_qos() called"))
qos = cdb.add_qos(tenant_id, qos_name, str(qos_desc))
return qos
def delete_qos(self, tenant_id, qos_id):
"""Delete a QoS level."""
LOG.debug(_("delete_qos() called"))
return cdb.remove_qos(tenant_id, qos_id)
def rename_qos(self, tenant_id, qos_id, new_name):
"""Rename QoS level."""
LOG.debug(_("rename_qos() called"))
return cdb.update_qos(tenant_id, qos_id, new_name)
def get_all_credentials(self):
"""Get all credentials."""
LOG.debug(_("get_all_credentials() called"))
credential_list = cdb.get_all_credentials()
return credential_list
def get_credential_details(self, credential_id):
"""Get a particular credential."""
LOG.debug(_("get_credential_details() called"))
return cdb.get_credential(credential_id)
def rename_credential(self, credential_id, new_name, new_password):
"""Rename the particular credential resource."""
LOG.debug(_("rename_credential() called"))
return cdb.update_credential(credential_id, new_name,
new_password=new_password)
| apache-2.0 |
daoluan/decode-Django | Django-1.5.1/django/contrib/auth/tests/context_processors.py | 96 | 6747 | import os
from django.conf import global_settings
from django.contrib.auth import authenticate
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.contrib.auth.models import User, Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.auth.context_processors import PermWrapper, PermLookupDict
from django.db.models import Q
from django.test import TestCase
from django.test.utils import override_settings
from django.utils._os import upath
class MockUser(object):
def has_module_perms(self, perm):
if perm == 'mockapp':
return True
return False
def has_perm(self, perm):
if perm == 'mockapp.someperm':
return True
return False
class PermWrapperTests(TestCase):
"""
Test some details of the PermWrapper implementation.
"""
class EQLimiterObject(object):
"""
This object makes sure __eq__ will not be called endlessly.
"""
def __init__(self):
self.eq_calls = 0
def __eq__(self, other):
if self.eq_calls > 0:
return True
self.eq_calls += 1
return False
def test_permwrapper_in(self):
"""
Test that 'something' in PermWrapper works as expected.
"""
perms = PermWrapper(MockUser())
# Works for modules and full permissions.
self.assertTrue('mockapp' in perms)
self.assertFalse('nonexisting' in perms)
self.assertTrue('mockapp.someperm' in perms)
self.assertFalse('mockapp.nonexisting' in perms)
def test_permlookupdict_in(self):
"""
No endless loops if accessed with 'in' - refs #18979.
"""
pldict = PermLookupDict(MockUser(), 'mockapp')
with self.assertRaises(TypeError):
self.EQLimiterObject() in pldict
@skipIfCustomUser
@override_settings(
TEMPLATE_DIRS=(
os.path.join(os.path.dirname(upath(__file__)), 'templates'),
),
USE_TZ=False, # required for loading the fixture
PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',),
)
class AuthContextProcessorTests(TestCase):
"""
Tests for the ``django.contrib.auth.context_processors.auth`` processor
"""
urls = 'django.contrib.auth.tests.urls'
fixtures = ['context-processors-users.xml']
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_not_accessed(self):
"""
Tests that the session is not accessed simply by including
the auth context processor
"""
response = self.client.get('/auth_processor_no_attr_access/')
self.assertContains(response, "Session not accessed")
@override_settings(
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES,
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS,
)
def test_session_is_accessed(self):
"""
Tests that the session is accessed if the auth context processor
is used and relevant attributes accessed.
"""
response = self.client.get('/auth_processor_attr_access/')
self.assertContains(response, "Session accessed")
def test_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_perm_in_perms_attrs(self):
u = User.objects.create_user(username='normal', password='secret')
u.user_permissions.add(
Permission.objects.get(
content_type=ContentType.objects.get_for_model(Permission),
codename='add_permission'))
self.client.login(username='normal', password='secret')
response = self.client.get('/auth_processor_perm_in_perms/')
self.assertContains(response, "Has auth permissions")
self.assertContains(response, "Has auth.add_permission permissions")
self.assertNotContains(response, "nonexisting")
def test_message_attrs(self):
self.client.login(username='super', password='secret')
response = self.client.get('/auth_processor_messages/')
self.assertContains(response, "Message 1")
def test_user_attrs(self):
"""
Test that the lazy objects returned behave just like the wrapped objects.
"""
# These are 'functional' level tests for common use cases. Direct
# testing of the implementation (SimpleLazyObject) is in the 'utils'
# tests.
self.client.login(username='super', password='secret')
user = authenticate(username='super', password='secret')
response = self.client.get('/auth_processor_user/')
self.assertContains(response, "unicode: super")
self.assertContains(response, "id: 100")
self.assertContains(response, "username: super")
# bug #12037 is tested by the {% url %} in the template:
self.assertContains(response, "url: /userpage/super/")
# See if this object can be used for queries where a Q() comparing
# a user can be used with another Q() (in an AND or OR fashion).
# This simulates what a template tag might do with the user from the
# context. Note that we don't need to execute a query, just build it.
#
# The failure case (bug #12049) on Python 2.4 with a LazyObject-wrapped
# User is a fatal TypeError: "function() takes at least 2 arguments
# (0 given)" deep inside deepcopy().
#
# Python 2.5 and 2.6 succeeded, but logged internally caught exception
# spew:
#
# Exception RuntimeError: 'maximum recursion depth exceeded while
# calling a Python object' in <type 'exceptions.AttributeError'>
# ignored"
query = Q(user=response.context['user']) & Q(someflag=True)
# Tests for user equality. This is hard because User defines
# equality in a non-duck-typing way
# See bug #12060
self.assertEqual(response.context['user'], user)
self.assertEqual(user, response.context['user'])
| gpl-2.0 |
Godiyos/python-for-android | python3-alpha/python3-src/Tools/scripts/ifdef.py | 48 | 3718 | #! /usr/bin/env python3
# Selectively preprocess #ifdef / #ifndef statements.
# Usage:
# ifdef [-Dname] ... [-Uname] ... [file] ...
#
# This scans the file(s), looking for #ifdef and #ifndef preprocessor
# commands that test for one of the names mentioned in the -D and -U
# options. On standard output it writes a copy of the input file(s)
# minus those code sections that are suppressed by the selected
# combination of defined/undefined symbols. The #if(n)def/#else/#else
# lines themselfs (if the #if(n)def tests for one of the mentioned
# names) are removed as well.
# Features: Arbitrary nesting of recognized and unrecognized
# preprocesor statements works correctly. Unrecognized #if* commands
# are left in place, so it will never remove too much, only too
# little. It does accept whitespace around the '#' character.
# Restrictions: There should be no comments or other symbols on the
# #if(n)def lines. The effect of #define/#undef commands in the input
# file or in included files is not taken into account. Tests using
# #if and the defined() pseudo function are not recognized. The #elif
# command is not recognized. Improper nesting is not detected.
# Lines that look like preprocessor commands but which are actually
# part of comments or string literals will be mistaken for
# preprocessor commands.
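# A minimal example (hypothetical file): given input.c containing
#     #ifdef USE_FOO
#     foo();
#     #else
#     bar();
#     #endif
# "ifdef.py -DUSE_FOO input.c" writes only the "foo();" line to standard
# output (the #ifdef/#else/#endif lines are removed), while
# "ifdef.py -UUSE_FOO input.c" keeps only "bar();".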
import sys
import getopt
defs = []
undefs = []
def main():
opts, args = getopt.getopt(sys.argv[1:], 'D:U:')
for o, a in opts:
if o == '-D':
defs.append(a)
if o == '-U':
undefs.append(a)
if not args:
args = ['-']
for filename in args:
if filename == '-':
process(sys.stdin, sys.stdout)
else:
f = open(filename, 'r')
process(f, sys.stdout)
f.close()
def process(fpi, fpo):
keywords = ('if', 'ifdef', 'ifndef', 'else', 'endif')
ok = 1
stack = []
while 1:
line = fpi.readline()
if not line: break
while line[-2:] == '\\\n':
nextline = fpi.readline()
if not nextline: break
line = line + nextline
tmp = line.strip()
if tmp[:1] != '#':
if ok: fpo.write(line)
continue
tmp = tmp[1:].strip()
words = tmp.split()
keyword = words[0]
if keyword not in keywords:
if ok: fpo.write(line)
continue
if keyword in ('ifdef', 'ifndef') and len(words) == 2:
if keyword == 'ifdef':
ko = 1
else:
ko = 0
word = words[1]
if word in defs:
stack.append((ok, ko, word))
if not ko: ok = 0
elif word in undefs:
stack.append((ok, not ko, word))
if ko: ok = 0
else:
stack.append((ok, -1, word))
if ok: fpo.write(line)
elif keyword == 'if':
stack.append((ok, -1, ''))
if ok: fpo.write(line)
elif keyword == 'else' and stack:
s_ok, s_ko, s_word = stack[-1]
if s_ko < 0:
if ok: fpo.write(line)
else:
s_ko = not s_ko
ok = s_ok
if not s_ko: ok = 0
stack[-1] = s_ok, s_ko, s_word
elif keyword == 'endif' and stack:
s_ok, s_ko, s_word = stack[-1]
if s_ko < 0:
if ok: fpo.write(line)
del stack[-1]
ok = s_ok
else:
sys.stderr.write('Unknown keyword %s\n' % keyword)
if stack:
sys.stderr.write('stack: %s\n' % stack)
if __name__ == '__main__':
main()
| apache-2.0 |
sestrella/ansible | test/units/module_utils/network/ftd/test_configuration.py | 19 | 25072 | # Copyright (c) 2018 Cisco and/or its affiliates.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
import json
import unittest
import pytest
from units.compat import mock
from units.compat.mock import call, patch
from ansible.module_utils.network.ftd.common import HTTPMethod, FtdUnexpectedResponse
from ansible.module_utils.network.ftd.configuration import iterate_over_pageable_resource, BaseConfigurationResource, \
OperationChecker, OperationNamePrefix, ParamName, QueryParams
from ansible.module_utils.network.ftd.fdm_swagger_client import ValidationError, OperationField
class TestBaseConfigurationResource(object):
@pytest.fixture
def connection_mock(self, mocker):
connection_class_mock = mocker.patch('ansible.modules.network.ftd.ftd_configuration.Connection')
connection_instance = connection_class_mock.return_value
connection_instance.validate_data.return_value = True, None
connection_instance.validate_query_params.return_value = True, None
connection_instance.validate_path_params.return_value = True, None
return connection_instance
@patch.object(BaseConfigurationResource, '_fetch_system_info')
@patch.object(BaseConfigurationResource, '_send_request')
def test_get_objects_by_filter_with_multiple_filters(self, send_request_mock, fetch_system_info_mock,
connection_mock):
objects = [
{'name': 'obj1', 'type': 1, 'foo': {'bar': 'buzz'}},
{'name': 'obj2', 'type': 1, 'foo': {'bar': 'buz'}},
{'name': 'obj3', 'type': 2, 'foo': {'bar': 'buzz'}}
]
fetch_system_info_mock.return_value = {
'databaseInfo': {
'buildVersion': '6.3.0'
}
}
connection_mock.get_operation_spec.return_value = {
'method': HTTPMethod.GET,
'url': '/object/'
}
resource = BaseConfigurationResource(connection_mock, False)
send_request_mock.side_effect = [{'items': objects}, {'items': []}]
        # resource.get_objects_by_filter returns a generator, so to compare the generated list
        # with the expected list we need to evaluate it.
assert objects == list(resource.get_objects_by_filter('test', {}))
send_request_mock.assert_has_calls(
[
mock.call('/object/', 'get', {}, {}, {'limit': 10, 'offset': 0})
]
)
send_request_mock.reset_mock()
send_request_mock.side_effect = [{'items': objects}, {'items': []}]
        # resource.get_objects_by_filter returns a generator, so to compare the generated list
        # with the expected list we need to evaluate it.
assert [objects[0]] == list(resource.get_objects_by_filter('test', {ParamName.FILTERS: {'name': 'obj1'}}))
send_request_mock.assert_has_calls(
[
mock.call('/object/', 'get', {}, {}, {QueryParams.FILTER: 'name:obj1', 'limit': 10, 'offset': 0})
]
)
send_request_mock.reset_mock()
send_request_mock.side_effect = [{'items': objects}, {'items': []}]
        # resource.get_objects_by_filter returns a generator, so to compare the generated list
        # with the expected list we need to evaluate it.
assert [objects[1]] == list(resource.get_objects_by_filter(
'test',
{ParamName.FILTERS: {'name': 'obj2', 'type': 1, 'foo': {'bar': 'buz'}}}))
send_request_mock.assert_has_calls(
[
mock.call('/object/', 'get', {}, {}, {QueryParams.FILTER: 'name:obj2', 'limit': 10, 'offset': 0})
]
)
@patch.object(BaseConfigurationResource, '_fetch_system_info')
@patch.object(BaseConfigurationResource, '_send_request')
def test_get_objects_by_filter_with_multiple_responses(self, send_request_mock, fetch_system_info_mock,
connection_mock):
send_request_mock.side_effect = [
{'items': [
{'name': 'obj1', 'type': 'foo'},
{'name': 'obj2', 'type': 'bar'}
]},
{'items': [
{'name': 'obj3', 'type': 'foo'}
]},
{'items': []}
]
fetch_system_info_mock.return_value = {
'databaseInfo': {
'buildVersion': '6.3.0'
}
}
connection_mock.get_operation_spec.return_value = {
'method': HTTPMethod.GET,
'url': '/object/'
}
resource = BaseConfigurationResource(connection_mock, False)
assert [{'name': 'obj1', 'type': 'foo'}] == list(resource.get_objects_by_filter(
'test',
{ParamName.FILTERS: {'type': 'foo'}}))
send_request_mock.assert_has_calls(
[
mock.call('/object/', 'get', {}, {}, {'limit': 10, 'offset': 0})
]
)
send_request_mock.reset_mock()
send_request_mock.side_effect = [
{'items': [
{'name': 'obj1', 'type': 'foo'},
{'name': 'obj2', 'type': 'bar'}
]},
{'items': [
{'name': 'obj3', 'type': 'foo'}
]},
{'items': []}
]
resp = list(resource.get_objects_by_filter(
'test',
{
ParamName.FILTERS: {'type': 'foo'},
ParamName.QUERY_PARAMS: {'limit': 2}
}))
assert [{'name': 'obj1', 'type': 'foo'}, {'name': 'obj3', 'type': 'foo'}] == resp
send_request_mock.assert_has_calls(
[
mock.call('/object/', 'get', {}, {}, {'limit': 2, 'offset': 0}),
mock.call('/object/', 'get', {}, {}, {'limit': 2, 'offset': 2})
]
)
def test_module_should_fail_if_validation_error_in_data(self, connection_mock):
connection_mock.get_operation_spec.return_value = {'method': HTTPMethod.POST, 'url': '/test'}
report = {
'required': ['objects[0].type'],
'invalid_type': [
{
'path': 'objects[3].id',
'expected_type': 'string',
'actually_value': 1
}
]
}
connection_mock.validate_data.return_value = (False, json.dumps(report, sort_keys=True, indent=4))
with pytest.raises(ValidationError) as e_info:
resource = BaseConfigurationResource(connection_mock, False)
resource.crud_operation('addTest', {'data': {}})
result = e_info.value.args[0]
key = 'Invalid data provided'
assert result[key]
result[key] = json.loads(result[key])
assert result == {key: {
'invalid_type': [{'actually_value': 1, 'expected_type': 'string', 'path': 'objects[3].id'}],
'required': ['objects[0].type']
}}
def test_module_should_fail_if_validation_error_in_query_params(self, connection_mock):
connection_mock.get_operation_spec.return_value = {'method': HTTPMethod.GET, 'url': '/test',
'returnMultipleItems': False}
report = {
'required': ['objects[0].type'],
'invalid_type': [
{
'path': 'objects[3].id',
'expected_type': 'string',
'actually_value': 1
}
]
}
connection_mock.validate_query_params.return_value = (False, json.dumps(report, sort_keys=True, indent=4))
with pytest.raises(ValidationError) as e_info:
resource = BaseConfigurationResource(connection_mock, False)
resource.crud_operation('getTestList', {'data': {}})
result = e_info.value.args[0]
key = 'Invalid query_params provided'
assert result[key]
result[key] = json.loads(result[key])
assert result == {key: {
'invalid_type': [{'actually_value': 1, 'expected_type': 'string', 'path': 'objects[3].id'}],
'required': ['objects[0].type']}}
def test_module_should_fail_if_validation_error_in_path_params(self, connection_mock):
connection_mock.get_operation_spec.return_value = {'method': HTTPMethod.GET, 'url': '/test',
'returnMultipleItems': False}
report = {
'path_params': {
'required': ['objects[0].type'],
'invalid_type': [
{
'path': 'objects[3].id',
'expected_type': 'string',
'actually_value': 1
}
]
}
}
connection_mock.validate_path_params.return_value = (False, json.dumps(report, sort_keys=True, indent=4))
with pytest.raises(ValidationError) as e_info:
resource = BaseConfigurationResource(connection_mock, False)
resource.crud_operation('putTest', {'data': {}})
result = e_info.value.args[0]
key = 'Invalid path_params provided'
assert result[key]
result[key] = json.loads(result[key])
assert result == {key: {
'path_params': {
'invalid_type': [{'actually_value': 1, 'expected_type': 'string', 'path': 'objects[3].id'}],
'required': ['objects[0].type']}}}
def test_module_should_fail_if_validation_error_in_all_params(self, connection_mock):
connection_mock.get_operation_spec.return_value = {'method': HTTPMethod.POST, 'url': '/test'}
report = {
'data': {
'required': ['objects[0].type'],
'invalid_type': [
{
'path': 'objects[3].id',
'expected_type': 'string',
'actually_value': 1
}
]
},
'path_params': {
'required': ['some_param'],
'invalid_type': [
{
'path': 'name',
'expected_type': 'string',
'actually_value': True
}
]
},
'query_params': {
'required': ['other_param'],
'invalid_type': [
{
'path': 'f_integer',
'expected_type': 'integer',
'actually_value': "test"
}
]
}
}
connection_mock.validate_data.return_value = (False, json.dumps(report['data'], sort_keys=True, indent=4))
connection_mock.validate_query_params.return_value = (False,
json.dumps(report['query_params'], sort_keys=True,
indent=4))
connection_mock.validate_path_params.return_value = (False,
json.dumps(report['path_params'], sort_keys=True,
indent=4))
with pytest.raises(ValidationError) as e_info:
resource = BaseConfigurationResource(connection_mock, False)
resource.crud_operation('putTest', {'data': {}})
result = e_info.value.args[0]
key_data = 'Invalid data provided'
assert result[key_data]
result[key_data] = json.loads(result[key_data])
key_path_params = 'Invalid path_params provided'
assert result[key_path_params]
result[key_path_params] = json.loads(result[key_path_params])
key_query_params = 'Invalid query_params provided'
assert result[key_query_params]
result[key_query_params] = json.loads(result[key_query_params])
assert result == {
key_data: {'invalid_type': [{'actually_value': 1, 'expected_type': 'string', 'path': 'objects[3].id'}],
'required': ['objects[0].type']},
key_path_params: {'invalid_type': [{'actually_value': True, 'expected_type': 'string', 'path': 'name'}],
'required': ['some_param']},
key_query_params: {
'invalid_type': [{'actually_value': 'test', 'expected_type': 'integer', 'path': 'f_integer'}],
'required': ['other_param']}}
@pytest.mark.parametrize("test_api_version, expected_result",
[
("6.2.3", "name:object_name"),
("6.3.0", "name:object_name"),
("6.4.0", "fts~object_name")
]
)
def test_stringify_name_filter(self, test_api_version, expected_result, connection_mock):
filters = {"name": "object_name"}
with patch.object(BaseConfigurationResource, '_fetch_system_info') as fetch_system_info_mock:
fetch_system_info_mock.return_value = {
'databaseInfo': {
'buildVersion': test_api_version
}
}
resource = BaseConfigurationResource(connection_mock, False)
assert resource._stringify_name_filter(filters) == expected_result, "Unexpected result for version %s" % (
test_api_version)
class TestIterateOverPageableResource(object):
def test_iterate_over_pageable_resource_with_no_items(self):
resource_func = mock.Mock(return_value={'items': []})
items = iterate_over_pageable_resource(resource_func, {'query_params': {}})
assert [] == list(items)
def test_iterate_over_pageable_resource_with_one_page(self):
resource_func = mock.Mock(side_effect=[
{'items': ['foo', 'bar']},
{'items': []},
])
items = iterate_over_pageable_resource(resource_func, {'query_params': {}})
assert ['foo', 'bar'] == list(items)
resource_func.assert_has_calls([
call(params={'query_params': {'offset': 0, 'limit': 10}})
])
def test_iterate_over_pageable_resource_with_multiple_pages(self):
objects = [
{'items': ['foo']},
{'items': ['bar']},
{'items': ['buzz']},
{'items': []},
]
resource_func = mock.Mock(side_effect=objects)
items = iterate_over_pageable_resource(resource_func, {'query_params': {}})
assert ['foo'] == list(items)
resource_func.reset_mock()
resource_func = mock.Mock(side_effect=objects)
items = iterate_over_pageable_resource(resource_func, {'query_params': {'limit': 1}})
assert ['foo', 'bar', 'buzz'] == list(items)
def test_iterate_over_pageable_resource_should_preserve_query_params(self):
resource_func = mock.Mock(return_value={'items': []})
items = iterate_over_pageable_resource(resource_func, {'query_params': {'filter': 'name:123'}})
assert [] == list(items)
resource_func.assert_called_once_with(params={'query_params': {'filter': 'name:123', 'offset': 0, 'limit': 10}})
def test_iterate_over_pageable_resource_should_preserve_limit(self):
resource_func = mock.Mock(side_effect=[
{'items': ['foo']},
{'items': []},
])
items = iterate_over_pageable_resource(resource_func, {'query_params': {'limit': 1}})
assert ['foo'] == list(items)
resource_func.assert_has_calls([
call(params={'query_params': {'offset': 0, 'limit': 1}})
])
def test_iterate_over_pageable_resource_should_preserve_offset(self):
resource_func = mock.Mock(side_effect=[
{'items': ['foo']},
{'items': []},
])
items = iterate_over_pageable_resource(resource_func, {'query_params': {'offset': 3}})
assert ['foo'] == list(items)
resource_func.assert_has_calls([
call(params={'query_params': {'offset': 3, 'limit': 10}}),
])
def test_iterate_over_pageable_resource_should_pass_with_string_offset_and_limit(self):
resource_func = mock.Mock(side_effect=[
{'items': ['foo']},
{'items': []},
])
items = iterate_over_pageable_resource(resource_func, {'query_params': {'offset': '1', 'limit': '1'}})
assert ['foo'] == list(items)
resource_func.assert_has_calls([
call(params={'query_params': {'offset': '1', 'limit': '1'}}),
call(params={'query_params': {'offset': 2, 'limit': '1'}})
])
def test_iterate_over_pageable_resource_raises_exception_when_server_returned_more_items_than_requested(self):
resource_func = mock.Mock(side_effect=[
{'items': ['foo', 'redundant_bar']},
{'items': []},
])
with pytest.raises(FtdUnexpectedResponse):
list(iterate_over_pageable_resource(resource_func, {'query_params': {'offset': '1', 'limit': '1'}}))
resource_func.assert_has_calls([
call(params={'query_params': {'offset': '1', 'limit': '1'}})
])
class TestOperationCheckerClass(unittest.TestCase):
def setUp(self):
self._checker = OperationChecker
def test_is_add_operation_positive(self):
operation_name = OperationNamePrefix.ADD + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.POST}
assert self._checker.is_add_operation(operation_name, operation_spec)
def test_is_add_operation_wrong_method_in_spec(self):
operation_name = OperationNamePrefix.ADD + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.GET}
assert not self._checker.is_add_operation(operation_name, operation_spec)
def test_is_add_operation_negative_wrong_operation_name(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.POST}
assert not self._checker.is_add_operation(operation_name, operation_spec)
def test_is_edit_operation_positive(self):
operation_name = OperationNamePrefix.EDIT + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.PUT}
assert self._checker.is_edit_operation(operation_name, operation_spec)
def test_is_edit_operation_wrong_method_in_spec(self):
operation_name = OperationNamePrefix.EDIT + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.GET}
assert not self._checker.is_edit_operation(operation_name, operation_spec)
def test_is_edit_operation_negative_wrong_operation_name(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.PUT}
assert not self._checker.is_edit_operation(operation_name, operation_spec)
def test_is_delete_operation_positive(self):
operation_name = OperationNamePrefix.DELETE + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.DELETE}
self.assertTrue(
self._checker.is_delete_operation(operation_name, operation_spec)
)
def test_is_delete_operation_wrong_method_in_spec(self):
operation_name = OperationNamePrefix.DELETE + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.GET}
assert not self._checker.is_delete_operation(operation_name, operation_spec)
def test_is_delete_operation_negative_wrong_operation_name(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {OperationField.METHOD: HTTPMethod.DELETE}
assert not self._checker.is_delete_operation(operation_name, operation_spec)
def test_is_get_list_operation_positive(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {
OperationField.METHOD: HTTPMethod.GET,
OperationField.RETURN_MULTIPLE_ITEMS: True
}
assert self._checker.is_get_list_operation(operation_name, operation_spec)
def test_is_get_list_operation_wrong_method_in_spec(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {
OperationField.METHOD: HTTPMethod.POST,
OperationField.RETURN_MULTIPLE_ITEMS: True
}
assert not self._checker.is_get_list_operation(operation_name, operation_spec)
def test_is_get_list_operation_does_not_return_list(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {
OperationField.METHOD: HTTPMethod.GET,
OperationField.RETURN_MULTIPLE_ITEMS: False
}
assert not self._checker.is_get_list_operation(operation_name, operation_spec)
def test_is_get_operation_positive(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {
OperationField.METHOD: HTTPMethod.GET,
OperationField.RETURN_MULTIPLE_ITEMS: False
}
self.assertTrue(
self._checker.is_get_operation(operation_name, operation_spec)
)
def test_is_get_operation_wrong_method_in_spec(self):
operation_name = OperationNamePrefix.ADD + "Object"
operation_spec = {
OperationField.METHOD: HTTPMethod.POST,
OperationField.RETURN_MULTIPLE_ITEMS: False
}
assert not self._checker.is_get_operation(operation_name, operation_spec)
def test_is_get_operation_negative_when_returns_multiple(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {
OperationField.METHOD: HTTPMethod.GET,
OperationField.RETURN_MULTIPLE_ITEMS: True
}
assert not self._checker.is_get_operation(operation_name, operation_spec)
def test_is_upsert_operation_positive(self):
operation_name = OperationNamePrefix.UPSERT + "Object"
assert self._checker.is_upsert_operation(operation_name)
def test_is_upsert_operation_with_wrong_operation_name(self):
for op_type in [OperationNamePrefix.ADD, OperationNamePrefix.GET, OperationNamePrefix.EDIT,
OperationNamePrefix.DELETE]:
operation_name = op_type + "Object"
assert not self._checker.is_upsert_operation(operation_name)
def test_is_find_by_filter_operation(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {
OperationField.METHOD: HTTPMethod.GET,
OperationField.RETURN_MULTIPLE_ITEMS: True
}
params = {ParamName.FILTERS: 1}
self.assertTrue(
self._checker.is_find_by_filter_operation(
operation_name, params, operation_spec
)
)
def test_is_find_by_filter_operation_negative_when_filters_empty(self):
operation_name = OperationNamePrefix.GET + "Object"
operation_spec = {
OperationField.METHOD: HTTPMethod.GET,
OperationField.RETURN_MULTIPLE_ITEMS: True
}
params = {ParamName.FILTERS: None}
assert not self._checker.is_find_by_filter_operation(
operation_name, params, operation_spec
)
params = {}
assert not self._checker.is_find_by_filter_operation(
operation_name, params, operation_spec
)
def test_is_upsert_operation_supported_operation(self):
get_list_op_spec = {OperationField.METHOD: HTTPMethod.GET, OperationField.RETURN_MULTIPLE_ITEMS: True}
add_op_spec = {OperationField.METHOD: HTTPMethod.POST}
edit_op_spec = {OperationField.METHOD: HTTPMethod.PUT}
assert self._checker.is_upsert_operation_supported({'getList': get_list_op_spec, 'edit': edit_op_spec})
assert self._checker.is_upsert_operation_supported(
{'add': add_op_spec, 'getList': get_list_op_spec, 'edit': edit_op_spec})
assert not self._checker.is_upsert_operation_supported({'getList': get_list_op_spec})
assert not self._checker.is_upsert_operation_supported({'edit': edit_op_spec})
assert not self._checker.is_upsert_operation_supported({'getList': get_list_op_spec, 'add': add_op_spec})
| gpl-3.0 |
axinging/sky_engine | build/android/pylib/uirobot/uirobot_test_instance.py | 42 | 2064 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import json
import logging
from pylib import constants
from pylib.base import test_instance
from pylib.utils import apk_helper
class UirobotTestInstance(test_instance.TestInstance):
def __init__(self, args, error_func):
"""Constructor.
Args:
args: Command line arguments.
"""
super(UirobotTestInstance, self).__init__()
if not args.app_under_test:
error_func('Must set --app-under-test.')
self._app_under_test = args.app_under_test
self._minutes = args.minutes
if args.remote_device_file:
with open(args.remote_device_file) as remote_device_file:
device_json = json.load(remote_device_file)
else:
device_json = {}
device_type = device_json.get('device_type', 'Android')
if args.device_type:
if device_type and device_type != args.device_type:
logging.info('Overriding device_type from %s to %s',
device_type, args.device_type)
device_type = args.device_type
if device_type == 'Android':
self._suite = 'Android Uirobot'
self._package_name = apk_helper.GetPackageName(self._app_under_test)
elif device_type == 'iOS':
self._suite = 'iOS Uirobot'
self._package_name = self._app_under_test
#override
def TestType(self):
"""Returns type of test."""
return 'uirobot'
#override
def SetUp(self):
"""Setup for test."""
pass
#override
def TearDown(self):
"""Teardown for test."""
pass
@property
def app_under_test(self):
"""Returns the app to run the test on."""
return self._app_under_test
@property
def minutes(self):
"""Returns the number of minutes to run the uirobot for."""
return self._minutes
@property
def package_name(self):
"""Returns the name of the package in the APK."""
return self._package_name
@property
def suite(self):
return self._suite
| bsd-3-clause |
albertomurillo/ansible | lib/ansible/modules/cloud/vultr/vultr_account_facts.py | 28 | 3335 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2017, René Moser <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: vultr_account_facts
short_description: Gather facts about the Vultr account.
description:
- Gather facts about account balance, charges and payments.
version_added: "2.5"
author: "René Moser (@resmo)"
extends_documentation_fragment: vultr
'''
EXAMPLES = r'''
- name: Gather Vultr account facts
local_action:
module: vultr_account_facts
- name: Print the gathered facts
debug:
var: ansible_facts.vultr_account_facts
'''
RETURN = r'''
---
vultr_api:
description: Response from Vultr API with a few additions/modification
returned: success
type: complex
contains:
api_account:
description: Account used in the ini file to select the key
returned: success
type: str
sample: default
api_timeout:
description: Timeout used for the API requests
returned: success
type: int
sample: 60
api_retries:
description: Amount of max retries for the API requests
returned: success
type: int
sample: 5
api_endpoint:
description: Endpoint used for the API requests
returned: success
type: str
sample: "https://api.vultr.com"
vultr_account_facts:
description: Response from Vultr API
returned: success
type: complex
contains:
balance:
description: Your account balance.
returned: success
type: float
sample: -214.69
pending_charges:
description: Charges pending.
returned: success
type: float
sample: 57.03
last_payment_date:
description: Date of the last payment.
returned: success
type: str
sample: "2017-08-26 12:47:48"
last_payment_amount:
description: The amount of the last payment transaction.
returned: success
type: float
sample: -250.0
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vultr import (
Vultr,
vultr_argument_spec,
)
class AnsibleVultrAccountFacts(Vultr):
def __init__(self, module):
super(AnsibleVultrAccountFacts, self).__init__(module, "vultr_account_facts")
self.returns = {
'balance': dict(convert_to='float'),
'pending_charges': dict(convert_to='float'),
'last_payment_date': dict(),
'last_payment_amount': dict(convert_to='float'),
}
def get_account_info(self):
return self.api_query(path="/v1/account/info")
def main():
argument_spec = vultr_argument_spec()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
account_facts = AnsibleVultrAccountFacts(module)
result = account_facts.get_result(account_facts.get_account_info())
ansible_facts = {
'vultr_account_facts': result['vultr_account_facts']
}
module.exit_json(ansible_facts=ansible_facts, **result)
if __name__ == '__main__':
main()
| gpl-3.0 |
Kast0rTr0y/ansible | lib/ansible/modules/storage/netapp/na_cdot_svm.py | 15 | 8677 | #!/usr/bin/python
# (c) 2017, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
module: na_cdot_svm
short_description: Manage NetApp cDOT svm
extends_documentation_fragment:
- netapp.ontap
version_added: '2.3'
author: Sumit Kumar ([email protected])
description:
- Create or destroy svm on NetApp cDOT
options:
state:
description:
- Whether the specified SVM should exist or not.
required: true
choices: ['present', 'absent']
name:
description:
- The name of the SVM to manage.
required: true
root_volume:
description:
- Root volume of the SVM.
note: required when C(state=present)
root_volume_aggregate:
description:
- The aggregate on which the root volume will be created.
note: required when C(state=present)
root_volume_security_style:
description:
- Security Style of the root volume.
- When specified as part of the vserver-create, this field represents the security style for the Vserver root volume.
- When specified as part of vserver-get-iter call, this will return the list of matching Vservers.
- Possible values are 'unix', 'ntfs', 'mixed'.
- The 'unified' security style, which applies only to Infinite Volumes, cannot be applied to a Vserver's root volume.
- Valid options are "unix" for NFS, "ntfs" for CIFS, "mixed" for Mixed, "unified" for Unified.
note: required when C(state=present)
choices: ['unix', 'ntfs', 'mixed', 'unified']
'''
EXAMPLES = """
- name: Create SVM
na_cdot_svm:
state: present
name: ansibleVServer
root_volume: vol1
root_volume_aggregate: aggr1
root_volume_security_style: mixed
hostname: "{{ netapp_hostname }}"
username: "{{ netapp_username }}"
password: "{{ netapp_password }}"
"""
RETURN = """
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
import ansible.module_utils.netapp as netapp_utils
HAS_NETAPP_LIB = netapp_utils.has_netapp_lib()
class NetAppCDOTSVM(object):
def __init__(self):
self.argument_spec = netapp_utils.ontap_sf_host_argument_spec()
self.argument_spec.update(dict(
state=dict(required=True, choices=['present', 'absent']),
name=dict(required=True, type='str'),
root_volume=dict(type='str'),
root_volume_aggregate=dict(type='str'),
root_volume_security_style=dict(type='str', choices=['nfs',
'cifs',
'mixed',
'unified'
]),
))
self.module = AnsibleModule(
argument_spec=self.argument_spec,
required_if=[
('state', 'present', ['root_volume',
'root_volume_aggregate',
'root_volume_security_style'])
],
supports_check_mode=True
)
p = self.module.params
# set up state variables
self.state = p['state']
self.name = p['name']
self.root_volume = p['root_volume']
self.root_volume_aggregate = p['root_volume_aggregate']
self.root_volume_security_style = p['root_volume_security_style']
if HAS_NETAPP_LIB is False:
self.module.fail_json(msg="the python NetApp-Lib module is required")
else:
self.server = netapp_utils.setup_ontap_zapi(module=self.module)
def get_vserver(self):
"""
Checks if vserver exists.
:return:
True if vserver found
False if vserver is not found
:rtype: bool
"""
vserver_info = netapp_utils.zapi.NaElement('vserver-get-iter')
query_details = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-info', **{'vserver-name': self.name})
query = netapp_utils.zapi.NaElement('query')
query.add_child_elem(query_details)
vserver_info.add_child_elem(query)
result = self.server.invoke_successfully(vserver_info,
enable_tunneling=False)
if (result.get_child_by_name('num-records') and
int(result.get_child_content('num-records')) >= 1):
"""
TODO:
Return more relevant parameters about vserver that can
be updated by the playbook.
"""
return True
else:
return False
def create_vserver(self):
vserver_create = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-create', **{'vserver-name': self.name,
'root-volume': self.root_volume,
'root-volume-aggregate':
self.root_volume_aggregate,
'root-volume-security-style':
self.root_volume_security_style
})
try:
self.server.invoke_successfully(vserver_create,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError:
err = get_exception()
self.module.fail_json(msg='Error provisioning SVM %s with root volume %s on aggregate %s'
% (self.name, self.root_volume, self.root_volume_aggregate),
exception=str(err))
def delete_vserver(self):
vserver_delete = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-destroy', **{'vserver-name': self.name})
try:
self.server.invoke_successfully(vserver_delete,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError:
err = get_exception()
self.module.fail_json(msg='Error deleting SVM %s with root volume %s on aggregate %s'
% (self.name, self.root_volume, self.root_volume_aggregate),
exception=str(err))
def rename_vserver(self):
vserver_rename = netapp_utils.zapi.NaElement.create_node_with_children(
'vserver-rename', **{'vserver-name': self.name,
'new-name': self.name})
try:
self.server.invoke_successfully(vserver_rename,
enable_tunneling=False)
except netapp_utils.zapi.NaApiError:
err = get_exception()
self.module.fail_json(msg='Error renaming SVM %s' % self.name, exception=str(err))
def apply(self):
changed = False
vserver_exists = self.get_vserver()
rename_vserver = False
if vserver_exists:
if self.state == 'absent':
changed = True
elif self.state == 'present':
# Update properties
pass
else:
if self.state == 'present':
changed = True
if changed:
if self.module.check_mode:
pass
else:
if self.state == 'present':
if not vserver_exists:
self.create_vserver()
else:
if rename_vserver:
self.rename_vserver()
elif self.state == 'absent':
self.delete_vserver()
self.module.exit_json(changed=changed)
def main():
v = NetAppCDOTSVM()
v.apply()
if __name__ == '__main__':
main()
| gpl-3.0 |
saimn/astropy | astropy/coordinates/attributes.py | 3 | 18234 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# Dependencies
import numpy as np
# Project
from astropy import units as u
from astropy.utils import ShapedLikeNDArray
__all__ = ['Attribute', 'TimeAttribute', 'QuantityAttribute',
'EarthLocationAttribute', 'CoordinateAttribute',
'CartesianRepresentationAttribute',
'DifferentialAttribute']
class Attribute:
"""A non-mutable data descriptor to hold a frame attribute.
This class must be used to define frame attributes (e.g. ``equinox`` or
``obstime``) that are included in a frame class definition.
Examples
--------
The `~astropy.coordinates.FK4` class uses the following class attributes::
class FK4(BaseCoordinateFrame):
equinox = TimeAttribute(default=_EQUINOX_B1950)
obstime = TimeAttribute(default=None,
secondary_attribute='equinox')
This means that ``equinox`` and ``obstime`` are available to be set as
keyword arguments when creating an ``FK4`` class instance and are then
accessible as instance attributes. The instance value for the attribute
must be stored in ``'_' + <attribute_name>`` by the frame ``__init__``
method.
Note in this example that ``equinox`` and ``obstime`` are time attributes
and use the ``TimeAttributeFrame`` class. This subclass overrides the
``convert_input`` method to validate and convert inputs into a ``Time``
object.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
name = '<unbound>'
def __init__(self, default=None, secondary_attribute=''):
self.default = default
self.secondary_attribute = secondary_attribute
super().__init__()
def __set_name__(self, owner, name):
self.name = name
def convert_input(self, value):
"""
Validate the input ``value`` and convert to expected attribute class.
The base method here does nothing, but subclasses can implement this
as needed. The method should catch any internal exceptions and raise
ValueError with an informative message.
The method returns the validated input along with a boolean that
indicates whether the input value was actually converted. If the input
value was already the correct type then the ``converted`` return value
should be ``False``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
output_value
The ``value`` converted to the correct type (or just ``value`` if
``converted`` is False)
converted : bool
True if the conversion was actually performed, False otherwise.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
return value, False
def __get__(self, instance, frame_cls=None):
if instance is None:
out = self.default
else:
out = getattr(instance, '_' + self.name, self.default)
if out is None:
out = getattr(instance, self.secondary_attribute, self.default)
out, converted = self.convert_input(out)
if instance is not None:
instance_shape = getattr(instance, 'shape', None) # None if instance (frame) has no data!
if instance_shape is not None and (getattr(out, 'shape', ()) and
out.shape != instance_shape):
# If the shapes do not match, try broadcasting.
try:
if isinstance(out, ShapedLikeNDArray):
out = out._apply(np.broadcast_to, shape=instance_shape,
subok=True)
else:
out = np.broadcast_to(out, instance_shape, subok=True)
except ValueError:
# raise more informative exception.
raise ValueError(
"attribute {} should be scalar or have shape {}, "
"but is has shape {} and could not be broadcast."
.format(self.name, instance_shape, out.shape))
converted = True
if converted:
setattr(instance, '_' + self.name, out)
return out
def __set__(self, instance, val):
raise AttributeError('Cannot set frame attribute')
class TimeAttribute(Attribute):
"""
Frame attribute descriptor for quantities that are Time objects.
See the `~astropy.coordinates.Attribute` API doc for further
information.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Convert input value to a Time object and validate by running through
the Time constructor. Also check that the input was a scalar.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.time import Time
if value is None:
return None, False
if isinstance(value, Time):
out = value
converted = False
else:
try:
out = Time(value)
except Exception as err:
raise ValueError(
f'Invalid time input {self.name}={value!r}\n{err}')
converted = True
# Set attribute as read-only for arrays (not allowed by numpy
# for array scalars)
if out.shape:
out.writeable = False
return out, converted
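# Illustrative sketch (editor's addition): what ``TimeAttribute.convert_input``
# returns for a few representative inputs, assuming the usual
# ``astropy.time.Time`` string parsing.
#
#     from astropy.time import Time
#     attr = TimeAttribute(default=None)
#     attr.convert_input(None)                 # -> (None, False)
#     attr.convert_input(Time('2010-01-01'))   # -> (<Time ...>, False)
#     attr.convert_input('2010-01-01')         # -> (Time('2010-01-01'), True)
#     attr.convert_input('not a time')         # raises ValueError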
class CartesianRepresentationAttribute(Attribute):
"""
A frame attribute that is a CartesianRepresentation with specified units.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
"""
def __init__(self, default=None, secondary_attribute='', unit=None):
super().__init__(default, secondary_attribute)
self.unit = unit
def convert_input(self, value):
"""
Checks that the input is a CartesianRepresentation with the correct
unit, or the special value ``[0, 0, 0]``.
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if (isinstance(value, list) and len(value) == 3 and
all(v == 0 for v in value) and self.unit is not None):
return CartesianRepresentation(np.zeros(3) * self.unit), True
else:
# is it a CartesianRepresentation with correct unit?
if hasattr(value, 'xyz') and value.xyz.unit == self.unit:
return value, False
converted = True
# if it's a CartesianRepresentation, get the xyz Quantity
value = getattr(value, 'xyz', value)
if not hasattr(value, 'unit'):
raise TypeError('tried to set a {} with something that does '
'not have a unit.'
.format(self.__class__.__name__))
value = value.to(self.unit)
# now try and make a CartesianRepresentation.
cartrep = CartesianRepresentation(value, copy=False)
return cartrep, converted
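# Illustrative sketch (editor's addition): expected ``convert_input`` behaviour
# for a ``CartesianRepresentationAttribute`` with km units, assuming standard
# ``astropy.units`` semantics.
#
#     import astropy.units as u
#     attr = CartesianRepresentationAttribute(unit=u.km)
#     attr.convert_input([0, 0, 0])          # -> (zero CartesianRepresentation in km, True)
#     attr.convert_input([1, 2, 3] * u.km)   # -> (CartesianRepresentation in km, True)
#     attr.convert_input([1, 2, 3])          # raises TypeError (no unit attached)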
class QuantityAttribute(Attribute):
"""
A frame attribute that is a quantity with specified units and shape
(optionally).
Can be `None`, which should be used for special cases in associated
frame transformations like "this quantity should be ignored" or similar.
Parameters
----------
default : value or Quantity or None
Default value for the attribute if the user does not supply one. If a
Quantity, it must be consistent with ``unit``, or if a value, ``unit``
cannot be None.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
unit : unit object or None
Name of a unit that the input will be converted into. If None, no
unit-checking or conversion is performed
shape : tuple or None
If given, specifies the shape the attribute must be
"""
def __init__(self, default=None, secondary_attribute='', unit=None,
shape=None):
if default is None and unit is None:
raise ValueError('Either a default quantity value must be '
'provided, or a unit must be provided to define a '
'QuantityAttribute.')
if default is not None and unit is None:
unit = default.unit
self.unit = unit
self.shape = shape
default = self.convert_input(default)[0]
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if (not hasattr(value, 'unit') and self.unit != u.dimensionless_unscaled
and np.any(value != 0)):
raise TypeError('Tried to set a QuantityAttribute with '
'something that does not have a unit.')
oldvalue = value
value = u.Quantity(oldvalue, self.unit, copy=False)
if self.shape is not None and value.shape != self.shape:
if value.shape == () and oldvalue == 0:
# Allow a single 0 to fill whatever shape is needed.
value = np.broadcast_to(value, self.shape, subok=True)
else:
raise ValueError(
f'The provided value has shape "{value.shape}", but '
f'should have shape "{self.shape}"')
converted = oldvalue is not value
return value, converted
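# Illustrative sketch (editor's addition): typical ``QuantityAttribute`` usage,
# assuming standard ``astropy.units`` behaviour.
#
#     import astropy.units as u
#     attr = QuantityAttribute(default=0 * u.km)   # unit inferred from the default
#     attr.convert_input(None)         # -> (None, False)
#     attr.convert_input(10 * u.km)    # -> (<Quantity 10. km>, converted only if a copy was needed)
#     attr.convert_input(0)            # -> (<Quantity 0. km>, True); a bare 0 is allowed
#     attr.convert_input(10)           # raises TypeError (no unit attached)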
class EarthLocationAttribute(Attribute):
"""
A frame attribute that can act as a `~astropy.coordinates.EarthLocation`.
It can be created as anything that can be transformed to the
`~astropy.coordinates.ITRS` frame, but always presents as an `EarthLocation`
when accessed after creation.
Parameters
----------
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def convert_input(self, value):
"""
Checks that the input is a Quantity with the necessary units (or the
special value ``0``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
elif isinstance(value, EarthLocation):
return value, False
else:
# we have to do the import here because of some tricky circular deps
from .builtin_frames import ITRS
if not hasattr(value, 'transform_to'):
raise ValueError('"{}" was passed into an '
'EarthLocationAttribute, but it does not have '
'"transform_to" method'.format(value))
itrsobj = value.transform_to(ITRS())
return itrsobj.earth_location, True
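# Illustrative sketch (editor's addition): an ``EarthLocationAttribute`` accepts
# an ``EarthLocation`` directly, or anything with a ``transform_to`` method that
# can be transformed to ITRS.  Constructor details are assumed from the public
# ``EarthLocation`` API.
#
#     import astropy.units as u
#     attr = EarthLocationAttribute(default=None)
#     loc = EarthLocation(lat=52 * u.deg, lon=5 * u.deg)
#     attr.convert_input(loc)    # -> (loc, False), returned unchanged
#     attr.convert_input(None)   # -> (None, False)
#     attr.convert_input(42)     # raises ValueError (no ``transform_to`` method)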
class CoordinateAttribute(Attribute):
"""
A frame attribute which is a coordinate object. It can be given as a
`~astropy.coordinates.SkyCoord` or a low-level frame instance. If a
low-level frame instance is provided, it will always be upgraded to be a
`~astropy.coordinates.SkyCoord` to ensure consistent transformation
behavior. The coordinate object will always be returned as a low-level
frame instance when accessed.
Parameters
----------
frame : a coordinate frame class
The type of frame this attribute can be
default : object
Default value for the attribute if not provided
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, frame, default=None, secondary_attribute=''):
self._frame = frame
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a SkyCoord with the necessary units (or the
special value ``None``).
Parameters
----------
value : object
Input value to be converted.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
from astropy.coordinates import SkyCoord
if value is None:
return None, False
elif isinstance(value, self._frame):
return value, False
else:
value = SkyCoord(value) # always make the value a SkyCoord
transformedobj = value.transform_to(self._frame)
return transformedobj.frame, True
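# Illustrative sketch (editor's addition): a ``CoordinateAttribute`` converts any
# SkyCoord-compatible input into an instance of the target frame.  ``ICRS`` is
# used here purely as an example target frame.
#
#     import astropy.units as u
#     from astropy.coordinates import SkyCoord, ICRS
#     attr = CoordinateAttribute(frame=ICRS, default=None)
#     coord = SkyCoord(10 * u.deg, 20 * u.deg, frame='fk5')
#     out, converted = attr.convert_input(coord)              # out is an ICRS frame instance
#     attr.convert_input(ICRS(ra=1 * u.deg, dec=2 * u.deg))   # -> (that ICRS, False)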
class DifferentialAttribute(Attribute):
"""A frame attribute which is a differential instance.
The optional ``allowed_classes`` argument allows specifying a restricted
set of valid differential classes to check the input against. Otherwise,
any `~astropy.coordinates.BaseDifferential` subclass instance is valid.
Parameters
----------
default : object
Default value for the attribute if not provided
allowed_classes : tuple, optional
A list of allowed differential classes for this attribute to have.
secondary_attribute : str
Name of a secondary instance attribute which supplies the value if
``default is None`` and no value was supplied during initialization.
"""
def __init__(self, default=None, allowed_classes=None,
secondary_attribute=''):
if allowed_classes is not None:
self.allowed_classes = tuple(allowed_classes)
else:
self.allowed_classes = BaseDifferential
super().__init__(default, secondary_attribute)
def convert_input(self, value):
"""
Checks that the input is a differential object and is one of the
allowed class types.
Parameters
----------
value : object
Input value.
Returns
-------
out, converted : correctly-typed object, boolean
Tuple consisting of the correctly-typed object and a boolean which
indicates if conversion was actually performed.
Raises
------
ValueError
If the input is not valid for this attribute.
"""
if value is None:
return None, False
if not isinstance(value, self.allowed_classes):
if len(self.allowed_classes) == 1:
value = self.allowed_classes[0](value)
else:
raise TypeError('Tried to set a DifferentialAttribute with '
'an unsupported Differential type {}. Allowed '
'classes are: {}'
.format(value.__class__,
self.allowed_classes))
return value, True
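# Illustrative sketch (editor's addition): restricting a ``DifferentialAttribute``
# to a single differential class makes it coerce compatible input; the
# ``CartesianDifferential`` usage below is assumed from the public API.
#
#     import astropy.units as u
#     from astropy.coordinates import CartesianDifferential
#     attr = DifferentialAttribute(allowed_classes=[CartesianDifferential])
#     d = CartesianDifferential([1, 2, 3] * u.km / u.s)
#     attr.convert_input(d)                        # -> (d, True)
#     attr.convert_input([1, 2, 3] * u.km / u.s)   # coerced to a CartesianDifferential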
# do this here to prevent a series of complicated circular imports
from .earth import EarthLocation
from .representation import CartesianRepresentation, BaseDifferential
| bsd-3-clause |
Cinntax/home-assistant | tests/components/input_select/test_init.py | 4 | 8756 | """The tests for the Input select component."""
# pylint: disable=protected-access
import asyncio
from homeassistant.loader import bind_hass
from homeassistant.components.input_select import (
ATTR_OPTION,
ATTR_OPTIONS,
DOMAIN,
SERVICE_SET_OPTIONS,
SERVICE_SELECT_NEXT,
SERVICE_SELECT_OPTION,
SERVICE_SELECT_PREVIOUS,
)
from homeassistant.const import ATTR_ENTITY_ID, ATTR_FRIENDLY_NAME, ATTR_ICON
from homeassistant.core import State, Context
from homeassistant.setup import async_setup_component
from tests.common import mock_restore_cache
@bind_hass
def select_option(hass, entity_id, option):
"""Set value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN,
SERVICE_SELECT_OPTION,
{ATTR_ENTITY_ID: entity_id, ATTR_OPTION: option},
)
)
@bind_hass
def select_next(hass, entity_id):
"""Set next value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_SELECT_NEXT, {ATTR_ENTITY_ID: entity_id}
)
)
@bind_hass
def select_previous(hass, entity_id):
"""Set previous value of input_select.
This is a legacy helper method. Do not use it for new tests.
"""
hass.async_create_task(
hass.services.async_call(
DOMAIN, SERVICE_SELECT_PREVIOUS, {ATTR_ENTITY_ID: entity_id}
)
)
async def test_config(hass):
"""Test config."""
invalid_configs = [
None,
{},
{"name with space": None},
# {'bad_options': {'options': None}},
{"bad_initial": {"options": [1, 2], "initial": 3}},
]
for cfg in invalid_configs:
assert not await async_setup_component(hass, DOMAIN, {DOMAIN: cfg})
async def test_select_option(hass):
"""Test select_option methods."""
assert await async_setup_component(
hass,
DOMAIN,
{DOMAIN: {"test_1": {"options": ["some option", "another option"]}}},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert "some option" == state.state
select_option(hass, entity_id, "another option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "another option" == state.state
select_option(hass, entity_id, "non existing option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "another option" == state.state
async def test_select_next(hass):
"""Test select_next methods."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert "middle option" == state.state
select_next(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "last option" == state.state
select_next(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "first option" == state.state
async def test_select_previous(hass):
"""Test select_previous methods."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert "middle option" == state.state
select_previous(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "first option" == state.state
select_previous(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "last option" == state.state
async def test_config_options(hass):
"""Test configuration options."""
count_start = len(hass.states.async_entity_ids())
test_2_options = ["Good Option", "Better Option", "Best Option"]
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {"options": [1, 2]},
"test_2": {
"name": "Hello World",
"icon": "mdi:work",
"options": test_2_options,
"initial": "Better Option",
},
}
},
)
assert count_start + 2 == len(hass.states.async_entity_ids())
state_1 = hass.states.get("input_select.test_1")
state_2 = hass.states.get("input_select.test_2")
assert state_1 is not None
assert state_2 is not None
assert "1" == state_1.state
assert ["1", "2"] == state_1.attributes.get(ATTR_OPTIONS)
assert ATTR_ICON not in state_1.attributes
assert "Better Option" == state_2.state
assert test_2_options == state_2.attributes.get(ATTR_OPTIONS)
assert "Hello World" == state_2.attributes.get(ATTR_FRIENDLY_NAME)
assert "mdi:work" == state_2.attributes.get(ATTR_ICON)
async def test_set_options_service(hass):
"""Test set_options service."""
assert await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"test_1": {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
}
},
)
entity_id = "input_select.test_1"
state = hass.states.get(entity_id)
assert "middle option" == state.state
data = {ATTR_OPTIONS: ["test1", "test2"], "entity_id": entity_id}
await hass.services.async_call(DOMAIN, SERVICE_SET_OPTIONS, data)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "test1" == state.state
select_option(hass, entity_id, "first option")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "test1" == state.state
select_option(hass, entity_id, "test2")
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert "test2" == state.state
@asyncio.coroutine
def test_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(
hass,
(
State("input_select.s1", "last option"),
State("input_select.s2", "bad option"),
),
)
options = {"options": ["first option", "middle option", "last option"]}
yield from async_setup_component(
hass, DOMAIN, {DOMAIN: {"s1": options, "s2": options}}
)
state = hass.states.get("input_select.s1")
assert state
assert state.state == "last option"
state = hass.states.get("input_select.s2")
assert state
assert state.state == "first option"
@asyncio.coroutine
def test_initial_state_overrules_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(
hass,
(
State("input_select.s1", "last option"),
State("input_select.s2", "bad option"),
),
)
options = {
"options": ["first option", "middle option", "last option"],
"initial": "middle option",
}
yield from async_setup_component(
hass, DOMAIN, {DOMAIN: {"s1": options, "s2": options}}
)
state = hass.states.get("input_select.s1")
assert state
assert state.state == "middle option"
state = hass.states.get("input_select.s2")
assert state
assert state.state == "middle option"
async def test_input_select_context(hass, hass_admin_user):
"""Test that input_select context works."""
assert await async_setup_component(
hass,
"input_select",
{
"input_select": {
"s1": {"options": ["first option", "middle option", "last option"]}
}
},
)
state = hass.states.get("input_select.s1")
assert state is not None
await hass.services.async_call(
"input_select",
"select_next",
{"entity_id": state.entity_id},
True,
Context(user_id=hass_admin_user.id),
)
state2 = hass.states.get("input_select.s1")
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == hass_admin_user.id
| apache-2.0 |
sbrunner/openlayers | tests/selenium/remotecontrol/selenium.py | 254 | 69389 |
"""
Copyright 2006 ThoughtWorks, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__docformat__ = "restructuredtext en"
# This file has been automatically generated via XSL
import httplib
import urllib
import re
class selenium:
"""
Defines an object that runs Selenium commands.
Element Locators
~~~~~~~~~~~~~~~~
Element Locators tell Selenium which HTML element a command refers to.
The format of a locator is:
\ *locatorType*\ **=**\ \ *argument*
We support the following strategies for locating elements:
* \ **identifier**\ =\ *id*:
Select the element with the specified @id attribute. If no match is
found, select the first element whose @name attribute is \ *id*.
(This is normally the default; see below.)
* \ **id**\ =\ *id*:
Select the element with the specified @id attribute.
* \ **name**\ =\ *name*:
Select the first element with the specified @name attribute.
* username
* name=username
The name may optionally be followed by one or more \ *element-filters*, separated from the name by whitespace. If the \ *filterType* is not specified, \ **value**\ is assumed.
* name=flavour value=chocolate
* \ **dom**\ =\ *javascriptExpression*:
Find an element by evaluating the specified string. This allows you to traverse the HTML Document Object
Model using JavaScript. Note that you must not return a value in this string; simply make it the last expression in the block.
* dom=document.forms['myForm'].myDropdown
* dom=document.images[56]
* dom=function foo() { return document.links[1]; }; foo();
* \ **xpath**\ =\ *xpathExpression*:
Locate an element using an XPath expression.
* xpath=//img[@alt='The image alt text']
* xpath=//table[@id='table1']//tr[4]/td[2]
* xpath=//a[contains(@href,'#id1')]
* xpath=//a[contains(@href,'#id1')]/@class
* xpath=(//table[@class='stylee'])//th[text()='theHeaderText']/../td
* xpath=//input[@name='name2' and @value='yes']
* xpath=//\*[text()="right"]
* \ **link**\ =\ *textPattern*:
Select the link (anchor) element which contains text matching the
specified \ *pattern*.
* link=The link text
* \ **css**\ =\ *cssSelectorSyntax*:
Select the element using css selectors. Please refer to CSS2 selectors, CSS3 selectors for more information. You can also check the TestCssLocators test in the selenium test suite for an example of usage, which is included in the downloaded selenium core package.
* css=a[href="#id3"]
* css=span#firstChild + span
Currently the css selector locator supports all css1, css2 and css3 selectors except namespace in css3, some pseudo classes(:nth-of-type, :nth-last-of-type, :first-of-type, :last-of-type, :only-of-type, :visited, :hover, :active, :focus, :indeterminate) and pseudo elements(::first-line, ::first-letter, ::selection, ::before, ::after).
Without an explicit locator prefix, Selenium uses the following default
strategies:
* \ **dom**\ , for locators starting with "document."
* \ **xpath**\ , for locators starting with "//"
* \ **identifier**\ , otherwise
Element Filters
~~~~~~~~~~~~~~~
Element filters can be used with a locator to refine a list of candidate elements. They are currently used only in the 'name' element-locator.
Filters look much like locators, ie.
\ *filterType*\ **=**\ \ *argument*
Supported element-filters are:
\ **value=**\ \ *valuePattern*
Matches elements based on their values. This is particularly useful for refining a list of similarly-named toggle-buttons.
\ **index=**\ \ *index*
Selects a single element based on its position in the list (offset from zero).
String-match Patterns
~~~~~~~~~~~~~~~~~~~~~
Various Pattern syntaxes are available for matching string values:
* \ **glob:**\ \ *pattern*:
Match a string against a "glob" (aka "wildmat") pattern. "Glob" is a
kind of limited regular-expression syntax typically used in command-line
shells. In a glob pattern, "\*" represents any sequence of characters, and "?"
represents any single character. Glob patterns match against the entire
string.
* \ **regexp:**\ \ *regexp*:
Match a string using a regular-expression. The full power of JavaScript
regular-expressions is available.
* \ **regexpi:**\ \ *regexpi*:
Match a string using a case-insensitive regular-expression.
* \ **exact:**\ \ *string*:
Match a string exactly, verbatim, without any of that fancy wildcard
stuff.
If no pattern prefix is specified, Selenium assumes that it's a "glob"
pattern.
For commands that return multiple values (such as verifySelectOptions),
the string being matched is a comma-separated list of the return values,
where both commas and backslashes in the values are backslash-escaped.
When providing a pattern, the optional matching syntax (i.e. glob,
regexp, etc.) is specified once, as usual, at the beginning of the
pattern.
"""
### This part is hard-coded in the XSL
def __init__(self, host, port, browserStartCommand, browserURL):
self.host = host
self.port = port
self.browserStartCommand = browserStartCommand
self.browserURL = browserURL
self.sessionId = None
def start(self):
result = self.get_string("getNewBrowserSession", [self.browserStartCommand, self.browserURL])
try:
self.sessionId = result
except ValueError:
raise Exception, result
def stop(self):
self.do_command("testComplete", [])
self.sessionId = None
def do_command(self, verb, args):
conn = httplib.HTTPConnection(self.host, self.port)
commandString = u'/selenium-server/driver/?cmd=' + urllib.quote_plus(unicode(verb).encode('utf-8'))
for i in range(len(args)):
commandString = commandString + '&' + unicode(i+1) + '=' + urllib.quote_plus(unicode(args[i]).encode('utf-8'))
if (None != self.sessionId):
commandString = commandString + "&sessionId=" + unicode(self.sessionId)
conn.request("GET", commandString)
response = conn.getresponse()
#print response.status, response.reason
data = unicode(response.read(), "UTF-8")
result = response.reason
#print "Selenium Result: " + repr(data) + "\n\n"
if (not data.startswith('OK')):
raise Exception, data
return data
def get_string(self, verb, args):
result = self.do_command(verb, args)
return result[3:]
def get_string_array(self, verb, args):
csv = self.get_string(verb, args)
token = ""
tokens = []
escape = False
for i in range(len(csv)):
letter = csv[i]
if (escape):
token = token + letter
escape = False
continue
if (letter == '\\'):
escape = True
elif (letter == ','):
tokens.append(token)
token = ""
else:
token = token + letter
tokens.append(token)
return tokens
def get_number(self, verb, args):
# Is there something I need to do here?
return self.get_string(verb, args)
def get_number_array(self, verb, args):
# Is there something I need to do here?
return self.get_string_array(verb, args)
def get_boolean(self, verb, args):
boolstr = self.get_string(verb, args)
if ("true" == boolstr):
return True
if ("false" == boolstr):
return False
raise ValueError, "result is neither 'true' nor 'false': " + boolstr
def get_boolean_array(self, verb, args):
boolarr = self.get_string_array(verb, args)
for i in range(len(boolarr)):
if ("true" == boolstr):
boolarr[i] = True
continue
if ("false" == boolstr):
boolarr[i] = False
continue
raise ValueError, "result is neither 'true' nor 'false': " + boolarr[i]
return boolarr
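    # Illustrative sketch (editor's addition): typical use of this client from a
    # test script, assuming a Selenium RC server is listening on port 4444.
    #
    #     sel = selenium("localhost", 4444, "*firefox", "http://www.example.com")
    #     sel.start()                       # open a new browser session
    #     sel.open("/")                     # URL is relative to browserURL
    #     sel.type("q", "openlayers")       # "q" -> element with id/name "q"
    #     sel.click("css=input[type=submit]")
    #     sel.wait_for_page_to_load("30000")
    #     sel.stop()                        # end the session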
### From here on, everything's auto-generated from XML
def click(self,locator):
"""
Clicks on a link, button, checkbox or radio button. If the click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
"""
self.do_command("click", [locator,])
def double_click(self,locator):
"""
Double clicks on a link, button, checkbox or radio button. If the double click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
"""
self.do_command("doubleClick", [locator,])
def context_menu(self,locator):
"""
Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element).
'locator' is an element locator
"""
self.do_command("contextMenu", [locator,])
def click_at(self,locator,coordString):
"""
Clicks on a link, button, checkbox or radio button. If the click action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
        'coordString' specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("clickAt", [locator,coordString,])
def double_click_at(self,locator,coordString):
"""
Doubleclicks on a link, button, checkbox or radio button. If the action
causes a new page to load (like a link usually does), call
waitForPageToLoad.
'locator' is an element locator
        'coordString' specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("doubleClickAt", [locator,coordString,])
def context_menu_at(self,locator,coordString):
"""
Simulates opening the context menu for the specified element (as might happen if the user "right-clicked" on the element).
'locator' is an element locator
        'coordString' specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("contextMenuAt", [locator,coordString,])
def fire_event(self,locator,eventName):
"""
Explicitly simulate an event, to trigger the corresponding "on\ *event*"
handler.
'locator' is an element locator
'eventName' is the event name, e.g. "focus" or "blur"
"""
self.do_command("fireEvent", [locator,eventName,])
def focus(self,locator):
"""
Move the focus to the specified element; for example, if the element is an input field, move the cursor to that field.
'locator' is an element locator
"""
self.do_command("focus", [locator,])
def key_press(self,locator,keySequence):
"""
Simulates a user pressing and releasing a key.
'locator' is an element locator
        'keySequence' is either a string ("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key) or a single character. For example: "w", "\119".
"""
self.do_command("keyPress", [locator,keySequence,])
def shift_key_down(self):
"""
Press the shift key and hold it down until doShiftUp() is called or a new page is loaded.
"""
self.do_command("shiftKeyDown", [])
def shift_key_up(self):
"""
Release the shift key.
"""
self.do_command("shiftKeyUp", [])
def meta_key_down(self):
"""
Press the meta key and hold it down until doMetaUp() is called or a new page is loaded.
"""
self.do_command("metaKeyDown", [])
def meta_key_up(self):
"""
Release the meta key.
"""
self.do_command("metaKeyUp", [])
def alt_key_down(self):
"""
Press the alt key and hold it down until doAltUp() is called or a new page is loaded.
"""
self.do_command("altKeyDown", [])
def alt_key_up(self):
"""
Release the alt key.
"""
self.do_command("altKeyUp", [])
def control_key_down(self):
"""
Press the control key and hold it down until doControlUp() is called or a new page is loaded.
"""
self.do_command("controlKeyDown", [])
def control_key_up(self):
"""
Release the control key.
"""
self.do_command("controlKeyUp", [])
def key_down(self,locator,keySequence):
"""
Simulates a user pressing a key (without releasing it yet).
'locator' is an element locator
        'keySequence' is either a string ("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key) or a single character. For example: "w", "\119".
"""
self.do_command("keyDown", [locator,keySequence,])
def key_up(self,locator,keySequence):
"""
Simulates a user releasing a key.
'locator' is an element locator
        'keySequence' is either a string ("\" followed by the numeric keycode of the key to be pressed, normally the ASCII value of that key) or a single character. For example: "w", "\119".
"""
self.do_command("keyUp", [locator,keySequence,])
def mouse_over(self,locator):
"""
Simulates a user hovering a mouse over the specified element.
'locator' is an element locator
"""
self.do_command("mouseOver", [locator,])
def mouse_out(self,locator):
"""
Simulates a user moving the mouse pointer away from the specified element.
'locator' is an element locator
"""
self.do_command("mouseOut", [locator,])
def mouse_down(self,locator):
"""
Simulates a user pressing the mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
"""
self.do_command("mouseDown", [locator,])
def mouse_down_at(self,locator,coordString):
"""
Simulates a user pressing the mouse button (without releasing it yet) at
the specified location.
'locator' is an element locator
        'coordString' specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseDownAt", [locator,coordString,])
def mouse_up(self,locator):
"""
Simulates the event that occurs when the user releases the mouse button (i.e., stops
holding the button down) on the specified element.
'locator' is an element locator
"""
self.do_command("mouseUp", [locator,])
def mouse_up_at(self,locator,coordString):
"""
Simulates the event that occurs when the user releases the mouse button (i.e., stops
holding the button down) at the specified location.
'locator' is an element locator
        'coordString' specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseUpAt", [locator,coordString,])
def mouse_move(self,locator):
"""
Simulates a user pressing the mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
"""
self.do_command("mouseMove", [locator,])
def mouse_move_at(self,locator,coordString):
"""
Simulates a user pressing the mouse button (without releasing it yet) on
the specified element.
'locator' is an element locator
        'coordString' specifies the x,y position (i.e. - 10,20) of the mouse event relative to the element returned by the locator.
"""
self.do_command("mouseMoveAt", [locator,coordString,])
def type(self,locator,value):
"""
Sets the value of an input field, as though you typed it in.
Can also be used to set the value of combo boxes, check boxes, etc. In these cases,
value should be the value of the option selected, not the visible text.
'locator' is an element locator
'value' is the value to type
"""
self.do_command("type", [locator,value,])
def type_keys(self,locator,value):
"""
Simulates keystroke events on the specified element, as though you typed the value key-by-key.
This is a convenience method for calling keyDown, keyUp, keyPress for every character in the specified string;
this is useful for dynamic UI widgets (like auto-completing combo boxes) that require explicit key events.
Unlike the simple "type" command, which forces the specified value into the page directly, this command
may or may not have any visible effect, even in cases where typing keys would normally have a visible effect.
For example, if you use "typeKeys" on a form element, you may or may not see the results of what you typed in
the field.
In some cases, you may need to use the simple "type" command to set the value of the field and then the "typeKeys" command to
send the keystroke events corresponding to what you just typed.
'locator' is an element locator
'value' is the value to type
"""
self.do_command("typeKeys", [locator,value,])
def set_speed(self,value):
"""
Set execution speed (i.e., set the millisecond length of a delay which will follow each selenium operation). By default, there is no such delay, i.e.,
the delay is 0 milliseconds.
'value' is the number of milliseconds to pause after operation
"""
self.do_command("setSpeed", [value,])
def get_speed(self):
"""
Get execution speed (i.e., get the millisecond length of the delay following each selenium operation). By default, there is no such delay, i.e.,
the delay is 0 milliseconds.
See also setSpeed.
"""
return self.get_string("getSpeed", [])
def check(self,locator):
"""
Check a toggle-button (checkbox/radio)
'locator' is an element locator
"""
self.do_command("check", [locator,])
def uncheck(self,locator):
"""
Uncheck a toggle-button (checkbox/radio)
'locator' is an element locator
"""
self.do_command("uncheck", [locator,])
def select(self,selectLocator,optionLocator):
"""
Select an option from a drop-down using an option locator.
Option locators provide different ways of specifying options of an HTML
Select element (e.g. for selecting a specific option, or for asserting
that the selected option satisfies a specification). There are several
forms of Select Option Locator.
* \ **label**\ =\ *labelPattern*:
matches options based on their labels, i.e. the visible text. (This
is the default.)
* label=regexp:^[Oo]ther
* \ **value**\ =\ *valuePattern*:
matches options based on their values.
* value=other
* \ **id**\ =\ *id*:
matches options based on their ids.
* id=option1
* \ **index**\ =\ *index*:
matches an option based on its index (offset from zero).
* index=2
If no option locator prefix is provided, the default behaviour is to match on \ **label**\ .
'selectLocator' is an element locator identifying a drop-down menu
'optionLocator' is an option locator (a label by default)
"""
self.do_command("select", [selectLocator,optionLocator,])
def add_selection(self,locator,optionLocator):
"""
Add a selection to the set of selected options in a multi-select element using an option locator.
@see #doSelect for details of option locators
'locator' is an element locator identifying a multi-select box
'optionLocator' is an option locator (a label by default)
"""
self.do_command("addSelection", [locator,optionLocator,])
def remove_selection(self,locator,optionLocator):
"""
Remove a selection from the set of selected options in a multi-select element using an option locator.
@see #doSelect for details of option locators
'locator' is an element locator identifying a multi-select box
'optionLocator' is an option locator (a label by default)
"""
self.do_command("removeSelection", [locator,optionLocator,])
def remove_all_selections(self,locator):
"""
Unselects all of the selected options in a multi-select element.
'locator' is an element locator identifying a multi-select box
"""
self.do_command("removeAllSelections", [locator,])
def submit(self,formLocator):
"""
Submit the specified form. This is particularly useful for forms without
submit buttons, e.g. single-input "Search" forms.
'formLocator' is an element locator for the form you want to submit
"""
self.do_command("submit", [formLocator,])
def open(self,url):
"""
Opens an URL in the test frame. This accepts both relative and absolute
URLs.
The "open" command waits for the page to load before proceeding,
ie. the "AndWait" suffix is implicit.
\ *Note*: The URL must be on the same domain as the runner HTML
due to security restrictions in the browser (Same Origin Policy). If you
need to open an URL on another domain, use the Selenium Server to start a
new browser session on that domain.
'url' is the URL to open; may be relative or absolute
"""
self.do_command("open", [url,])
def open_window(self,url,windowID):
"""
Opens a popup window (if a window with that ID isn't already open).
After opening the window, you'll need to select it using the selectWindow
command.
This command can also be a useful workaround for bug SEL-339. In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example).
In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using
an empty (blank) url, like this: openWindow("", "myFunnyWindow").
'url' is the URL to open, which can be blank
'windowID' is the JavaScript window ID of the window to select
"""
self.do_command("openWindow", [url,windowID,])
def select_window(self,windowID):
"""
Selects a popup window using a window locator; once a popup window has been selected, all
commands go to that window. To select the main window again, use null
as the target.
Window locators provide different ways of specifying the window object:
by title, by internal JavaScript "name," or by JavaScript variable.
* \ **title**\ =\ *My Special Window*:
Finds the window using the text that appears in the title bar. Be careful;
two windows can share the same title. If that happens, this locator will
just pick one.
* \ **name**\ =\ *myWindow*:
Finds the window using its internal JavaScript "name" property. This is the second
parameter "windowName" passed to the JavaScript method window.open(url, windowName, windowFeatures, replaceFlag)
(which Selenium intercepts).
* \ **var**\ =\ *variableName*:
Some pop-up windows are unnamed (anonymous), but are associated with a JavaScript variable name in the current
application window, e.g. "window.foo = window.open(url);". In those cases, you can open the window using
"var=foo".
If no window locator prefix is provided, we'll try to guess what you mean like this:
        1.) if windowID is null (or the string "null"), then it is assumed the user is referring to the original window instantiated by the browser.
2.) if the value of the "windowID" parameter is a JavaScript variable name in the current application window, then it is assumed
that this variable contains the return value from a call to the JavaScript window.open() method.
3.) Otherwise, selenium looks in a hash it maintains that maps string names to window "names".
4.) If \ *that* fails, we'll try looping over all of the known windows to try to find the appropriate "title".
Since "title" is not necessarily unique, this may have unexpected behavior.
If you're having trouble figuring out the name of a window that you want to manipulate, look at the Selenium log messages
which identify the names of windows created via window.open (and therefore intercepted by Selenium). You will see messages
like the following for each window as it is opened:
``debug: window.open call intercepted; window ID (which you can use with selectWindow()) is "myNewWindow"``
In some cases, Selenium will be unable to intercept a call to window.open (if the call occurs during or before the "onLoad" event, for example).
(This is bug SEL-339.) In those cases, you can force Selenium to notice the open window's name by using the Selenium openWindow command, using
an empty (blank) url, like this: openWindow("", "myFunnyWindow").
'windowID' is the JavaScript window ID of the window to select
"""
self.do_command("selectWindow", [windowID,])
def select_frame(self,locator):
"""
Selects a frame within the current window. (You may invoke this command
multiple times to select nested frames.) To select the parent frame, use
"relative=parent" as a locator; to select the top frame, use "relative=top".
You can also select a frame by its 0-based index number; select the first frame with
"index=0", or the third frame with "index=2".
You may also use a DOM expression to identify the frame you want directly,
like this: ``dom=frames["main"].frames["subframe"]``
'locator' is an element locator identifying a frame or iframe
"""
self.do_command("selectFrame", [locator,])
def get_whether_this_frame_match_frame_expression(self,currentFrameString,target):
"""
Determine whether current/locator identify the frame containing this running code.
This is useful in proxy injection mode, where this code runs in every
browser frame and window, and sometimes the selenium server needs to identify
the "current" frame. In this case, when the test calls selectFrame, this
routine is called for each frame to figure out which one has been selected.
The selected frame will return true, while all others will return false.
'currentFrameString' is starting frame
'target' is new frame (which might be relative to the current one)
"""
return self.get_boolean("getWhetherThisFrameMatchFrameExpression", [currentFrameString,target,])
def get_whether_this_window_match_window_expression(self,currentWindowString,target):
"""
Determine whether currentWindowString plus target identify the window containing this running code.
This is useful in proxy injection mode, where this code runs in every
browser frame and window, and sometimes the selenium server needs to identify
the "current" window. In this case, when the test calls selectWindow, this
routine is called for each window to figure out which one has been selected.
The selected window will return true, while all others will return false.
'currentWindowString' is starting window
'target' is new window (which might be relative to the current one, e.g., "_parent")
"""
return self.get_boolean("getWhetherThisWindowMatchWindowExpression", [currentWindowString,target,])
def wait_for_pop_up(self,windowID,timeout):
"""
Waits for a popup window to appear and load up.
'windowID' is the JavaScript window "name" of the window that will appear (not the text of the title bar)
'timeout' is a timeout in milliseconds, after which the action will return with an error
"""
self.do_command("waitForPopUp", [windowID,timeout,])
def choose_cancel_on_next_confirmation(self):
"""
By default, Selenium's overridden window.confirm() function will
return true, as if the user had manually clicked OK; after running
this command, the next call to confirm() will return false, as if
the user had clicked Cancel. Selenium will then resume using the
default behavior for future confirmations, automatically returning
true (OK) unless/until you explicitly call this command for each
confirmation.
"""
self.do_command("chooseCancelOnNextConfirmation", [])
def choose_ok_on_next_confirmation(self):
"""
Undo the effect of calling chooseCancelOnNextConfirmation. Note
that Selenium's overridden window.confirm() function will normally automatically
return true, as if the user had manually clicked OK, so you shouldn't
need to use this command unless for some reason you need to change
your mind prior to the next confirmation. After any confirmation, Selenium will resume using the
default behavior for future confirmations, automatically returning
true (OK) unless/until you explicitly call chooseCancelOnNextConfirmation for each
confirmation.
"""
self.do_command("chooseOkOnNextConfirmation", [])
def answer_on_next_prompt(self,answer):
"""
Instructs Selenium to return the specified answer string in response to
the next JavaScript prompt [window.prompt()].
'answer' is the answer to give in response to the prompt pop-up
"""
self.do_command("answerOnNextPrompt", [answer,])
def go_back(self):
"""
Simulates the user clicking the "back" button on their browser.
"""
self.do_command("goBack", [])
def refresh(self):
"""
Simulates the user clicking the "Refresh" button on their browser.
"""
self.do_command("refresh", [])
def close(self):
"""
Simulates the user clicking the "close" button in the titlebar of a popup
window or tab.
"""
self.do_command("close", [])
def is_alert_present(self):
"""
Has an alert occurred?
This function never throws an exception
"""
return self.get_boolean("isAlertPresent", [])
def is_prompt_present(self):
"""
Has a prompt occurred?
This function never throws an exception
"""
return self.get_boolean("isPromptPresent", [])
def is_confirmation_present(self):
"""
Has confirm() been called?
This function never throws an exception
"""
return self.get_boolean("isConfirmationPresent", [])
def get_alert(self):
"""
Retrieves the message of a JavaScript alert generated during the previous action, or fail if there were no alerts.
Getting an alert has the same effect as manually clicking OK. If an
alert is generated but you do not get/verify it, the next Selenium action
will fail.
NOTE: under Selenium, JavaScript alerts will NOT pop up a visible alert
dialog.
NOTE: Selenium does NOT support JavaScript alerts that are generated in a
page's onload() event handler. In this case a visible dialog WILL be
generated and Selenium will hang until someone manually clicks OK.
"""
return self.get_string("getAlert", [])
def get_confirmation(self):
"""
Retrieves the message of a JavaScript confirmation dialog generated during
the previous action.
By default, the confirm function will return true, having the same effect
as manually clicking OK. This can be changed by prior execution of the
        chooseCancelOnNextConfirmation command. If a confirmation is generated
but you do not get/verify it, the next Selenium action will fail.
NOTE: under Selenium, JavaScript confirmations will NOT pop up a visible
dialog.
NOTE: Selenium does NOT support JavaScript confirmations that are
generated in a page's onload() event handler. In this case a visible
dialog WILL be generated and Selenium will hang until you manually click
OK.
"""
return self.get_string("getConfirmation", [])
def get_prompt(self):
"""
Retrieves the message of a JavaScript question prompt dialog generated during
the previous action.
Successful handling of the prompt requires prior execution of the
answerOnNextPrompt command. If a prompt is generated but you
do not get/verify it, the next Selenium action will fail.
NOTE: under Selenium, JavaScript prompts will NOT pop up a visible
dialog.
NOTE: Selenium does NOT support JavaScript prompts that are generated in a
page's onload() event handler. In this case a visible dialog WILL be
generated and Selenium will hang until someone manually clicks OK.
"""
return self.get_string("getPrompt", [])
def get_location(self):
"""
Gets the absolute URL of the current page.
"""
return self.get_string("getLocation", [])
def get_title(self):
"""
Gets the title of the current page.
"""
return self.get_string("getTitle", [])
def get_body_text(self):
"""
Gets the entire text of the page.
"""
return self.get_string("getBodyText", [])
def get_value(self,locator):
"""
Gets the (whitespace-trimmed) value of an input field (or anything else with a value parameter).
For checkbox/radio elements, the value will be "on" or "off" depending on
whether the element is checked or not.
'locator' is an element locator
"""
return self.get_string("getValue", [locator,])
def get_text(self,locator):
"""
Gets the text of an element. This works for any element that contains
text. This command uses either the textContent (Mozilla-like browsers) or
the innerText (IE-like browsers) of the element, which is the rendered
text shown to the user.
'locator' is an element locator
"""
return self.get_string("getText", [locator,])
def highlight(self,locator):
"""
Briefly changes the backgroundColor of the specified element yellow. Useful for debugging.
'locator' is an element locator
"""
self.do_command("highlight", [locator,])
def get_eval(self,script):
"""
Gets the result of evaluating the specified JavaScript snippet. The snippet may
have multiple lines, but only the result of the last line will be returned.
Note that, by default, the snippet will run in the context of the "selenium"
object itself, so ``this`` will refer to the Selenium object. Use ``window`` to
refer to the window of your application, e.g. ``window.document.getElementById('foo')``
If you need to use
a locator to refer to a single element in your application page, you can
use ``this.browserbot.findElement("id=foo")`` where "id=foo" is your locator.
'script' is the JavaScript snippet to run
"""
return self.get_string("getEval", [script,])
def is_checked(self,locator):
"""
Gets whether a toggle-button (checkbox/radio) is checked. Fails if the specified element doesn't exist or isn't a toggle-button.
'locator' is an element locator pointing to a checkbox or radio button
"""
return self.get_boolean("isChecked", [locator,])
def get_table(self,tableCellAddress):
"""
        Gets the text from a cell of a table. The cellAddress syntax is
tableLocator.row.column, where row and column start at 0.
'tableCellAddress' is a cell address, e.g. "foo.1.4"
"""
return self.get_string("getTable", [tableCellAddress,])
def get_selected_labels(self,selectLocator):
"""
Gets all option labels (visible text) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedLabels", [selectLocator,])
def get_selected_label(self,selectLocator):
"""
Gets option label (visible text) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedLabel", [selectLocator,])
def get_selected_values(self,selectLocator):
"""
Gets all option values (value attributes) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedValues", [selectLocator,])
def get_selected_value(self,selectLocator):
"""
Gets option value (value attribute) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedValue", [selectLocator,])
def get_selected_indexes(self,selectLocator):
"""
Gets all option indexes (option number, starting at 0) for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedIndexes", [selectLocator,])
def get_selected_index(self,selectLocator):
"""
Gets option index (option number, starting at 0) for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedIndex", [selectLocator,])
def get_selected_ids(self,selectLocator):
"""
Gets all option element IDs for selected options in the specified select or multi-select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectedIds", [selectLocator,])
def get_selected_id(self,selectLocator):
"""
Gets option element ID for selected option in the specified select element.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string("getSelectedId", [selectLocator,])
def is_something_selected(self,selectLocator):
"""
Determines whether some option in a drop-down menu is selected.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_boolean("isSomethingSelected", [selectLocator,])
def get_select_options(self,selectLocator):
"""
Gets all option labels in the specified select drop-down.
'selectLocator' is an element locator identifying a drop-down menu
"""
return self.get_string_array("getSelectOptions", [selectLocator,])
def get_attribute(self,attributeLocator):
"""
Gets the value of an element attribute. The value of the attribute may
differ across browsers (this is the case for the "style" attribute, for
example).
'attributeLocator' is an element locator followed by an @ sign and then the name of the attribute, e.g. "foo@bar"
"""
return self.get_string("getAttribute", [attributeLocator,])
def is_text_present(self,pattern):
"""
Verifies that the specified text pattern appears somewhere on the rendered page shown to the user.
'pattern' is a pattern to match with the text of the page
"""
return self.get_boolean("isTextPresent", [pattern,])
def is_element_present(self,locator):
"""
Verifies that the specified element is somewhere on the page.
'locator' is an element locator
"""
return self.get_boolean("isElementPresent", [locator,])
def is_visible(self,locator):
"""
Determines if the specified element is visible. An
element can be rendered invisible by setting the CSS "visibility"
property to "hidden", or the "display" property to "none", either for the
        element itself or one of its ancestors. This method will fail if
the element is not present.
'locator' is an element locator
"""
return self.get_boolean("isVisible", [locator,])
def is_editable(self,locator):
"""
Determines whether the specified input element is editable, ie hasn't been disabled.
This method will fail if the specified element isn't an input element.
'locator' is an element locator
"""
return self.get_boolean("isEditable", [locator,])
def get_all_buttons(self):
"""
Returns the IDs of all buttons on the page.
If a given button has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllButtons", [])
def get_all_links(self):
"""
Returns the IDs of all links on the page.
If a given link has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllLinks", [])
def get_all_fields(self):
"""
Returns the IDs of all input fields on the page.
If a given field has no ID, it will appear as "" in this array.
"""
return self.get_string_array("getAllFields", [])
def get_attribute_from_all_windows(self,attributeName):
"""
Returns every instance of some attribute from all known windows.
'attributeName' is name of an attribute on the windows
"""
return self.get_string_array("getAttributeFromAllWindows", [attributeName,])
def dragdrop(self,locator,movementsString):
"""
deprecated - use dragAndDrop instead
'locator' is an element locator
'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300"
"""
self.do_command("dragdrop", [locator,movementsString,])
def set_mouse_speed(self,pixels):
"""
Configure the number of pixels between "mousemove" events during dragAndDrop commands (default=10).
Setting this value to 0 means that we'll send a "mousemove" event to every single pixel
in between the start location and the end location; that can be very slow, and may
cause some browsers to force the JavaScript to timeout.
If the mouse speed is greater than the distance between the two dragged objects, we'll
just send one "mousemove" at the start location and then one final one at the end location.
'pixels' is the number of pixels between "mousemove" events
"""
self.do_command("setMouseSpeed", [pixels,])
def get_mouse_speed(self):
"""
Returns the number of pixels between "mousemove" events during dragAndDrop commands (default=10).
"""
return self.get_number("getMouseSpeed", [])
def drag_and_drop(self,locator,movementsString):
"""
Drags an element a certain distance and then drops it
'locator' is an element locator
'movementsString' is offset in pixels from the current location to which the element should be moved, e.g., "+70,-300"
"""
self.do_command("dragAndDrop", [locator,movementsString,])
def drag_and_drop_to_object(self,locatorOfObjectToBeDragged,locatorOfDragDestinationObject):
"""
Drags an element and drops it on another element
'locatorOfObjectToBeDragged' is an element to be dragged
'locatorOfDragDestinationObject' is an element whose location (i.e., whose center-most pixel) will be the point where locatorOfObjectToBeDragged is dropped
"""
self.do_command("dragAndDropToObject", [locatorOfObjectToBeDragged,locatorOfDragDestinationObject,])
def window_focus(self):
"""
Gives focus to the currently selected window
"""
self.do_command("windowFocus", [])
def window_maximize(self):
"""
Resize currently selected window to take up the entire screen
"""
self.do_command("windowMaximize", [])
def get_all_window_ids(self):
"""
Returns the IDs of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowIds", [])
def get_all_window_names(self):
"""
Returns the names of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowNames", [])
def get_all_window_titles(self):
"""
Returns the titles of all windows that the browser knows about.
"""
return self.get_string_array("getAllWindowTitles", [])
def get_html_source(self):
"""
Returns the entire HTML source between the opening and
closing "html" tags.
"""
return self.get_string("getHtmlSource", [])
def set_cursor_position(self,locator,position):
"""
Moves the text cursor to the specified position in the given input element or textarea.
This method will fail if the specified element isn't an input element or textarea.
'locator' is an element locator pointing to an input element or textarea
'position' is the numerical position of the cursor in the field; position should be 0 to move the position to the beginning of the field. You can also set the cursor to -1 to move it to the end of the field.
"""
self.do_command("setCursorPosition", [locator,position,])
def get_element_index(self,locator):
"""
Get the relative index of an element to its parent (starting from 0). Comment nodes and empty text nodes
will be ignored.
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementIndex", [locator,])
def is_ordered(self,locator1,locator2):
"""
Check if these two elements have the same parent and are ordered siblings in the DOM. Two identical elements will
not be considered ordered.
'locator1' is an element locator pointing to the first element
'locator2' is an element locator pointing to the second element
"""
return self.get_boolean("isOrdered", [locator1,locator2,])
def get_element_position_left(self,locator):
"""
Retrieves the horizontal position of an element
'locator' is an element locator pointing to an element OR an element itself
"""
return self.get_number("getElementPositionLeft", [locator,])
def get_element_position_top(self,locator):
"""
Retrieves the vertical position of an element
'locator' is an element locator pointing to an element OR an element itself
"""
return self.get_number("getElementPositionTop", [locator,])
def get_element_width(self,locator):
"""
Retrieves the width of an element
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementWidth", [locator,])
def get_element_height(self,locator):
"""
Retrieves the height of an element
'locator' is an element locator pointing to an element
"""
return self.get_number("getElementHeight", [locator,])
def get_cursor_position(self,locator):
"""
Retrieves the text cursor position in the given input element or textarea; beware, this may not work perfectly on all browsers.
Specifically, if the cursor/selection has been cleared by JavaScript, this command will tend to
return the last known position of the cursor, even though the cursor is now gone from the page. This is filed as SEL-243.
This method will fail if the specified element isn't an input element or textarea, or there is no cursor in the element.
'locator' is an element locator pointing to an input element or textarea
"""
return self.get_number("getCursorPosition", [locator,])
def get_expression(self,expression):
"""
Returns the specified expression.
This is useful because of JavaScript preprocessing.
It is used to generate commands like assertExpression and waitForExpression.
'expression' is the value to return
"""
return self.get_string("getExpression", [expression,])
def get_xpath_count(self,xpath):
"""
Returns the number of nodes that match the specified xpath, e.g. "//table" would give
the number of tables.
'xpath' is the xpath expression to evaluate. do NOT wrap this expression in a 'count()' function; we will do that for you.
"""
return self.get_number("getXpathCount", [xpath,])
def assign_id(self,locator,identifier):
"""
Temporarily sets the "id" attribute of the specified element, so you can locate it in the future
using its ID rather than a slow/complicated XPath. This ID will disappear once the page is
reloaded.
'locator' is an element locator pointing to an element
'identifier' is a string to be used as the ID of the specified element
"""
self.do_command("assignId", [locator,identifier,])
def allow_native_xpath(self,allow):
"""
Specifies whether Selenium should use the native in-browser implementation
of XPath (if any native version is available); if you pass "false" to
this function, we will always use our pure-JavaScript xpath library.
Using the pure-JS xpath library can improve the consistency of xpath
element locators between different browser vendors, but the pure-JS
version is much slower than the native implementations.
'allow' is boolean, true means we'll prefer to use native XPath; false means we'll only use JS XPath
"""
self.do_command("allowNativeXpath", [allow,])
def ignore_attributes_without_value(self,ignore):
"""
Specifies whether Selenium will ignore xpath attributes that have no
value, i.e. are the empty string, when using the non-native xpath
evaluation engine. You'd want to do this for performance reasons in IE.
However, this could break certain xpaths, for example an xpath that looks
for an attribute whose value is NOT the empty string.
The hope is that such xpaths are relatively rare, but the user should
have the option of using them. Note that this only influences xpath
evaluation when using the ajaxslt engine (i.e. not "javascript-xpath").
'ignore' is boolean, true means we'll ignore attributes without value at the expense of xpath "correctness"; false means we'll sacrifice speed for correctness.
"""
self.do_command("ignoreAttributesWithoutValue", [ignore,])
def wait_for_condition(self,script,timeout):
"""
Runs the specified JavaScript snippet repeatedly until it evaluates to "true".
The snippet may have multiple lines, but only the result of the last line
will be considered.
Note that, by default, the snippet will be run in the runner's test window, not in the window
of your application. To get the window of your application, you can use
the JavaScript snippet ``selenium.browserbot.getCurrentWindow()``, and then
run your JavaScript there.
'script' is the JavaScript snippet to run
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForCondition", [script,timeout,])
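# Hedged usage sketch (illustration only, not part of the generated API): `sel` is an
# assumed selenium client instance created elsewhere, e.g.
#   sel = selenium("localhost", 4444, "*firefox", "http://www.example.com/")
# The condition runs in the application window via browserbot:
#   sel.wait_for_condition(
#       "selenium.browserbot.getCurrentWindow().document.readyState == 'complete'",
#       "30000")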
def set_timeout(self,timeout):
"""
Specifies the amount of time that Selenium will wait for actions to complete.
Actions that require waiting include "open" and the "waitFor\*" actions.
The default timeout is 30 seconds.
'timeout' is a timeout in milliseconds, after which the action will return with an error
"""
self.do_command("setTimeout", [timeout,])
def wait_for_page_to_load(self,timeout):
"""
Waits for a new page to load.
You can use this command instead of the "AndWait" suffixes, "clickAndWait", "selectAndWait", "typeAndWait" etc.
(which are only available in the JS API).
Selenium constantly keeps track of new pages loading, and sets a "newPageLoaded"
flag when it first notices a page load. Running any other Selenium command afterwards
resets the flag to false. Hence, if you want to wait for a page to load, you must
wait immediately after the Selenium command that caused the page load.
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForPageToLoad", [timeout,])
def wait_for_frame_to_load(self,frameAddress,timeout):
"""
Waits for a new frame to load.
Selenium constantly keeps track of new pages and frames loading,
and sets a "newPageLoaded" flag when it first notices a page load.
See waitForPageToLoad for more information.
'frameAddress' is FrameAddress from the server side
'timeout' is a timeout in milliseconds, after which this command will return with an error
"""
self.do_command("waitForFrameToLoad", [frameAddress,timeout,])
def get_cookie(self):
"""
Return all cookies of the current page under test.
"""
return self.get_string("getCookie", [])
def get_cookie_by_name(self,name):
"""
Returns the value of the cookie with the specified name, or throws an error if the cookie is not present.
'name' is the name of the cookie
"""
return self.get_string("getCookieByName", [name,])
def is_cookie_present(self,name):
"""
Returns true if a cookie with the specified name is present, or false otherwise.
'name' is the name of the cookie
"""
return self.get_boolean("isCookiePresent", [name,])
def create_cookie(self,nameValuePair,optionsString):
"""
Create a new cookie whose path and domain are the same as those of the current page
under test, unless you specify a path for this cookie explicitly.
'nameValuePair' is name and value of the cookie in a format "name=value"
'optionsString' is options for the cookie. Currently supported options include 'path', 'max_age' and 'domain'. The optionsString's format is "path=/path/, max_age=60, domain=.foo.com". The order of options is irrelevant; the unit of 'max_age' is seconds. Note that specifying a domain that isn't a subset of the current domain will usually fail.
"""
self.do_command("createCookie", [nameValuePair,optionsString,])
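# Hedged usage sketch (illustration only): `sel` is an assumed selenium client
# instance; the cookie name/value and options are made-up examples of the
# documented "path=..., max_age=..., domain=..." format.
#   sel.create_cookie("session_id=abc123",
#                     "path=/app/, max_age=3600, domain=.example.com")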
def delete_cookie(self,name,optionsString):
"""
Delete a named cookie with specified path and domain. Be careful; to delete a cookie, you
need to delete it using the exact same path and domain that were used to create the cookie.
If the path is wrong, or the domain is wrong, the cookie simply won't be deleted. Also
note that specifying a domain that isn't a subset of the current domain will usually fail.
Since there's no way to discover at runtime the original path and domain of a given cookie,
we've added an option called 'recurse' to try all sub-domains of the current domain with
all paths that are a subset of the current path. Beware; this option can be slow. In
big-O notation, it operates in O(n\*m) time, where n is the number of dots in the domain
name and m is the number of slashes in the path.
'name' is the name of the cookie to be deleted
'optionsString' is options for the cookie. Currently supported options include 'path', 'domain' and 'recurse'. The optionsString's format is "path=/path/, domain=.foo.com, recurse=true". The order of options is irrelevant. Note that specifying a domain that isn't a subset of the current domain will usually fail.
"""
self.do_command("deleteCookie", [name,optionsString,])
def delete_all_visible_cookies(self):
"""
Calls deleteCookie with recurse=true on all cookies visible to the current page.
As noted on the documentation for deleteCookie, recurse=true can be much slower
than simply deleting the cookies using a known domain/path.
"""
self.do_command("deleteAllVisibleCookies", [])
def set_browser_log_level(self,logLevel):
"""
Sets the threshold for browser-side logging messages; log messages beneath this threshold will be discarded.
Valid logLevel strings are: "debug", "info", "warn", "error" or "off".
To see the browser logs, you need to
either show the log window in GUI mode, or enable browser-side logging in Selenium RC.
'logLevel' is one of the following: "debug", "info", "warn", "error" or "off"
"""
self.do_command("setBrowserLogLevel", [logLevel,])
def run_script(self,script):
"""
Creates a new "script" tag in the body of the current test window, and
adds the specified text into the body of the command. Scripts run in
this way can often be debugged more easily than scripts executed using
Selenium's "getEval" command. Beware that JS exceptions thrown in these script
tags aren't managed by Selenium, so you should probably wrap your script
in try/catch blocks if there is any chance that the script will throw
an exception.
'script' is the JavaScript snippet to run
"""
self.do_command("runScript", [script,])
def add_location_strategy(self,strategyName,functionDefinition):
"""
Defines a new function for Selenium to locate elements on the page.
For example,
if you define the strategy "foo", and someone runs click("foo=blah"), we'll
run your function, passing you the string "blah", and click on the element
that your function
returns, or throw an "Element not found" error if your function returns null.
We'll pass three arguments to your function:
* locator: the string the user passed in
* inWindow: the currently selected window
* inDocument: the currently selected document
The function must return null if the element can't be found.
'strategyName' is the name of the strategy to define; this should use only letters [a-zA-Z] with no spaces or other punctuation.
'functionDefinition' is a string defining the body of a function in JavaScript. For example: ``return inDocument.getElementById(locator);``
"""
self.do_command("addLocationStrategy", [strategyName,functionDefinition,])
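# Hedged usage sketch (illustration only): defines a hypothetical "foo"
# strategy that resolves locators by element id, then clicks through it.
# `sel` is an assumed selenium client instance.
#   sel.add_location_strategy("foo", "return inDocument.getElementById(locator);")
#   sel.click("foo=loginButton")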
def capture_entire_page_screenshot(self,filename):
"""
Saves the entire contents of the current window canvas to a PNG file.
Currently this only works in Mozilla and when running in chrome mode.
Contrast this with the captureScreenshot command, which captures the
contents of the OS viewport (i.e. whatever is currently being displayed
on the monitor), and is implemented in the RC only. Implementation
mostly borrowed from the Screengrab! Firefox extension. Please see
http://www.screengrab.org for details.
'filename' is the path to the file to persist the screenshot as. No filename extension will be appended by default. Directories will not be created if they do not exist, and an exception will be thrown, possibly by native code.
"""
self.do_command("captureEntirePageScreenshot", [filename,])
def set_context(self,context):
"""
Writes a message to the status bar and adds a note to the browser-side
log.
'context' is the message to be sent to the browser
"""
self.do_command("setContext", [context,])
def attach_file(self,fieldLocator,fileLocator):
"""
Sets a file input (upload) field to the file listed in fileLocator
'fieldLocator' is an element locator
'fileLocator' is a URL pointing to the specified file. Before the file can be set in the input field (fieldLocator), Selenium RC may need to transfer the file to the local machine before attaching the file in a web page form. This is common in selenium grid configurations where the RC server driving the browser is not the same machine that started the test. Supported Browsers: Firefox ("\*chrome") only.
"""
self.do_command("attachFile", [fieldLocator,fileLocator,])
def capture_screenshot(self,filename):
"""
Captures a PNG screenshot to the specified file.
'filename' is the absolute path to the file to be written, e.g. "c:\blah\screenshot.png"
"""
self.do_command("captureScreenshot", [filename,])
def shut_down_selenium_server(self):
"""
Kills the running Selenium Server and all browser sessions. After you run this command, you will no longer be able to send
commands to the server; you can't remotely start the server once it has been stopped. Normally
you should prefer to run the "stop" command, which terminates the current browser session, rather than
shutting down the entire server.
"""
self.do_command("shutDownSeleniumServer", [])
def key_down_native(self,keycode):
"""
Simulates a user pressing a key (without releasing it yet) by sending a native operating system keystroke.
This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing
a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and
metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular
element, focus on the element first before running this command.
'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes!
"""
self.do_command("keyDownNative", [keycode,])
def key_up_native(self,keycode):
"""
Simulates a user releasing a key by sending a native operating system keystroke.
This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing
a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and
metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular
element, focus on the element first before running this command.
'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes!
"""
self.do_command("keyUpNative", [keycode,])
def key_press_native(self,keycode):
"""
Simulates a user pressing and releasing a key by sending a native operating system keystroke.
This function uses the java.awt.Robot class to send a keystroke; this more accurately simulates typing
a key on the keyboard. It does not honor settings from the shiftKeyDown, controlKeyDown, altKeyDown and
metaKeyDown commands, and does not target any particular HTML element. To send a keystroke to a particular
element, focus on the element first before running this command.
'keycode' is an integer keycode number corresponding to a java.awt.event.KeyEvent; note that Java keycodes are NOT the same thing as JavaScript keycodes!
"""
self.do_command("keyPressNative", [keycode,])
| bsd-2-clause |
draperjames/bokeh | bokeh/plotting/figure.py | 3 | 18251 | from __future__ import absolute_import, print_function
import logging
logger = logging.getLogger(__name__)
from six import string_types
from ..core.properties import Auto, Either, Enum, Float, Int, Seq, Instance, String, Tuple
from ..core.enums import HorizontalLocation, VerticalLocation
from ..models import Plot
from ..models.annotations import Title
from ..models.ranges import Range
from ..models.tools import Tool
from ..models import glyphs, markers
from ..util.options import Options
from ..util._plot_arg_helpers import _convert_responsive
from .helpers import _get_range, _process_axis_and_grid, _process_tools_arg, _glyph_function, _process_active_tools
DEFAULT_TOOLS = "pan,wheel_zoom,box_zoom,save,reset,help"
class FigureOptions(Options):
tools = Either(String, Seq(Either(String, Instance(Tool))), default=DEFAULT_TOOLS, help="""
Tools the plot should start with.
""")
x_range = Either(Tuple(Float, Float), Seq(String), Instance(Range), help="""
Customize the x-range of the plot.
""")
y_range = Either(Tuple(Float, Float), Seq(String), Instance(Range), help="""
Customize the y-range of the plot.
""")
x_minor_ticks = Either(Auto, Int, default="auto", help="""
Number of minor ticks between adjacent x-axis major ticks.
""")
y_minor_ticks = Either(Auto, Int, default="auto", help="""
Number of minor ticks between adjacent y-axis major ticks.
""")
x_axis_location = Enum(VerticalLocation, default="below", help="""
Where the x-axis should be located.
""")
y_axis_location = Enum(HorizontalLocation, default="left", help="""
Where the y-axis should be located.
""")
x_axis_label = String(default="", help="""
A label for the x-axis.
""")
y_axis_label = String(default="", help="""
A label for the y-axis.
""")
active_drag = Either(Auto, String, Instance(Tool), default="auto", help="""
Which drag tool should initially be active.
""")
active_scroll = Either(Auto, String, Instance(Tool), default="auto", help="""
Which scroll tool should initially be active.
""")
active_tap = Either(Auto, String, Instance(Tool), default="auto", help="""
Which tap tool should initially be active.
""")
x_axis_type = Either(Auto, Enum("linear", "log", "datetime"), default="auto", help="""
The type of the x-axis.
""")
y_axis_type = Either(Auto, Enum("linear", "log", "datetime"), default="auto", help="""
The type of the y-axis.
""")
class Figure(Plot):
''' A subclass of :class:`~bokeh.models.plots.Plot` that simplifies plot
creation with default axes, grids, tools, etc.
In addition to all the Bokeh model property attributes documented below,
the ``Figure`` initializer also accepts the following options, which can
help simplify configuration:
.. bokeh-options:: FigureOptions
:module: bokeh.plotting.figure
'''
__subtype__ = "Figure"
__view_model__ = "Plot"
def __init__(self, *arg, **kw):
opts = FigureOptions(kw)
title = kw.get("title", None)
if isinstance(title, string_types):
kw['title'] = Title(text=title)
super(Figure, self).__init__(*arg, **kw)
self.x_range = _get_range(opts.x_range)
self.y_range = _get_range(opts.y_range)
_process_axis_and_grid(self, opts.x_axis_type, opts.x_axis_location, opts.x_minor_ticks, opts.x_axis_label, self.x_range, 0)
_process_axis_and_grid(self, opts.y_axis_type, opts.y_axis_location, opts.y_minor_ticks, opts.y_axis_label, self.y_range, 1)
tool_objs, tool_map = _process_tools_arg(self, opts.tools)
self.add_tools(*tool_objs)
_process_active_tools(self.toolbar, tool_map, opts.active_drag, opts.active_scroll, opts.active_tap)
annular_wedge = _glyph_function(glyphs.AnnularWedge)
annulus = _glyph_function(glyphs.Annulus, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.annulus(x=[1, 2, 3], y=[1, 2, 3], color="#7FC97F",
inner_radius=0.2, outer_radius=0.5)
show(plot)
""")
arc = _glyph_function(glyphs.Arc)
asterisk = _glyph_function(markers.Asterisk, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.asterisk(x=[1,2,3], y=[1,2,3], size=20, color="#F0027F")
show(plot)
""")
bezier = _glyph_function(glyphs.Bezier)
circle = _glyph_function(markers.Circle, """
.. note::
Only one of ``size`` or ``radius`` should be provided. Note that ``radius``
defaults to data units.
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle(x=[1, 2, 3], y=[1, 2, 3], size=20)
show(plot)
""")
circle_cross = _glyph_function(markers.CircleCross, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_cross(x=[1,2,3], y=[4,5,6], size=20,
color="#FB8072", fill_alpha=0.2, line_width=2)
show(plot)
""")
circle_x = _glyph_function(markers.CircleX, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.circle_x(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#DD1C77", fill_alpha=0.2)
show(plot)
""")
cross = _glyph_function(markers.Cross, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#E6550D", line_width=2)
show(plot)
""")
diamond = _glyph_function(markers.Diamond, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#1C9099", line_width=2)
show(plot)
""")
diamond_cross = _glyph_function(markers.DiamondCross, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.diamond_cross(x=[1, 2, 3], y=[1, 2, 3], size=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
""")
hbar = _glyph_function(glyphs.HBar, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.hbar(y=[1, 2, 3], height=0.5, left=0, right=[1,2,3], color="#CAB2D6")
show(plot)
""")
ellipse = _glyph_function(glyphs.Ellipse, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ellipse(x=[1, 2, 3], y=[1, 2, 3], width=30, height=20,
color="#386CB0", fill_color=None, line_width=2)
show(plot)
""")
image = _glyph_function(glyphs.Image, """
.. note::
If both ``palette`` and ``color_mapper`` are passed, a ``ValueError``
exception will be raised. If neither is passed, then the ``Greys9``
palette will be used as a default.
""")
image_rgba = _glyph_function(glyphs.ImageRGBA, """
.. note::
The ``image_rgba`` method accepts images as a two-dimensional array of RGBA
values (encoded as 32-bit integers).
""")
image_url = _glyph_function(glyphs.ImageURL)
inverted_triangle = _glyph_function(markers.InvertedTriangle, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.inverted_triangle(x=[1, 2, 3], y=[1, 2, 3], size=20, color="#DE2D26")
show(plot)
""")
line = _glyph_function(glyphs.Line, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(title="line", plot_width=300, plot_height=300)
p.line(x=[1, 2, 3, 4, 5], y=[6, 7, 2, 4, 5])
show(p)
""")
multi_line = _glyph_function(glyphs.MultiLine, """
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.multi_line(xs=[[1, 2, 3], [2, 3, 4]], ys=[[6, 7, 2], [4, 5, 7]],
color=['red','green'])
show(p)
""")
oval = _glyph_function(glyphs.Oval, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.oval(x=[1, 2, 3], y=[1, 2, 3], width=0.2, height=0.4,
angle=-0.7, color="#1D91C0")
show(plot)
""")
patch = _glyph_function(glyphs.Patch, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.patch(x=[1, 2, 3, 2], y=[6, 7, 2, 2], color="#99d8c9")
show(p)
""")
patches = _glyph_function(glyphs.Patches, """
.. note::
For this glyph, the data is not simply an array of scalars, it is an
"array of arrays".
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
p = figure(plot_width=300, plot_height=300)
p.patches(xs=[[1,2,3],[4,5,6,5]], ys=[[1,2,1],[4,5,5,4]],
color=["#43a2ca", "#a8ddb5"])
show(p)
""")
quad = _glyph_function(glyphs.Quad, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.quad(top=[2, 3, 4], bottom=[1, 2, 3], left=[1, 2, 3],
right=[1.2, 2.5, 3.7], color="#B3DE69")
show(plot)
""")
quadratic = _glyph_function(glyphs.Quadratic)
ray = _glyph_function(glyphs.Ray, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.ray(x=[1, 2, 3], y=[1, 2, 3], length=45, angle=-0.7, color="#FB8072",
line_width=2)
show(plot)
""")
rect = _glyph_function(glyphs.Rect, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.rect(x=[1, 2, 3], y=[1, 2, 3], width=10, height=20, color="#CAB2D6",
width_units="screen", height_units="screen")
show(plot)
""")
segment = _glyph_function(glyphs.Segment, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.segment(x0=[1, 2, 3], y0=[1, 2, 3], x1=[1, 2, 3],
y1=[1.2, 2.5, 3.7], color="#F4A582",
line_width=3)
show(plot)
""")
square = _glyph_function(markers.Square, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,30], color="#74ADD1")
show(plot)
""")
square_cross = _glyph_function(markers.SquareCross, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_cross(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#7FC97F",fill_color=None, line_width=2)
show(plot)
""")
square_x = _glyph_function(markers.SquareX, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.square_x(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#FDAE6B",fill_color=None, line_width=2)
show(plot)
""")
text = _glyph_function(glyphs.Text, """
.. note::
The location and angle of the text relative to the ``x``, ``y`` coordinates
is indicated by the alignment and baseline text properties.
Returns:
GlyphRenderer
""")
triangle = _glyph_function(markers.Triangle, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.triangle(x=[1, 2, 3], y=[1, 2, 3], size=[10,20,25],
color="#99D594", line_width=2)
show(plot)
""")
vbar = _glyph_function(glyphs.VBar, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.vbar(x=[1, 2, 3], width=0.5, bottom=0, top=[1,2,3], color="#CAB2D6")
show(plot)
""")
wedge = _glyph_function(glyphs.Wedge, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.wedge(x=[1, 2, 3], y=[1, 2, 3], radius=15, start_angle=0.6,
end_angle=4.1, radius_units="screen", color="#2b8cbe")
show(plot)
""")
x = _glyph_function(markers.X, """
Examples:
.. bokeh-plot::
:source-position: above
from bokeh.plotting import figure, output_file, show
plot = figure(width=300, height=300)
plot.x(x=[1, 2, 3], y=[1, 2, 3], size=[10, 20, 25], color="#fa9fb5")
show(plot)
""")
def scatter(self, *args, **kwargs):
""" Creates a scatter plot of the given x and y items.
Args:
x (str or seq[float]) : values or field names of center x coordinates
y (str or seq[float]) : values or field names of center y coordinates
size (str or list[float]) : values or field names of sizes in screen units
marker (str, optional): a valid marker_type, defaults to "circle"
color (color value, optional): shorthand to set both fill and line color
source (:class:`~bokeh.models.sources.ColumnDataSource`) : a user-supplied data source.
An attempt will be made to convert the object to :class:`~bokeh.models.sources.ColumnDataSource`
if needed. If none is supplied, one is created for the user automatically.
**kwargs: :ref:`userguide_styling_line_properties` and :ref:`userguide_styling_fill_properties`
Examples:
>>> p.scatter([1,2,3],[4,5,6], fill_color="red")
>>> p.scatter("data1", "data2", source=data_source, ...)
"""
markertype = kwargs.pop("marker", "circle")
if markertype not in _marker_types:
raise ValueError("Invalid marker type '%s'. Use markers() to see a list of valid marker types." % markertype)
# TODO (bev) make better when plotting.scatter is removed
conversions = {
"*": "asterisk",
"+": "cross",
"o": "circle",
"ox": "circle_x",
"o+": "circle_cross"
}
if markertype in conversions:
markertype = conversions[markertype]
return getattr(self, markertype)(*args, **kwargs)
def figure(**kwargs):
''' Create a new :class:`~bokeh.plotting.figure.Figure` for plotting.
In addition to the standard :class:`~bokeh.plotting.figure.Figure`
property values (e.g. ``plot_width`` or ``sizing_mode``) the following
additional options can be passed as well:
.. bokeh-options:: FigureOptions
:module: bokeh.plotting.figure
Returns:
Figure
'''
if 'plot_width' in kwargs and 'width' in kwargs:
raise ValueError("figure() called with both 'plot_width' and 'width' supplied, supply only one")
if 'plot_height' in kwargs and 'height' in kwargs:
raise ValueError("figure() called with both 'plot_height' and 'height' supplied, supply only one")
if 'height' in kwargs:
kwargs['plot_height'] = kwargs.pop('height')
if 'width' in kwargs:
kwargs['plot_width'] = kwargs.pop('width')
if 'responsive' in kwargs and 'sizing_mode' in kwargs:
raise ValueError("figure() called with both 'responsive' and 'sizing_mode' supplied, supply only one")
if 'responsive' in kwargs:
kwargs['sizing_mode'] = _convert_responsive(kwargs['responsive'])
del kwargs['responsive']
fig = Figure(**kwargs)
return fig
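# Hedged usage sketch (illustration only): shows a few of the FigureOptions
# keyword arguments accepted by figure(); the data values are made up.
#   from bokeh.plotting import figure, show
#   p = figure(plot_width=400, plot_height=300,
#              x_range=(0, 10), y_axis_type="log",
#              tools="pan,wheel_zoom,reset", active_scroll="wheel_zoom",
#              x_axis_label="time (s)", y_axis_label="amplitude")
#   p.line([1, 2, 3], [1, 10, 100])
#   show(p)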
_marker_types = [
"asterisk",
"circle",
"circle_cross",
"circle_x",
"cross",
"diamond",
"diamond_cross",
"inverted_triangle",
"square",
"square_x",
"square_cross",
"triangle",
"x",
"*",
"+",
"o",
"ox",
"o+",
]
def markers():
""" Prints a list of valid marker types for scatter()
Returns:
None
"""
print("Available markers: \n - " + "\n - ".join(_marker_types))
_color_fields = set(["color", "fill_color", "line_color"])
_alpha_fields = set(["alpha", "fill_alpha", "line_alpha"])
| bsd-3-clause |
fayf/pyload | module/plugins/hooks/Captcha9Kw.py | 13 | 9806 | # -*- coding: utf-8 -*-
from __future__ import with_statement
import re
import time
from base64 import b64encode
from module.network.HTTPRequest import BadHeader
from module.plugins.internal.Hook import Hook, threaded
class Captcha9Kw(Hook):
__name__ = "Captcha9Kw"
__type__ = "hook"
__version__ = "0.30"
__status__ = "testing"
__config__ = [("check_client" , "bool" , "Don't use if client is connected" , True ),
("confirm" , "bool" , "Confirm Captcha (cost +6 credits)" , False ),
("captchaperhour", "int" , "Captcha per hour" , "9999" ),
("captchapermin" , "int" , "Captcha per minute" , "9999" ),
("prio" , "int" , "Priority (max 10)(cost +0 -> +10 credits)" , "0" ),
("queue" , "int" , "Max. Queue (max 999)" , "50" ),
("hoster_options", "string" , "Hoster options (format: pluginname:prio=1:selfsolfe=1:confirm=1:timeout=900|...)", "ShareonlineBiz:prio=0:timeout=999 | UploadedTo:prio=0:timeout=999"),
("selfsolve" , "bool" , "Selfsolve (manually solve your captcha in your 9kw client if active)" , "0" ),
("passkey" , "password", "API key" , "" ),
("timeout" , "int" , "Timeout in seconds (min 60, max 3999)" , "900" )]
__description__ = """Send captchas to 9kw.eu"""
__license__ = "GPLv3"
__authors__ = [("RaNaN" , "[email protected]" ),
("Walter Purcaro", "[email protected]")]
API_URL = "https://www.9kw.eu/index.cgi"
def get_credits(self):
res = self.load(self.API_URL,
get={'apikey': self.get_config('passkey'),
'pyload': "1",
'source': "pyload",
'action': "usercaptchaguthaben"})
if res.isdigit():
self.log_info(_("%s credits left") % res)
credits = self.info['credits'] = int(res)
return credits
else:
self.log_error(res)
return 0
@threaded
def _process_captcha(self, task):
try:
with open(task.captchaFile, 'rb') as f:
data = f.read()
except IOError, e:
self.log_error(e)
return
pluginname = re.search(r'_(.+?)_\d+.\w+', task.captchaFile).group(1)
option = {'min' : 2,
'max' : 50,
'phrase' : 0,
'numeric' : 0,
'case_sensitive': 0,
'math' : 0,
'prio' : min(max(self.get_config('prio'), 0), 10),
'confirm' : self.get_config('confirm'),
'timeout' : min(max(self.get_config('timeout'), 300), 3999),
'selfsolve' : self.get_config('selfsolve'),
'cph' : self.get_config('captchaperhour'),
'cpm' : self.get_config('captchapermin')}
for opt in str(self.get_config('hoster_options')).split('|'):
details = map(str.strip, opt.split(':'))
if not details or details[0].lower() != pluginname.lower():
continue
for d in details:
hosteroption = d.split("=")
if len(hosteroption) < 2 or not hosteroption[1].isdigit():
continue
o = hosteroption[0].lower()
if o in option:
option[o] = hosteroption[1]
break
post_data = {'apikey' : self.get_config('passkey'),
'prio' : option['prio'],
'confirm' : option['confirm'],
'maxtimeout' : option['timeout'],
'selfsolve' : option['selfsolve'],
'captchaperhour': option['cph'],
'captchapermin' : option['cpm'],
'case-sensitive': option['case_sensitive'],
'min_len' : option['min'],
'max_len' : option['max'],
'phrase' : option['phrase'],
'numeric' : option['numeric'],
'math' : option['math'],
'oldsource' : pluginname,
'pyload' : 1,
'source' : "pyload",
'base64' : 1,
'mouse' : 1 if task.isPositional() else 0,
'file-upload-01': b64encode(data),
'action' : "usercaptchaupload"}
for _i in xrange(5):
try:
res = self.load(self.API_URL, post=post_data)
except BadHeader, e:
time.sleep(3)
else:
if res and res.isdigit():
break
else:
self.log_error(_("Bad upload: %s") % res)
return
self.log_debug("NewCaptchaID ticket: %s" % res, task.captchaFile)
task.data['ticket'] = res
for _i in xrange(int(self.get_config('timeout') / 5)):
result = self.load(self.API_URL,
get={'apikey': self.get_config('passkey'),
'id' : res,
'pyload': "1",
'info' : "1",
'source': "pyload",
'action': "usercaptchacorrectdata"})
if not result or result == "NO DATA":
time.sleep(5)
else:
break
else:
self.log_debug("Could not send request: %s" % res)
result = None
self.log_info(_("Captcha result for ticket %s: %s") % (res, result))
task.setResult(result)
def captcha_task(self, task):
if not task.isTextual() and not task.isPositional():
return
if not self.get_config('passkey'):
return
if self.pyload.isClientConnected() and self.get_config('check_client'):
return
credits = self.get_credits()
if not credits:
self.log_error(_("Your captcha 9kw.eu account does not have enough credits"))
return
queue = min(self.get_config('queue'), 999)
timeout = min(max(self.get_config('timeout'), 300), 3999)
pluginname = re.search(r'_(.+?)_\d+.\w+', task.captchaFile).group(1)
for _i in xrange(5):
servercheck = self.load("http://www.9kw.eu/grafik/servercheck.txt")
if queue > int(re.search(r'queue=(\d+)', servercheck).group(1)):
break
time.sleep(10)
else:
self.fail(_("Too many captchas in queue"))
for opt in str(self.get_config('hoster_options')).split('|'):
details = map(str.strip, opt.split(':'))
if not details or details[0].lower() != pluginname.lower():
continue
for d in details:
hosteroption = d.split("=")
if len(hosteroption) > 1 \
and hosteroption[0].lower() == "timeout" \
and hosteroption[1].isdigit():
timeout = int(hosteroption[1])
break
task.handler.append(self)
task.setWaiting(timeout)
self._process_captcha(task)
def _captcha_response(self, task, correct):
type = "correct" if correct else "refund"
if 'ticket' not in task.data:
self.log_debug("No CaptchaID for %s request (task: %s)" % (type, task))
return
passkey = self.get_config('passkey')
for _i in xrange(3):
res = self.load(self.API_URL,
get={'action' : "usercaptchacorrectback",
'apikey' : passkey,
'api_key': passkey,
'correct': "1" if correct else "2",
'pyload' : "1",
'source' : "pyload",
'id' : task.data['ticket']})
self.log_debug("Request %s: %s" % (type, res))
if res == "OK":
break
time.sleep(5)
else:
self.log_debug("Could not send %s request: %s" % (type, res))
def captcha_correct(self, task):
self._captcha_response(task, True)
def captcha_invalid(self, task):
self._captcha_response(task, False)
| gpl-3.0 |
zenefits/selenium | py/test/selenium/webdriver/common/utils.py | 68 | 2155 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import socket
import sys
import unittest
def run_tests(test_case, driver, webserver):
logging.basicConfig(level=logging.WARN)
webserver.start()
try:
testLoader = unittest.TestLoader()
testRunner = unittest.TextTestRunner()
test_case_name = "selenium.test.selenium.webdriver.common.%s" % test_case
if len(sys.argv) > 1:
testMethod = sys.argv[1]
testRunner.run(
testLoader.loadTestsFromName(
"%s.%s" % (test_case_name, testMethod)))
else:
testRunner.run(testLoader.loadTestsFromName(test_case_name))
driver.quit()
finally:
webserver.stop()
def require_online(func):
"""Only execute the test method if the internet is accessible."""
def testMethod(self):
socket_ = socket.socket()
try:
socket_.settimeout(1)
socket_.connect(("www.google.com", 80))
return func(self)
except socket.error:
return lambda x: None
testMethod.func_name = func.func_name
return testMethod
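# Hedged usage sketch (illustration only): a hypothetical test method wrapped
# with require_online, so it becomes a no-op when the network check fails.
#   class ExampleTests(unittest.TestCase):
#       @require_online
#       def testCanReachRemoteSite(self):
#           self.driver.get("http://www.example.com/")  # `driver` assumed set up elsewhere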
def convert_cookie_to_json(cookie):
cookie_dict = {}
for key, value in cookie.items():
if key == "expires":
cookie_dict["expiry"] = int(value) * 1000
else:
cookie_dict[key] = value
return cookie_dict
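# Hedged usage sketch (illustration only, made-up values): "expires" (seconds)
# is renamed to "expiry" and scaled to milliseconds; other keys pass through.
#   cookie = {"name": "session", "value": "abc123", "expires": 1450000000}
#   convert_cookie_to_json(cookie)
#   # -> {"name": "session", "value": "abc123", "expiry": 1450000000000}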
| apache-2.0 |
skurochkin/selenium | py/selenium/webdriver/common/by.py | 69 | 1275 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
The By implementation.
"""
class By(object):
"""
Set of supported locator strategies.
"""
ID = "id"
XPATH = "xpath"
LINK_TEXT = "link text"
PARTIAL_LINK_TEXT = "partial link text"
NAME = "name"
TAG_NAME = "tag name"
CLASS_NAME = "class name"
CSS_SELECTOR = "css selector"
@classmethod
def is_valid(cls, by):
for attr in dir(cls):
if by == getattr(cls, attr):
return True
return False
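# Hedged usage sketch (illustration only): the strategies above are typically
# passed to find_element/find_elements on an assumed `driver` WebDriver instance.
#   from selenium.webdriver.common.by import By
#   element = driver.find_element(By.CSS_SELECTOR, "div.content a")
#   assert By.is_valid("css selector")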
| apache-2.0 |
adaxi/couchpotato | libs/tmdb3/tmdb_api.py | 14 | 33019 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#-----------------------
# Name: tmdb_api.py Simple-to-use Python interface to TMDB's API v3
# Python Library
# Author: Raymond Wagner
# Purpose: This Python library is intended to provide a series of classes
# and methods for search and retrieval of text metadata and image
# URLs from TMDB.
# Preliminary API specifications can be found at
# http://help.themoviedb.org/kb/api/about-3
# License: Creative Commons GNU GPL v2
# (http://creativecommons.org/licenses/GPL/2.0/)
#-----------------------
__title__ = ("tmdb_api - Simple-to-use Python interface to TMDB's API v3 " +
"(www.themoviedb.org)")
__author__ = "Raymond Wagner"
__purpose__ = """
This Python library is intended to provide a series of classes and methods
for search and retrieval of text metadata and image URLs from TMDB.
Preliminary API specifications can be found at
http://help.themoviedb.org/kb/api/about-3"""
__version__ = "v0.7.0"
# 0.1.0 Initial development
# 0.2.0 Add caching mechanism for API queries
# 0.2.1 Temporary work around for broken search paging
# 0.3.0 Rework backend machinery for managing OO interface to results
# 0.3.1 Add collection support
# 0.3.2 Remove MythTV key from results.py
# 0.3.3 Add functional language support
# 0.3.4 Re-enable search paging
# 0.3.5 Add methods for grabbing current, popular, and top rated movies
# 0.3.6 Rework paging mechanism
# 0.3.7 Generalize caching mechanism, and allow controllability
# 0.4.0 Add full locale support (language and country) and optional fall through
# 0.4.1 Add custom classmethod for dealing with IMDB movie IDs
# 0.4.2 Improve cache file selection for Windows systems
# 0.4.3 Add a few missed Person properties
# 0.4.4 Add support for additional Studio information
# 0.4.5 Add locale fallthrough for images and alternate titles
# 0.4.6 Add slice support for search results
# 0.5.0 Rework cache framework and improve file cache performance
# 0.6.0 Add user authentication support
# 0.6.1 Add adult filtering for people searches
# 0.6.2 Add similar movie search for Movie objects
# 0.6.3 Add Studio search
# 0.6.4 Add Genre list and associated Movie search
# 0.6.5 Prevent data from being blanked out by subsequent queries
# 0.6.6 Turn date processing errors into mutable warnings
# 0.6.7 Add support for searching by year
# 0.6.8 Add support for collection images
# 0.6.9 Correct Movie image language filtering
# 0.6.10 Add upcoming movie classmethod
# 0.6.11 Fix URL for top rated Movie query
# 0.6.12 Add support for Movie watchlist query and editing
# 0.6.13 Fix URL for rating Movies
# 0.6.14 Add support for Lists
# 0.6.15 Add ability to search Collections
# 0.6.16 Make absent primary images return None (previously u'')
# 0.6.17 Add userrating/votes to Image, add overview to Collection, remove
# releasedate sorting from Collection Movies
# 0.7.0 Add support for television series data
from request import set_key, Request
from util import Datapoint, Datalist, Datadict, Element, NameRepr, SearchRepr
from pager import PagedRequest
from locales import get_locale, set_locale
from tmdb_auth import get_session, set_session
from tmdb_exceptions import *
import json
import urllib
import urllib2
import datetime
DEBUG = False
def process_date(datestr):
try:
return datetime.date(*[int(x) for x in datestr.split('-')])
except (TypeError, ValueError):
import sys
import warnings
import traceback
_,_,tb = sys.exc_info()
f,l,_,_ = traceback.extract_tb(tb)[-1]
warnings.warn_explicit(('"{0}" is not a supported date format. ' +
'Please fix upstream data at ' +
'http://www.themoviedb.org.'
).format(datestr), Warning, f, l)
return None
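# Hedged usage sketch (illustration only): process_date parses TMDB's
# "YYYY-MM-DD" strings and falls back to None (with a warning) on bad input.
#   process_date("2012-07-20")   # -> datetime.date(2012, 7, 20)
#   process_date("")             # -> None, after warning about upstream data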
class Configuration(Element):
images = Datapoint('images')
def _populate(self):
return Request('configuration')
Configuration = Configuration()
class Account(NameRepr, Element):
def _populate(self):
return Request('account', session_id=self._session.sessionid)
id = Datapoint('id')
adult = Datapoint('include_adult')
country = Datapoint('iso_3166_1')
language = Datapoint('iso_639_1')
name = Datapoint('name')
username = Datapoint('username')
@property
def locale(self):
return get_locale(self.language, self.country)
def searchMovie(query, locale=None, adult=False, year=None):
kwargs = {'query': query, 'include_adult': adult}
if year is not None:
try:
kwargs['year'] = year.year
except AttributeError:
kwargs['year'] = year
return MovieSearchResult(Request('search/movie', **kwargs), locale=locale)
def searchMovieWithYear(query, locale=None, adult=False):
year = None
if (len(query) > 6) and (query[-1] == ')') and (query[-6] == '('):
# simple syntax check, no need for regular expression
try:
year = int(query[-5:-1])
except ValueError:
pass
else:
if 1885 < year < 2050:
# strip out year from search
query = query[:-7]
else:
# sanity check on resolved year failed, pass through
year = None
return searchMovie(query, locale, adult, year)
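# Hedged usage sketch (illustration only): set_key() must be called with a
# valid TMDB API key before any request; the titles are just examples.
#   set_key("your_api_key_here")
#   res = searchMovieWithYear("Inception (2010)")   # year stripped and used as a filter
#   res = searchMovie("Alien", year=1979)           # explicit year argument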
class MovieSearchResult(SearchRepr, PagedRequest):
"""Stores a list of search matches."""
_name = None
def __init__(self, request, locale=None):
if locale is None:
locale = get_locale()
super(MovieSearchResult, self).__init__(
request.new(language=locale.language),
lambda x: Movie(raw=x, locale=locale))
def searchSeries(query, first_air_date_year=None, search_type=None, locale=None):
return SeriesSearchResult(
Request('search/tv', query=query, first_air_date_year=first_air_date_year, search_type=search_type),
locale=locale)
class SeriesSearchResult(SearchRepr, PagedRequest):
"""Stores a list of search matches."""
_name = None
def __init__(self, request, locale=None):
if locale is None:
locale = get_locale()
super(SeriesSearchResult, self).__init__(
request.new(language=locale.language),
lambda x: Series(raw=x, locale=locale))
def searchPerson(query, adult=False):
return PeopleSearchResult(Request('search/person', query=query,
include_adult=adult))
class PeopleSearchResult(SearchRepr, PagedRequest):
"""Stores a list of search matches."""
_name = None
def __init__(self, request):
super(PeopleSearchResult, self).__init__(
request, lambda x: Person(raw=x))
def searchStudio(query):
return StudioSearchResult(Request('search/company', query=query))
class StudioSearchResult(SearchRepr, PagedRequest):
"""Stores a list of search matches."""
_name = None
def __init__(self, request):
super(StudioSearchResult, self).__init__(
request, lambda x: Studio(raw=x))
def searchList(query, adult=False):
return ListSearchResult(Request('search/list', query=query, include_adult=adult))
class ListSearchResult(SearchRepr, PagedRequest):
"""Stores a list of search matches."""
_name = None
def __init__(self, request):
super(ListSearchResult, self).__init__(
request, lambda x: List(raw=x))
def searchCollection(query, locale=None):
return CollectionSearchResult(Request('search/collection', query=query),
locale=locale)
class CollectionSearchResult(SearchRepr, PagedRequest):
"""Stores a list of search matches."""
_name = None
def __init__(self, request, locale=None):
if locale is None:
locale = get_locale()
super(CollectionSearchResult, self).__init__(
request.new(language=locale.language),
lambda x: Collection(raw=x, locale=locale))
class Image(Element):
filename = Datapoint('file_path', initarg=1,
handler=lambda x: x.lstrip('/'))
aspectratio = Datapoint('aspect_ratio')
height = Datapoint('height')
width = Datapoint('width')
language = Datapoint('iso_639_1')
userrating = Datapoint('vote_average')
votes = Datapoint('vote_count')
def sizes(self):
return ['original']
def geturl(self, size='original'):
if size not in self.sizes():
raise TMDBImageSizeError
url = Configuration.images['secure_base_url'].rstrip('/')
return url+'/{0}/{1}'.format(size, self.filename)
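# Hedged usage sketch (illustration only): available sizes come from TMDB's
# /configuration response, so the exact strings (e.g. 'w500') can vary.
#   poster = movie.poster            # an assumed Poster instance
#   poster.sizes()                   # e.g. ['w92', 'w154', 'w342', 'w500', 'original']
#   url = poster.geturl('w500')      # raises TMDBImageSizeError for unknown sizes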
# sort preferring locale's language, but keep remaining ordering consistent
def __lt__(self, other):
if not isinstance(other, Image):
return False
return (self.language == self._locale.language) \
and (self.language != other.language)
def __gt__(self, other):
if not isinstance(other, Image):
return True
return (self.language != other.language) \
and (other.language == self._locale.language)
# direct match for comparison
def __eq__(self, other):
if not isinstance(other, Image):
return False
return self.filename == other.filename
# special handling for boolean to see if exists
def __nonzero__(self):
if len(self.filename) == 0:
return False
return True
def __repr__(self):
# BASE62 encoded filename, no need to worry about unicode
return u"<{0.__class__.__name__} '{0.filename}'>".format(self)
class Backdrop(Image):
def sizes(self):
return Configuration.images['backdrop_sizes']
class Poster(Image):
def sizes(self):
return Configuration.images['poster_sizes']
class Profile(Image):
def sizes(self):
return Configuration.images['profile_sizes']
class Logo(Image):
def sizes(self):
return Configuration.images['logo_sizes']
class AlternateTitle(Element):
country = Datapoint('iso_3166_1')
title = Datapoint('title')
# sort preferring locale's country, but keep remaining ordering consistent
def __lt__(self, other):
return (self.country == self._locale.country) \
and (self.country != other.country)
def __gt__(self, other):
return (self.country != other.country) \
and (other.country == self._locale.country)
def __eq__(self, other):
return self.country == other.country
def __repr__(self):
return u"<{0.__class__.__name__} '{0.title}' ({0.country})>"\
.format(self).encode('utf-8')
class Person(Element):
id = Datapoint('id', initarg=1)
name = Datapoint('name')
biography = Datapoint('biography')
dayofbirth = Datapoint('birthday', default=None, handler=process_date)
dayofdeath = Datapoint('deathday', default=None, handler=process_date)
homepage = Datapoint('homepage')
birthplace = Datapoint('place_of_birth')
profile = Datapoint('profile_path', handler=Profile,
raw=False, default=None)
adult = Datapoint('adult')
aliases = Datalist('also_known_as')
def __repr__(self):
return u"<{0.__class__.__name__} '{0.name}'>"\
.format(self).encode('utf-8')
def _populate(self):
return Request('person/{0}'.format(self.id))
def _populate_credits(self):
return Request('person/{0}/credits'.format(self.id),
language=self._locale.language)
def _populate_images(self):
return Request('person/{0}/images'.format(self.id))
roles = Datalist('cast', handler=lambda x: ReverseCast(raw=x),
poller=_populate_credits)
crew = Datalist('crew', handler=lambda x: ReverseCrew(raw=x),
poller=_populate_credits)
profiles = Datalist('profiles', handler=Profile, poller=_populate_images)
class Cast(Person):
character = Datapoint('character')
order = Datapoint('order')
def __repr__(self):
return u"<{0.__class__.__name__} '{0.name}' as '{0.character}'>"\
.format(self).encode('utf-8')
class Crew(Person):
job = Datapoint('job')
department = Datapoint('department')
def __repr__(self):
return u"<{0.__class__.__name__} '{0.name}','{0.job}'>"\
.format(self).encode('utf-8')
class Keyword(Element):
id = Datapoint('id')
name = Datapoint('name')
def __repr__(self):
return u"<{0.__class__.__name__} {0.name}>"\
.format(self).encode('utf-8')
class Release(Element):
certification = Datapoint('certification')
country = Datapoint('iso_3166_1')
releasedate = Datapoint('release_date', handler=process_date)
def __repr__(self):
return u"<{0.__class__.__name__} {0.country}, {0.releasedate}>"\
.format(self).encode('utf-8')
class Trailer(Element):
name = Datapoint('name')
size = Datapoint('size')
source = Datapoint('source')
class YoutubeTrailer(Trailer):
def geturl(self):
return "http://www.youtube.com/watch?v={0}".format(self.source)
def __repr__(self):
# modified BASE64 encoding, no need to worry about unicode
return u"<{0.__class__.__name__} '{0.name}'>".format(self)
class AppleTrailer(Element):
name = Datapoint('name')
sources = Datadict('sources', handler=Trailer, attr='size')
def sizes(self):
return self.sources.keys()
def geturl(self, size=None):
if size is None:
# sort assuming ###p format for now, take largest resolution
size = str(sorted(
[int(size[:-1]) for size in self.sources]
)[-1]) + 'p'
return self.sources[size].source
def __repr__(self):
return u"<{0.__class__.__name__} '{0.name}'>".format(self)
class Translation(Element):
name = Datapoint('name')
language = Datapoint('iso_639_1')
englishname = Datapoint('english_name')
def __repr__(self):
return u"<{0.__class__.__name__} '{0.name}' ({0.language})>"\
.format(self).encode('utf-8')
class Genre(NameRepr, Element):
id = Datapoint('id')
name = Datapoint('name')
def _populate_movies(self):
return Request('genre/{0}/movies'.format(self.id), \
language=self._locale.language)
@property
def movies(self):
if 'movies' not in self._data:
search = MovieSearchResult(self._populate_movies(), \
locale=self._locale)
search._name = "{0.name} Movies".format(self)
self._data['movies'] = search
return self._data['movies']
@classmethod
def getAll(cls, locale=None):
class GenreList(Element):
genres = Datalist('genres', handler=Genre)
def _populate(self):
return Request('genre/list', language=self._locale.language)
return GenreList(locale=locale).genres
class Studio(NameRepr, Element):
id = Datapoint('id', initarg=1)
name = Datapoint('name')
description = Datapoint('description')
headquarters = Datapoint('headquarters')
logo = Datapoint('logo_path', handler=Logo, raw=False, default=None)
# FIXME: manage not-yet-defined handlers in a way that will propogate
# locale information properly
parent = Datapoint('parent_company', handler=lambda x: Studio(raw=x))
def _populate(self):
return Request('company/{0}'.format(self.id))
def _populate_movies(self):
return Request('company/{0}/movies'.format(self.id),
language=self._locale.language)
# FIXME: add a cleaner way of adding types with no additional processing
@property
def movies(self):
if 'movies' not in self._data:
search = MovieSearchResult(self._populate_movies(),
locale=self._locale)
search._name = "{0.name} Movies".format(self)
self._data['movies'] = search
return self._data['movies']
class Country(NameRepr, Element):
code = Datapoint('iso_3166_1')
name = Datapoint('name')
class Language(NameRepr, Element):
code = Datapoint('iso_639_1')
name = Datapoint('name')
class Movie(Element):
@classmethod
def latest(cls):
req = Request('latest/movie')
req.lifetime = 600
return cls(raw=req.readJSON())
@classmethod
def nowplaying(cls, locale=None):
res = MovieSearchResult(Request('movie/now-playing'), locale=locale)
res._name = 'Now Playing'
return res
@classmethod
def mostpopular(cls, locale=None):
res = MovieSearchResult(Request('movie/popular'), locale=locale)
res._name = 'Popular'
return res
@classmethod
def toprated(cls, locale=None):
res = MovieSearchResult(Request('movie/top_rated'), locale=locale)
res._name = 'Top Rated'
return res
@classmethod
def upcoming(cls, locale=None):
res = MovieSearchResult(Request('movie/upcoming'), locale=locale)
res._name = 'Upcoming'
return res
@classmethod
def favorites(cls, session=None):
if session is None:
session = get_session()
account = Account(session=session)
res = MovieSearchResult(
Request('account/{0}/favorite_movies'.format(account.id),
session_id=session.sessionid))
res._name = "Favorites"
return res
@classmethod
def ratedmovies(cls, session=None):
if session is None:
session = get_session()
account = Account(session=session)
res = MovieSearchResult(
Request('account/{0}/rated_movies'.format(account.id),
session_id=session.sessionid))
res._name = "Movies You Rated"
return res
@classmethod
def watchlist(cls, session=None):
if session is None:
session = get_session()
account = Account(session=session)
res = MovieSearchResult(
Request('account/{0}/movie_watchlist'.format(account.id),
session_id=session.sessionid))
res._name = "Movies You're Watching"
return res
@classmethod
def fromIMDB(cls, imdbid, locale=None):
try:
# assume string
if not imdbid.startswith('tt'):
imdbid = "tt{0:0>7}".format(imdbid)
except AttributeError:
# assume integer
imdbid = "tt{0:0>7}".format(imdbid)
if locale is None:
locale = get_locale()
movie = cls(imdbid, locale=locale)
movie._populate()
return movie
id = Datapoint('id', initarg=1)
title = Datapoint('title')
originaltitle = Datapoint('original_title')
tagline = Datapoint('tagline')
overview = Datapoint('overview')
runtime = Datapoint('runtime')
budget = Datapoint('budget')
revenue = Datapoint('revenue')
releasedate = Datapoint('release_date', handler=process_date)
homepage = Datapoint('homepage')
imdb = Datapoint('imdb_id')
backdrop = Datapoint('backdrop_path', handler=Backdrop,
raw=False, default=None)
poster = Datapoint('poster_path', handler=Poster,
raw=False, default=None)
popularity = Datapoint('popularity')
userrating = Datapoint('vote_average')
votes = Datapoint('vote_count')
adult = Datapoint('adult')
collection = Datapoint('belongs_to_collection', handler=lambda x: \
Collection(raw=x))
genres = Datalist('genres', handler=Genre)
studios = Datalist('production_companies', handler=Studio)
countries = Datalist('production_countries', handler=Country)
languages = Datalist('spoken_languages', handler=Language)
def _populate(self):
return Request('movie/{0}'.format(self.id), \
language=self._locale.language)
def _populate_titles(self):
kwargs = {}
if not self._locale.fallthrough:
kwargs['country'] = self._locale.country
return Request('movie/{0}/alternative_titles'.format(self.id),
**kwargs)
def _populate_cast(self):
return Request('movie/{0}/casts'.format(self.id))
def _populate_images(self):
kwargs = {}
if not self._locale.fallthrough:
kwargs['language'] = self._locale.language
return Request('movie/{0}/images'.format(self.id), **kwargs)
def _populate_keywords(self):
return Request('movie/{0}/keywords'.format(self.id))
def _populate_releases(self):
return Request('movie/{0}/releases'.format(self.id))
def _populate_trailers(self):
return Request('movie/{0}/trailers'.format(self.id),
language=self._locale.language)
def _populate_translations(self):
return Request('movie/{0}/translations'.format(self.id))
alternate_titles = Datalist('titles', handler=AlternateTitle, \
poller=_populate_titles, sort=True)
# FIXME: this data point will need to be changed to 'credits' at some point
cast = Datalist('cast', handler=Cast,
poller=_populate_cast, sort='order')
crew = Datalist('crew', handler=Crew, poller=_populate_cast)
backdrops = Datalist('backdrops', handler=Backdrop,
poller=_populate_images, sort=True)
posters = Datalist('posters', handler=Poster,
poller=_populate_images, sort=True)
keywords = Datalist('keywords', handler=Keyword,
poller=_populate_keywords)
releases = Datadict('countries', handler=Release,
poller=_populate_releases, attr='country')
youtube_trailers = Datalist('youtube', handler=YoutubeTrailer,
poller=_populate_trailers)
apple_trailers = Datalist('quicktime', handler=AppleTrailer,
poller=_populate_trailers)
translations = Datalist('translations', handler=Translation,
poller=_populate_translations)
def setFavorite(self, value):
req = Request('account/{0}/favorite'.format(
Account(session=self._session).id),
session_id=self._session.sessionid)
req.add_data({'movie_id': self.id,
'favorite': str(bool(value)).lower()})
req.lifetime = 0
req.readJSON()
def setRating(self, value):
if not (0 <= value <= 10):
raise TMDBError("Ratings must be between '0' and '10'.")
req = Request('movie/{0}/rating'.format(self.id),
session_id=self._session.sessionid)
req.lifetime = 0
req.add_data({'value':value})
req.readJSON()
def setWatchlist(self, value):
req = Request('account/{0}/movie_watchlist'.format(
Account(session=self._session).id),
session_id=self._session.sessionid)
req.lifetime = 0
req.add_data({'movie_id': self.id,
'movie_watchlist': str(bool(value)).lower()})
req.readJSON()
def getSimilar(self):
return self.similar
@property
def similar(self):
res = MovieSearchResult(Request(
'movie/{0}/similar_movies'.format(self.id)),
locale=self._locale)
res._name = 'Similar to {0}'.format(self._printable_name())
return res
@property
def lists(self):
res = ListSearchResult(Request('movie/{0}/lists'.format(self.id)))
res._name = "Lists containing {0}".format(self._printable_name())
return res
def _printable_name(self):
if self.title is not None:
s = u"'{0}'".format(self.title)
elif self.originaltitle is not None:
s = u"'{0}'".format(self.originaltitle)
else:
s = u"'No Title'"
if self.releasedate:
s = u"{0} ({1})".format(s, self.releasedate.year)
return s
def __repr__(self):
return u"<{0} {1}>".format(self.__class__.__name__,
self._printable_name()).encode('utf-8')
class ReverseCast( Movie ):
character = Datapoint('character')
def __repr__(self):
return (u"<{0.__class__.__name__} '{0.character}' on {1}>"
.format(self, self._printable_name()).encode('utf-8'))
class ReverseCrew( Movie ):
department = Datapoint('department')
job = Datapoint('job')
def __repr__(self):
return (u"<{0.__class__.__name__} '{0.job}' for {1}>"
.format(self, self._printable_name()).encode('utf-8'))
class Collection(NameRepr, Element):
id = Datapoint('id', initarg=1)
name = Datapoint('name')
backdrop = Datapoint('backdrop_path', handler=Backdrop, \
raw=False, default=None)
poster = Datapoint('poster_path', handler=Poster, raw=False, default=None)
members = Datalist('parts', handler=Movie)
overview = Datapoint('overview')
def _populate(self):
return Request('collection/{0}'.format(self.id),
language=self._locale.language)
def _populate_images(self):
kwargs = {}
if not self._locale.fallthrough:
kwargs['language'] = self._locale.language
return Request('collection/{0}/images'.format(self.id), **kwargs)
backdrops = Datalist('backdrops', handler=Backdrop,
poller=_populate_images, sort=True)
posters = Datalist('posters', handler=Poster,
poller=_populate_images, sort=True)
class List(NameRepr, Element):
id = Datapoint('id', initarg=1)
name = Datapoint('name')
author = Datapoint('created_by')
description = Datapoint('description')
favorites = Datapoint('favorite_count')
language = Datapoint('iso_639_1')
count = Datapoint('item_count')
poster = Datapoint('poster_path', handler=Poster, raw=False, default=None)
members = Datalist('items', handler=Movie)
def _populate(self):
return Request('list/{0}'.format(self.id))
class Network(NameRepr,Element):
id = Datapoint('id', initarg=1)
name = Datapoint('name')
class Episode(NameRepr, Element):
episode_number = Datapoint('episode_number', initarg=3)
season_number = Datapoint('season_number', initarg=2)
series_id = Datapoint('series_id', initarg=1)
air_date = Datapoint('air_date', handler=process_date)
overview = Datapoint('overview')
name = Datapoint('name')
userrating = Datapoint('vote_average')
votes = Datapoint('vote_count')
id = Datapoint('id')
production_code = Datapoint('production_code')
still = Datapoint('still_path', handler=Backdrop, raw=False, default=None)
def _populate(self):
return Request('tv/{0}/season/{1}/episode/{2}'.format(self.series_id, self.season_number, self.episode_number),
language=self._locale.language)
def _populate_cast(self):
return Request('tv/{0}/season/{1}/episode/{2}/credits'.format(
self.series_id, self.season_number, self.episode_number),
language=self._locale.language)
def _populate_external_ids(self):
return Request('tv/{0}/season/{1}/episode/{2}/external_ids'.format(
self.series_id, self.season_number, self.episode_number))
def _populate_images(self):
kwargs = {}
if not self._locale.fallthrough:
kwargs['language'] = self._locale.language
return Request('tv/{0}/season/{1}/episode/{2}/images'.format(
self.series_id, self.season_number, self.episode_number), **kwargs)
cast = Datalist('cast', handler=Cast,
poller=_populate_cast, sort='order')
guest_stars = Datalist('guest_stars', handler=Cast,
poller=_populate_cast, sort='order')
crew = Datalist('crew', handler=Crew, poller=_populate_cast)
imdb_id = Datapoint('imdb_id', poller=_populate_external_ids)
freebase_id = Datapoint('freebase_id', poller=_populate_external_ids)
freebase_mid = Datapoint('freebase_mid', poller=_populate_external_ids)
tvdb_id = Datapoint('tvdb_id', poller=_populate_external_ids)
tvrage_id = Datapoint('tvrage_id', poller=_populate_external_ids)
stills = Datalist('stills', handler=Backdrop, poller=_populate_images, sort=True)
class Season(NameRepr, Element):
season_number = Datapoint('season_number', initarg=2)
series_id = Datapoint('series_id', initarg=1)
id = Datapoint('id')
air_date = Datapoint('air_date', handler=process_date)
poster = Datapoint('poster_path', handler=Poster, raw=False, default=None)
overview = Datapoint('overview')
name = Datapoint('name')
episodes = Datadict('episodes', attr='episode_number', handler=Episode,
passthrough={'series_id': 'series_id', 'season_number': 'season_number'})
def _populate(self):
return Request('tv/{0}/season/{1}'.format(self.series_id, self.season_number),
language=self._locale.language)
def _populate_images(self):
kwargs = {}
if not self._locale.fallthrough:
kwargs['language'] = self._locale.language
return Request('tv/{0}/season/{1}/images'.format(self.series_id, self.season_number), **kwargs)
def _populate_external_ids(self):
return Request('tv/{0}/season/{1}/external_ids'.format(self.series_id, self.season_number))
posters = Datalist('posters', handler=Poster,
poller=_populate_images, sort=True)
freebase_id = Datapoint('freebase_id', poller=_populate_external_ids)
freebase_mid = Datapoint('freebase_mid', poller=_populate_external_ids)
tvdb_id = Datapoint('tvdb_id', poller=_populate_external_ids)
tvrage_id = Datapoint('tvrage_id', poller=_populate_external_ids)
class Series(NameRepr, Element):
id = Datapoint('id', initarg=1)
backdrop = Datapoint('backdrop_path', handler=Backdrop, raw=False, default=None)
authors = Datalist('created_by', handler=Person)
episode_run_times = Datalist('episode_run_time')
first_air_date = Datapoint('first_air_date', handler=process_date)
last_air_date = Datapoint('last_air_date', handler=process_date)
genres = Datalist('genres', handler=Genre)
homepage = Datapoint('homepage')
in_production = Datapoint('in_production')
languages = Datalist('languages')
origin_countries = Datalist('origin_country')
name = Datapoint('name')
original_name = Datapoint('original_name')
number_of_episodes = Datapoint('number_of_episodes')
number_of_seasons = Datapoint('number_of_seasons')
overview = Datapoint('overview')
popularity = Datapoint('popularity')
status = Datapoint('status')
userrating = Datapoint('vote_average')
votes = Datapoint('vote_count')
poster = Datapoint('poster_path', handler=Poster, raw=False, default=None)
networks = Datalist('networks', handler=Network)
seasons = Datadict('seasons', attr='season_number', handler=Season, passthrough={'id': 'series_id'})
def _populate(self):
return Request('tv/{0}'.format(self.id),
language=self._locale.language)
def _populate_cast(self):
return Request('tv/{0}/credits'.format(self.id))
def _populate_images(self):
kwargs = {}
if not self._locale.fallthrough:
kwargs['language'] = self._locale.language
return Request('tv/{0}/images'.format(self.id), **kwargs)
def _populate_external_ids(self):
return Request('tv/{0}/external_ids'.format(self.id))
cast = Datalist('cast', handler=Cast,
poller=_populate_cast, sort='order')
crew = Datalist('crew', handler=Crew, poller=_populate_cast)
backdrops = Datalist('backdrops', handler=Backdrop,
poller=_populate_images, sort=True)
posters = Datalist('posters', handler=Poster,
poller=_populate_images, sort=True)
imdb_id = Datapoint('imdb_id', poller=_populate_external_ids)
freebase_id = Datapoint('freebase_id', poller=_populate_external_ids)
freebase_mid = Datapoint('freebase_mid', poller=_populate_external_ids)
tvdb_id = Datapoint('tvdb_id', poller=_populate_external_ids)
tvrage_id = Datapoint('tvrage_id', poller=_populate_external_ids)
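# The function below is an illustrative sketch added for documentation and is
# not part of the original API.  It assumes an API key has already been
# configured for this package (e.g. via a set_key() helper, which is an
# assumption -- it is not shown in this excerpt) and that themoviedb.org is
# reachable over the network.
def _example_movie_lookup():
    # Look a movie up by its IMDb id; fromIMDB() accepts either the 'tt'
    # prefixed string or the bare integer id.
    movie = Movie.fromIMDB('tt0137523')
    print(u"{0} ({1})".format(movie.title, movie.releasedate))
    # Genres, studios and similar movies are lazily populated data lists.
    for genre in movie.genres:
        print(genre.name)
    return movie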
| gpl-3.0 |
awalls-cx18/gnuradio | gr-vocoder/python/vocoder/cvsd.py | 6 | 3765 | #!/usr/bin/env python
#
# Copyright 2007,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from __future__ import division
from __future__ import unicode_literals
from gnuradio import gr, filter, blocks
from . import vocoder_swig
class cvsd_encode_fb(gr.hier_block2):
'''
This is a wrapper for the CVSD encoder that performs interpolation and filtering
necessary to work with the vocoding. It converts an incoming float (+-1) to a short, scales
it (to 32000; slightly below the maximum value), interpolates it, and then vocodes it.
The incoming sampling rate can be anything, though, of course, the higher the sampling rate and the
higher the interpolation rate are, the better the sound quality.
'''
def __init__(self, resample=8, bw=0.5):
'''
When using the CVSD vocoder, appropriate sampling rates are from 8k to 64k with resampling rates
from 1 to 8. A rate of 8k with a resampling rate of 8 provides a good quality signal.
'''
gr.hier_block2.__init__(self, "cvsd_encode",
gr.io_signature(1, 1, gr.sizeof_float), # Input signature
gr.io_signature(1, 1, gr.sizeof_char)) # Output signature
scale_factor = 32000.0
self.interp = resample
src_scale = blocks.multiply_const_ff(scale_factor)
taps = filter.firdes.low_pass(self.interp, self.interp, bw, 2*bw)
interp = filter.interp_fir_filter_fff(self.interp, taps)
f2s = blocks.float_to_short()
enc = vocoder_swig.cvsd_encode_sb()
self.connect(self, src_scale, interp, f2s, enc, self)
class cvsd_decode_bf(gr.hier_block2):
'''
This is a wrapper for the CVSD decoder that performs decimation and filtering
    necessary to work with the vocoding. It decodes the incoming CVSD-encoded bitstream to shorts, converts
    them to floats, decimates the result, and scales it back down (dividing by 32000, slightly below the
    short-integer maximum, to avoid clipping).
The sampling rate can be anything, though, of course, the higher the sampling rate and the
higher the interpolation rate are, the better the sound quality.
'''
def __init__(self, resample=8, bw=0.5):
'''
When using the CVSD vocoder, appropriate sampling rates are from 8k to 64k with resampling rates
from 1 to 8. A rate of 8k with a resampling rate of 8 provides a good quality signal.
'''
gr.hier_block2.__init__(self, "cvsd_decode",
gr.io_signature(1, 1, gr.sizeof_char), # Input signature
gr.io_signature(1, 1, gr.sizeof_float)) # Output signature
scale_factor = 32000.0
self.decim = resample
dec = vocoder_swig.cvsd_decode_bs()
s2f = blocks.short_to_float()
taps = filter.firdes.low_pass(1, 1, bw, 2*bw)
decim = filter.fir_filter_fff(self.decim, taps)
sink_scale = blocks.multiply_const_ff(1.0 / scale_factor)
self.connect(self, dec, s2f, decim, sink_scale, self)
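# The loopback sketch below is illustrative only and is not part of the
# original module; the test signal and block parameters are arbitrary example
# choices.  It shows how the two hier blocks above could be chained inside a
# flowgraph.
def _example_loopback(resample=8, bw=0.5):
    import math
    tb = gr.top_block()
    # One second of an 80 Hz tone at an assumed 8 kHz sample rate.
    src = blocks.vector_source_f(
        [0.5 * math.sin(2 * math.pi * 80 * i / 8000.0) for i in range(8000)],
        False)
    sink = blocks.vector_sink_f()
    tb.connect(src,
               cvsd_encode_fb(resample, bw),
               cvsd_decode_bf(resample, bw),
               sink)
    tb.run()
    return sink.data()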
| gpl-3.0 |
tverlaan/ansible-modules-core | source_control/git.py | 47 | 30762 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: git
author:
- "Ansible Core Team"
- "Michael DeHaan"
version_added: "0.0.1"
short_description: Deploy software (or files) from git checkouts
description:
- Manage I(git) checkouts of repositories to deploy files or software.
options:
repo:
required: true
aliases: [ name ]
description:
- git, SSH, or HTTP protocol address of the git repository.
dest:
required: true
description:
- Absolute path of where the repository should be checked out to.
        This parameter is required, unless C(clone) is set to C(no).
This change was made in version 1.8.3. Prior to this version,
the C(dest) parameter was always required.
version:
required: false
default: "HEAD"
description:
- What version of the repository to check out. This can be the
full 40-character I(SHA-1) hash, the literal string C(HEAD), a
branch name, or a tag name.
accept_hostkey:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.5"
description:
- if C(yes), adds the hostkey for the repo url if not already
added. If ssh_args contains "-o StrictHostKeyChecking=no",
this parameter is ignored.
ssh_opts:
required: false
default: None
version_added: "1.5"
description:
- Creates a wrapper script and exports the path as GIT_SSH
which git then automatically uses to override ssh arguments.
An example value could be "-o StrictHostKeyChecking=no"
key_file:
required: false
default: None
version_added: "1.5"
description:
- Specify an optional private key file to use for the checkout.
reference:
required: false
default: null
version_added: "1.4"
description:
- Reference repository (see "git clone --reference ...")
remote:
required: false
default: "origin"
description:
- Name of the remote.
refspec:
required: false
default: null
version_added: "1.9"
description:
- Add an additional refspec to be fetched.
If version is set to a I(SHA-1) not reachable from any branch
or tag, this option may be necessary to specify the ref containing
the I(SHA-1).
Uses the same syntax as the 'git fetch' command.
An example value could be "refs/meta/config".
force:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "0.7"
description:
- If C(yes), any modified files in the working
repository will be discarded. Prior to 0.7, this was always
'yes' and could not be disabled. Prior to 1.9, the default was
`yes`
depth:
required: false
default: null
version_added: "1.2"
description:
- Create a shallow clone with a history truncated to the specified
        number of revisions. The minimum possible value is C(1), otherwise
ignored.
clone:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.9"
description:
- If C(no), do not clone the repository if it does not exist locally
update:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.2"
description:
- If C(no), do not retrieve new revisions from the origin repository
executable:
required: false
default: null
version_added: "1.4"
description:
- Path to git executable to use. If not supplied,
the normal mechanism for resolving binary paths will be used.
bare:
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "1.4"
description:
- if C(yes), repository will be created as a bare repo, otherwise
it will be a standard repo with a workspace.
recursive:
required: false
default: "yes"
choices: [ "yes", "no" ]
version_added: "1.6"
description:
- if C(no), repository will be cloned without the --recursive
option, skipping sub-modules.
track_submodules:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "1.8"
description:
- if C(yes), submodules will track the latest commit on their
master branch (or other branch specified in .gitmodules). If
C(no), submodules will be kept at the revision specified by the
main project. This is equivalent to specifying the --remote flag
to git submodule update.
verify_commit:
required: false
default: "no"
choices: ["yes", "no"]
version_added: "2.0"
description:
- if C(yes), when cloning or checking out a C(version) verify the
signature of a GPG signed commit. This requires C(git) version>=2.1.0
to be installed. The commit MUST be signed and the public key MUST
be trusted in the GPG trustdb.
requirements:
- git (the command line tool)
notes:
- "If the task seems to be hanging, first verify remote host is in C(known_hosts).
      SSH will prompt the user to authorize the first contact with a remote host. To avoid this prompt,
one solution is to add the remote host public key in C(/etc/ssh/ssh_known_hosts) before calling
the git module, with the following command: ssh-keyscan -H remote_host.com >> /etc/ssh/ssh_known_hosts."
'''
EXAMPLES = '''
# Example git checkout from Ansible Playbooks
- git: repo=git://foosball.example.org/path/to/repo.git
dest=/srv/checkout
version=release-0.22
# Example read-write git checkout from github
- git: repo=ssh://git@github.com/mylogin/hello.git dest=/home/mylogin/hello
# Example just ensuring the repo checkout exists
- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout update=no
# Example just get information about the repository whether or not it has
# already been cloned locally.
- git: repo=git://foosball.example.org/path/to/repo.git dest=/srv/checkout clone=no update=no
# Example checkout a github repo and use refspec to fetch all pull requests
- git: repo=https://github.com/ansible/ansible-examples.git dest=/src/ansible-examples refspec=+refs/pull/*:refs/heads/*
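# Example (illustrative only; the host name and file paths below are
# placeholders, not values taken from this repository) checkout over ssh with
# a specific private key, accepting the remote host key automatically
- git: repo=ssh://git@git.example.org/path/to/repo.git
       dest=/srv/checkout
       key_file=/home/mylogin/.ssh/id_rsa
       accept_hostkey=yes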
'''
import re
import tempfile
def get_submodule_update_params(module, git_path, cwd):
#or: git submodule [--quiet] update [--init] [-N|--no-fetch]
#[-f|--force] [--rebase] [--reference <repository>] [--merge]
#[--recursive] [--] [<path>...]
params = []
# run a bad submodule command to get valid params
cmd = "%s submodule update --help" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=cwd)
lines = stderr.split('\n')
update_line = None
for line in lines:
if 'git submodule [--quiet] update ' in line:
update_line = line
if update_line:
update_line = update_line.replace('[','')
update_line = update_line.replace(']','')
update_line = update_line.replace('|',' ')
parts = shlex.split(update_line)
for part in parts:
if part.startswith('--'):
part = part.replace('--', '')
params.append(part)
return params
def write_ssh_wrapper():
module_dir = get_module_path()
try:
# make sure we have full permission to the module_dir, which
# may not be the case if we're sudo'ing to a non-root user
if os.access(module_dir, os.W_OK|os.R_OK|os.X_OK):
fd, wrapper_path = tempfile.mkstemp(prefix=module_dir + '/')
else:
raise OSError
except (IOError, OSError):
fd, wrapper_path = tempfile.mkstemp()
fh = os.fdopen(fd, 'w+b')
template = """#!/bin/sh
if [ -z "$GIT_SSH_OPTS" ]; then
BASEOPTS=""
else
BASEOPTS=$GIT_SSH_OPTS
fi
if [ -z "$GIT_KEY" ]; then
ssh $BASEOPTS "$@"
else
ssh -i "$GIT_KEY" $BASEOPTS "$@"
fi
"""
fh.write(template)
fh.close()
st = os.stat(wrapper_path)
os.chmod(wrapper_path, st.st_mode | stat.S_IEXEC)
return wrapper_path
def set_git_ssh(ssh_wrapper, key_file, ssh_opts):
if os.environ.get("GIT_SSH"):
del os.environ["GIT_SSH"]
os.environ["GIT_SSH"] = ssh_wrapper
if os.environ.get("GIT_KEY"):
del os.environ["GIT_KEY"]
if key_file:
os.environ["GIT_KEY"] = key_file
if os.environ.get("GIT_SSH_OPTS"):
del os.environ["GIT_SSH_OPTS"]
if ssh_opts:
os.environ["GIT_SSH_OPTS"] = ssh_opts
def get_version(module, git_path, dest, ref="HEAD"):
''' samples the version of the git repo '''
cmd = "%s rev-parse %s" % (git_path, ref)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
sha = stdout.rstrip('\n')
return sha
def get_submodule_versions(git_path, module, dest, version='HEAD'):
cmd = [git_path, 'submodule', 'foreach', git_path, 'rev-parse', version]
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg='Unable to determine hashes of submodules')
submodules = {}
subm_name = None
for line in out.splitlines():
if line.startswith("Entering '"):
subm_name = line[10:-1]
elif len(line.strip()) == 40:
if subm_name is None:
module.fail_json()
submodules[subm_name] = line.strip()
subm_name = None
else:
module.fail_json(msg='Unable to parse submodule hash line: %s' % line.strip())
if subm_name is not None:
module.fail_json(msg='Unable to find hash for submodule: %s' % subm_name)
return submodules
def clone(git_path, module, repo, dest, remote, depth, version, bare,
reference, refspec, verify_commit):
''' makes a new git repo if it does not already exist '''
dest_dirname = os.path.dirname(dest)
try:
os.makedirs(dest_dirname)
except:
pass
cmd = [ git_path, 'clone' ]
if bare:
cmd.append('--bare')
else:
cmd.extend([ '--origin', remote ])
if is_remote_branch(git_path, module, dest, repo, version) \
or is_remote_tag(git_path, module, dest, repo, version):
cmd.extend([ '--branch', version ])
if depth:
cmd.extend([ '--depth', str(depth) ])
if reference:
cmd.extend([ '--reference', str(reference) ])
cmd.extend([ repo, dest ])
module.run_command(cmd, check_rc=True, cwd=dest_dirname)
if bare:
if remote != 'origin':
module.run_command([git_path, 'remote', 'add', remote, repo], check_rc=True, cwd=dest)
if refspec:
module.run_command([git_path, 'fetch', remote, refspec], check_rc=True, cwd=dest)
if verify_commit:
verify_commit_sign(git_path, module, dest, version)
def has_local_mods(module, git_path, dest, bare):
if bare:
return False
cmd = "%s status -s" % (git_path)
rc, stdout, stderr = module.run_command(cmd, cwd=dest)
lines = stdout.splitlines()
lines = filter(lambda c: not re.search('^\\?\\?.*$', c), lines)
return len(lines) > 0
def reset(git_path, module, dest):
'''
Resets the index and working tree to HEAD.
Discards any changes to tracked files in working
tree since that commit.
'''
cmd = "%s reset --hard HEAD" % (git_path,)
return module.run_command(cmd, check_rc=True, cwd=dest)
def get_remote_head(git_path, module, dest, version, remote, bare):
cloning = False
cwd = None
tag = False
if remote == module.params['repo']:
cloning = True
else:
cwd = dest
if version == 'HEAD':
if cloning:
# cloning the repo, just get the remote's HEAD version
cmd = '%s ls-remote %s -h HEAD' % (git_path, remote)
else:
head_branch = get_head_branch(git_path, module, dest, remote, bare)
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, head_branch)
elif is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
elif is_remote_tag(git_path, module, dest, remote, version):
tag = True
cmd = '%s ls-remote %s -t refs/tags/%s*' % (git_path, remote, version)
else:
        # appears to be a sha1; return it as-is, since there is no way to
        # check for a specific sha1 on the remote
return version
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=cwd)
if len(out) < 1:
module.fail_json(msg="Could not determine remote revision for %s" % version)
if tag:
# Find the dereferenced tag if this is an annotated tag.
for tag in out.split('\n'):
if tag.endswith(version + '^{}'):
out = tag
break
elif tag.endswith(version):
out = tag
rev = out.split()[0]
return rev
def is_remote_tag(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -t refs/tags/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if version in out:
return True
else:
return False
def get_branches(git_path, module, dest):
branches = []
cmd = '%s branch -a' % (git_path,)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine branch data - received %s" % out)
for line in out.split('\n'):
branches.append(line.strip())
return branches
def get_tags(git_path, module, dest):
tags = []
cmd = '%s tag' % (git_path,)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Could not determine tag data - received %s" % out)
for line in out.split('\n'):
tags.append(line.strip())
return tags
def is_remote_branch(git_path, module, dest, remote, version):
cmd = '%s ls-remote %s -h refs/heads/%s' % (git_path, remote, version)
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if version in out:
return True
else:
return False
def is_local_branch(git_path, module, dest, branch):
branches = get_branches(git_path, module, dest)
lbranch = '%s' % branch
if lbranch in branches:
return True
elif '* %s' % branch in branches:
return True
else:
return False
def is_not_a_branch(git_path, module, dest):
branches = get_branches(git_path, module, dest)
for b in branches:
if b.startswith('* ') and 'no branch' in b:
return True
return False
def get_head_branch(git_path, module, dest, remote, bare=False):
'''
Determine what branch HEAD is associated with. This is partly
taken from lib/ansible/utils/__init__.py. It finds the correct
path to .git/HEAD and reads from that file the branch that HEAD is
associated with. In the case of a detached HEAD, this will look
up the branch in .git/refs/remotes/<remote>/HEAD.
'''
if bare:
repo_path = dest
else:
repo_path = os.path.join(dest, '.git')
# Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
if os.path.isfile(repo_path):
try:
gitdir = yaml.safe_load(open(repo_path)).get('gitdir')
            # There is a possibility that the .git file has an absolute path.
if os.path.isabs(gitdir):
repo_path = gitdir
else:
repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
except (IOError, AttributeError):
return ''
# Read .git/HEAD for the name of the branch.
# If we're in a detached HEAD state, look up the branch associated with
# the remote HEAD in .git/refs/remotes/<remote>/HEAD
f = open(os.path.join(repo_path, "HEAD"))
if is_not_a_branch(git_path, module, dest):
f.close()
f = open(os.path.join(repo_path, 'refs', 'remotes', remote, 'HEAD'))
branch = f.readline().split('/')[-1].rstrip("\n")
f.close()
return branch
def set_remote_url(git_path, module, repo, dest, remote):
''' updates repo from remote sources '''
commands = [("set a new url %s for %s" % (repo, remote), [git_path, 'remote', 'set-url', remote, repo])]
for (label,command) in commands:
(rc,out,err) = module.run_command(command, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
def fetch(git_path, module, repo, dest, version, remote, bare, refspec):
''' updates repo from remote sources '''
set_remote_url(git_path, module, repo, dest, remote)
commands = []
fetch_str = 'download remote objects and refs'
if bare:
refspecs = ['+refs/heads/*:refs/heads/*', '+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs))
else:
# unlike in bare mode, there's no way to combine the
# additional refspec with the default git fetch behavior,
# so use two commands
commands.append((fetch_str, [git_path, 'fetch', remote]))
refspecs = ['+refs/tags/*:refs/tags/*']
if refspec:
refspecs.append(refspec)
commands.append((fetch_str, [git_path, 'fetch', remote] + refspecs))
for (label,command) in commands:
(rc,out,err) = module.run_command(command, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to %s: %s %s" % (label, out, err))
def submodules_fetch(git_path, module, remote, track_submodules, dest):
changed = False
if not os.path.exists(os.path.join(dest, '.gitmodules')):
# no submodules
return changed
gitmodules_file = open(os.path.join(dest, '.gitmodules'), 'r')
for line in gitmodules_file:
# Check for new submodules
if not changed and line.strip().startswith('path'):
path = line.split('=', 1)[1].strip()
# Check that dest/path/.git exists
if not os.path.exists(os.path.join(dest, path, '.git')):
changed = True
# add the submodule repo's hostkey
if line.strip().startswith('url'):
repo = line.split('=', 1)[1].strip()
if module.params['ssh_opts'] is not None:
if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
else:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
# Check for updates to existing modules
if not changed:
# Fetch updates
begin = get_submodule_versions(git_path, module, dest)
cmd = [git_path, 'submodule', 'foreach', git_path, 'fetch']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to fetch submodules: %s" % out + err)
if track_submodules:
# Compare against submodule HEAD
### FIXME: determine this from .gitmodules
version = 'master'
after = get_submodule_versions(git_path, module, dest, '%s/%s'
% (remote, version))
if begin != after:
changed = True
else:
# Compare against the superproject's expectation
cmd = [git_path, 'submodule', 'status']
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to retrieve submodule status: %s' % out + err)
for line in out.splitlines():
if line[0] != ' ':
changed = True
break
return changed
def submodule_update(git_path, module, dest, track_submodules):
''' init and update any submodules '''
# get the valid submodule params
params = get_submodule_update_params(module, git_path, dest)
# skip submodule commands if .gitmodules is not present
if not os.path.exists(os.path.join(dest, '.gitmodules')):
return (0, '', '')
cmd = [ git_path, 'submodule', 'sync' ]
(rc, out, err) = module.run_command(cmd, check_rc=True, cwd=dest)
if 'remote' in params and track_submodules:
cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ,'--remote' ]
else:
cmd = [ git_path, 'submodule', 'update', '--init', '--recursive' ]
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to init/update submodules: %s" % out + err)
return (rc, out, err)
def switch_version(git_path, module, dest, remote, version, verify_commit):
cmd = ''
if version != 'HEAD':
if is_remote_branch(git_path, module, dest, remote, version):
if not is_local_branch(git_path, module, dest, version):
cmd = "%s checkout --track -b %s %s/%s" % (git_path, version, remote, version)
else:
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, version), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % version)
cmd = "%s reset --hard %s/%s" % (git_path, remote, version)
else:
cmd = "%s checkout --force %s" % (git_path, version)
else:
branch = get_head_branch(git_path, module, dest, remote)
(rc, out, err) = module.run_command("%s checkout --force %s" % (git_path, branch), cwd=dest)
if rc != 0:
module.fail_json(msg="Failed to checkout branch %s" % branch)
cmd = "%s reset --hard %s" % (git_path, remote)
(rc, out1, err1) = module.run_command(cmd, cwd=dest)
if rc != 0:
if version != 'HEAD':
module.fail_json(msg="Failed to checkout %s" % (version))
else:
module.fail_json(msg="Failed to checkout branch %s" % (branch))
if verify_commit:
verify_commit_sign(git_path, module, dest, version)
return (rc, out1, err1)
def verify_commit_sign(git_path, module, dest, version):
cmd = "%s verify-commit %s" % (git_path, version)
(rc, out, err) = module.run_command(cmd, cwd=dest)
if rc != 0:
module.fail_json(msg='Failed to verify GPG signature of commit/tag "%s"' % version)
return (rc, out, err)
# ===========================================
def main():
module = AnsibleModule(
argument_spec = dict(
dest=dict(),
repo=dict(required=True, aliases=['name']),
version=dict(default='HEAD'),
remote=dict(default='origin'),
refspec=dict(default=None),
reference=dict(default=None),
force=dict(default='no', type='bool'),
depth=dict(default=None, type='int'),
clone=dict(default='yes', type='bool'),
update=dict(default='yes', type='bool'),
verify_commit=dict(default='no', type='bool'),
accept_hostkey=dict(default='no', type='bool'),
key_file=dict(default=None, required=False),
ssh_opts=dict(default=None, required=False),
executable=dict(default=None),
bare=dict(default='no', type='bool'),
recursive=dict(default='yes', type='bool'),
track_submodules=dict(default='no', type='bool'),
),
supports_check_mode=True
)
dest = module.params['dest']
repo = module.params['repo']
version = module.params['version']
remote = module.params['remote']
refspec = module.params['refspec']
force = module.params['force']
depth = module.params['depth']
update = module.params['update']
allow_clone = module.params['clone']
bare = module.params['bare']
verify_commit = module.params['verify_commit']
reference = module.params['reference']
git_path = module.params['executable'] or module.get_bin_path('git', True)
key_file = module.params['key_file']
ssh_opts = module.params['ssh_opts']
gitconfig = None
if not dest and allow_clone:
module.fail_json(msg="the destination directory must be specified unless clone=no")
elif dest:
dest = os.path.abspath(os.path.expanduser(dest))
if bare:
gitconfig = os.path.join(dest, 'config')
else:
gitconfig = os.path.join(dest, '.git', 'config')
# make sure the key_file path is expanded for ~ and $HOME
if key_file is not None:
key_file = os.path.abspath(os.path.expanduser(key_file))
# create a wrapper script and export
# GIT_SSH=<path> as an environment variable
# for git to use the wrapper script
ssh_wrapper = None
if key_file or ssh_opts:
ssh_wrapper = write_ssh_wrapper()
set_git_ssh(ssh_wrapper, key_file, ssh_opts)
module.add_cleanup_file(path=ssh_wrapper)
# add the git repo's hostkey
if module.params['ssh_opts'] is not None:
if not "-o StrictHostKeyChecking=no" in module.params['ssh_opts']:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
else:
add_git_host_key(module, repo, accept_hostkey=module.params['accept_hostkey'])
recursive = module.params['recursive']
track_submodules = module.params['track_submodules']
rc, out, err, status = (0, None, None, None)
before = None
local_mods = False
repo_updated = None
if (dest and not os.path.exists(gitconfig)) or (not dest and not allow_clone):
# if there is no git configuration, do a clone operation unless:
# * the user requested no clone (they just want info)
# * we're doing a check mode test
# In those cases we do an ls-remote
if module.check_mode or not allow_clone:
remote_head = get_remote_head(git_path, module, dest, version, repo, bare)
module.exit_json(changed=True, before=before, after=remote_head)
# there's no git config, so clone
clone(git_path, module, repo, dest, remote, depth, version, bare, reference, refspec, verify_commit)
repo_updated = True
elif not update:
# Just return having found a repo already in the dest path
# this does no checking that the repo is the actual repo
# requested.
before = get_version(module, git_path, dest)
module.exit_json(changed=False, before=before, after=before)
else:
# else do a pull
local_mods = has_local_mods(module, git_path, dest, bare)
before = get_version(module, git_path, dest)
if local_mods:
# failure should happen regardless of check mode
if not force:
module.fail_json(msg="Local modifications exist in repository (force=no).")
# if force and in non-check mode, do a reset
if not module.check_mode:
reset(git_path, module, dest)
# exit if already at desired sha version
set_remote_url(git_path, module, repo, dest, remote)
remote_head = get_remote_head(git_path, module, dest, version, remote, bare)
if before == remote_head:
if local_mods:
module.exit_json(changed=True, before=before, after=remote_head,
msg="Local modifications exist")
elif is_remote_tag(git_path, module, dest, repo, version):
# if the remote is a tag and we have the tag locally, exit early
if version in get_tags(git_path, module, dest):
repo_updated = False
else:
repo_updated = False
if repo_updated is None:
if module.check_mode:
module.exit_json(changed=True, before=before, after=remote_head)
fetch(git_path, module, repo, dest, version, remote, bare, refspec)
repo_updated = True
# switch to version specified regardless of whether
# we got new revisions from the repository
if not bare:
switch_version(git_path, module, dest, remote, version, verify_commit)
# Deal with submodules
submodules_updated = False
if recursive and not bare:
submodules_updated = submodules_fetch(git_path, module, remote, track_submodules, dest)
if module.check_mode:
if submodules_updated:
module.exit_json(changed=True, before=before, after=remote_head, submodules_changed=True)
else:
module.exit_json(changed=False, before=before, after=remote_head)
if submodules_updated:
# Switch to version specified
submodule_update(git_path, module, dest, track_submodules)
# determine if we changed anything
after = get_version(module, git_path, dest)
changed = False
if before != after or local_mods or submodules_updated:
changed = True
# cleanup the wrapper script
if ssh_wrapper:
try:
os.remove(ssh_wrapper)
except OSError:
# No need to fail if the file already doesn't exist
pass
module.exit_json(changed=changed, before=before, after=after)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.known_hosts import *
main()
| gpl-3.0 |
MichaelMraka/hawkey | tests/python/tests/test_advisory.py | 3 | 3068 | #
# Copyright (C) 2014 Red Hat, Inc.
#
# Licensed under the GNU Lesser General Public License Version 2.1
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
"""Tests of the _hawkey.Advisory class."""
from __future__ import absolute_import
from __future__ import unicode_literals
from . import base
import datetime
import hawkey
import itertools
import time
def find_advisory(sack, id_):
"""Find an advisory with given ID."""
# The function is needed because advisories cannot be retrieved directly.
advisories_iterable = itertools.chain(
(pkg.get_advisories(hawkey.LT) for pkg in hawkey.Query(sack)),
(pkg.get_advisories(hawkey.GT | hawkey.EQ) for pkg in hawkey.Query(sack)))
for advisory in itertools.chain.from_iterable(advisories_iterable):
if advisory.id == id_:
return advisory
class Test(base.TestCase):
"""Test case consisting of one random advisory."""
def setUp(self):
"""Prepare the test fixture."""
sack = base.TestSack(repo_dir=self.repo_dir)
sack.load_yum_repo(load_updateinfo=True)
self.advisory = find_advisory(sack, 'FEDORA-2008-9969')
def test_description(self):
self.assertEqual(self.advisory.description, 'An example update to the tour package.')
def test_packages(self):
filenames = [apkg.filename for apkg in self.advisory.packages]
self.assertEqual(filenames, ['tour.noarch.rpm'])
def test_filenames(self):
self.assertEqual(self.advisory.filenames, ['tour.noarch.rpm'])
def test_id(self):
self.assertEqual(self.advisory.id, 'FEDORA-2008-9969')
def test_references(self):
urls = [ref.url for ref in self.advisory.references]
self.assertEqual(urls,
['https://bugzilla.redhat.com/show_bug.cgi?id=472090',
'https://bugzilla.gnome.com/show_bug.cgi?id=472091'])
def test_rights(self):
self.assertIsNone(self.advisory.rights)
def test_title(self):
self.assertEqual(self.advisory.title, 'lvm2-2.02.39-7.fc10')
def test_type(self):
self.assertEqual(self.advisory.type, hawkey.ADVISORY_BUGFIX)
def test_updated(self):
self.assertEqual(self.advisory.updated,
datetime.datetime(2008, 12, 9, 11, 31, 26) -
datetime.timedelta(seconds=time.timezone))
| lgpl-2.1 |
jag1g13/lammps | tools/i-pi/ipi/inputs/prng.py | 33 | 4326 | """Deals with creating the random number generator.
Copyright (C) 2013, Joshua More and Michele Ceriotti
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Generates a random number generator either from a seed number, or from a
state vector.
Classes:
InputRandom: Deals with creating the Random object from a file, and
writing the checkpoints.
"""
__all__ = ['InputRandom']
import numpy as np
from ipi.utils.prng import *
from ipi.utils.inputvalue import *
class InputRandom(Input):
"""Random input class.
Handles generating the appropriate random number class from the xml
input file, and generating the xml checkpoint tags and data from an
instance of the object.
Attributes:
seed: An optional integer giving a seed to initialise the random number
generator from. Defaults to 123456.
state: An optional array giving the state of the random number generator.
Defaults to an empty array.
has_gauss: An optional integer giving whether there is a stored
Gaussian number or not. Defaults to 0.
gauss: An optional float giving the stored Gaussian number. Defaults to
0.0.
set_pos: An optional integer giving the position in the state array
that is being read from. Defaults to 0.
"""
fields = {"seed" : (InputValue, {"dtype" : int,
"default" : 123456,
"help" : "This is the seed number used to generate the initial state of the random number generator."}),
"state" : (InputArray, {"dtype" : np.uint,
"default" : input_default(factory=np.zeros, kwargs={'shape': (0,), 'dtype': np.uint}),
"help" : "Gives the state vector for the random number generator. Avoid directly modifying this unless you are very familiar with the inner workings of the algorithm used."}),
"has_gauss" : (InputValue, {"dtype" : int,
"default" : 0,
"help" : "Determines whether there is a stored gaussian number or not. A value of 0 means there is none stored."}),
"gauss" : (InputValue, {"dtype" : float,
"default" : 0.00,
"help" : "The stored Gaussian number." }),
"set_pos" : (InputValue, {"dtype" : int,
"default" : 0,
"help" : "Gives the position in the state array that the random number generator is reading from."})}
default_help = "Deals with the pseudo-random number generator."
default_label = "PRNG"
def store(self, prng):
"""Takes a random number instance and stores a minimal
representation of it.
Args:
prng: A random number object from which to initialise from.
"""
super(InputRandom,self).store(prng)
self.seed.store(prng.seed)
gstate = prng.state
self.state.store(gstate[1])
self.set_pos.store(gstate[2])
self.has_gauss.store(gstate[3])
self.gauss.store(gstate[4])
def fetch(self):
"""Creates a random number object.
Returns:
An random number object of the appropriate type and with the
appropriate properties given the attributes of the InputRandom
object.
"""
super(InputRandom,self).fetch()
if not self.state._explicit:
return Random(seed=self.seed.fetch())
else:
return Random(state=('MT19937',self.state.fetch(), self.set_pos.fetch(), self.has_gauss.fetch(), self.gauss.fetch() ))
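# Illustrative note (the snippet is an assumption for documentation purposes,
# not taken from an input file shipped with this excerpt): in an i-PI xml
# input this class would handle a block such as
#
#    <prng> <seed> 32342 </seed> </prng>
#
# and fetch() would then return Random(seed=32342).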
| gpl-2.0 |
eunchong/build | scripts/slave/bot_update.py | 1 | 62497 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# TODO(hinoka): Use logging.
import cStringIO
import codecs
import collections
import copy
import ctypes
import json
import optparse
import os
import pprint
import random
import re
import socket
import subprocess
import sys
import tempfile
import threading
import time
import urllib2
import urlparse
import uuid
import os.path as path
# How many bytes at a time to read from pipes.
BUF_SIZE = 256
# Define a bunch of directory paths.
# Relative to the current working directory.
CURRENT_DIR = path.abspath(os.getcwd())
BUILDER_DIR = path.dirname(CURRENT_DIR)
SLAVE_DIR = path.dirname(BUILDER_DIR)
# Relative to this script's filesystem path.
THIS_DIR = path.dirname(path.abspath(__file__))
SCRIPTS_DIR = path.dirname(THIS_DIR)
BUILD_DIR = path.dirname(SCRIPTS_DIR)
ROOT_DIR = path.dirname(BUILD_DIR)
DEPOT_TOOLS_DIR = path.join(ROOT_DIR, 'depot_tools')
# TODO(luqui): This is a horrible hack to identify build_internal when build
# is a recipe dependency. bot_update should not be depending on internal,
# rather the arrow should go the other way (or just be destroyed).
def check_build_internal(d):
d = path.abspath(d)
if path.basename(d) == 'build_internal' and path.isdir(d):
return d
else:
return None
BUILD_INTERNAL_DIR = (
check_build_internal(path.join(ROOT_DIR, 'build_internal')) or
check_build_internal(path.join(ROOT_DIR, # .recipe_deps
path.pardir, # slave
path.pardir, # scripts
path.pardir))) # build_internal
CHROMIUM_GIT_HOST = 'https://chromium.googlesource.com'
CHROMIUM_SRC_URL = CHROMIUM_GIT_HOST + '/chromium/src.git'
# Official builds use buildspecs, so this is a special case.
BUILDSPEC_TYPE = collections.namedtuple('buildspec',
('container', 'version'))
BUILDSPEC_RE = (r'^/chrome-internal/trunk/tools/buildspec/'
'(build|branches|releases)/(.+)$')
GIT_BUILDSPEC_PATH = ('https://chrome-internal.googlesource.com/chrome/tools/'
'buildspec')
BRANCH_HEADS_REFSPEC = '+refs/branch-heads/*'
BUILDSPEC_COMMIT_RE = (
re.compile(r'Buildspec for.*version (\d+\.\d+\.\d+\.\d+)'),
re.compile(r'Create (\d+\.\d+\.\d+\.\d+) buildspec'),
re.compile(r'Auto-converted (\d+\.\d+\.\d+\.\d+) buildspec to git'),
)
# Regular expression that matches a single commit footer line.
COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s+(.+)')
# Footer metadata keys for regular and gsubtreed mirrored commit positions.
COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'
COMMIT_ORIGINAL_POSITION_FOOTER_KEY = 'Cr-Original-Commit-Position'
# Regular expression to parse a commit position
COMMIT_POSITION_RE = re.compile(r'(.+)@\{#(\d+)\}')
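# For example, a footer value such as 'refs/heads/master@{#311992}' (an
# illustrative value) is split by this pattern into the ref name
# 'refs/heads/master' and the numeric position '311992'.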
# Regular expression to parse gclient's revinfo entries.
REVINFO_RE = re.compile(r'^([^:]+):\s+([^@]+)@(.+)$')
# Used by 'ResolveSvnRevisionFromGitiles'
GIT_SVN_PROJECT_MAP = {
'webkit': {
'svn_url': 'svn://svn.chromium.org/blink',
'branch_map': [
(r'trunk', r'refs/heads/master'),
(r'branches/([^/]+)', r'refs/branch-heads/\1'),
],
},
'v8': {
'svn_url': 'https://v8.googlecode.com/svn',
'branch_map': [
(r'trunk', r'refs/heads/candidates'),
(r'branches/bleeding_edge', r'refs/heads/master'),
(r'branches/([^/]+)', r'refs/branch-heads/\1'),
],
},
'nacl': {
'svn_url': 'svn://svn.chromium.org/native_client',
'branch_map': [
(r'trunk/src/native_client', r'refs/heads/master'),
],
},
}
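# For illustration (example values only): with the maps above, the v8 SVN
# branch path 'branches/3.29' is rewritten to the git ref
# 'refs/branch-heads/3.29', while webkit's 'trunk' maps to 'refs/heads/master'.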
# Key for the 'git-svn' ID metadata commit footer entry.
GIT_SVN_ID_FOOTER_KEY = 'git-svn-id'
# e.g., git-svn-id: https://v8.googlecode.com/svn/trunk@23117
# ce2b1a6d-e550-0410-aec6-3dcde31c8c00
GIT_SVN_ID_RE = re.compile(r'((?:\w+)://[^@]+)@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
# This is the git mirror of the buildspecs repository. We could rely on the svn
# checkout, now that the git buildspecs are checked in alongside the svn
# buildspecs, but we're going to want to pull all the buildspecs from here
# eventually anyhow, and there's already some logic to pull from git (for the
# old git_buildspecs.git repo), so just stick with that.
GIT_BUILDSPEC_REPO = (
'https://chrome-internal.googlesource.com/chrome/tools/buildspec')
# Copied from scripts/recipes/chromium.py.
GOT_REVISION_MAPPINGS = {
'/chrome/trunk/src': {
'src/': 'got_revision',
'src/native_client/': 'got_nacl_revision',
'src/tools/swarm_client/': 'got_swarm_client_revision',
'src/tools/swarming_client/': 'got_swarming_client_revision',
'src/third_party/WebKit/': 'got_webkit_revision',
'src/third_party/webrtc/': 'got_webrtc_revision',
'src/v8/': 'got_v8_revision',
}
}
BOT_UPDATE_MESSAGE = """
What is the "Bot Update" step?
==============================
This step ensures that the source checkout on the bot (e.g. Chromium's src/ and
its dependencies) is checked out in a consistent state. This means that all of
the necessary repositories are checked out, no extra repositories are checked
out, and no locally modified files are present.
These actions used to be taken care of by the "gclient revert" and "update"
steps. However, those steps are known to be buggy and occasionally flaky. This
step has two main advantages over them:
* it only operates in Git, so the logic can be clearer and cleaner; and
* it is a slave-side script, so its behavior can be modified without
restarting the master.
Why Git, you ask? Because that is the direction that the Chromium project is
heading. This step is an integral part of the transition from using the SVN repo
at chrome/trunk/src to using the Git repo src.git. Please pardon the dust while
we fully convert everything to Git. This message will get out of your way
eventually, and the waterfall will be a happier place because of it.
This step can be activated or deactivated independently on every builder on
every master. When it is active, the "gclient revert" and "update" steps become
no-ops. When it is inactive, it prints this message, cleans up after itself, and
lets everything else continue as though nothing has changed. Eventually, when
everything is stable enough, this step will replace them entirely.
Debugging information:
(master/builder/slave may be unspecified on recipes)
master: %(master)s
builder: %(builder)s
slave: %(slave)s
forced by recipes: %(recipe)s
CURRENT_DIR: %(CURRENT_DIR)s
BUILDER_DIR: %(BUILDER_DIR)s
SLAVE_DIR: %(SLAVE_DIR)s
THIS_DIR: %(THIS_DIR)s
SCRIPTS_DIR: %(SCRIPTS_DIR)s
BUILD_DIR: %(BUILD_DIR)s
ROOT_DIR: %(ROOT_DIR)s
DEPOT_TOOLS_DIR: %(DEPOT_TOOLS_DIR)s
bot_update.py is:"""
ACTIVATED_MESSAGE = """ACTIVE.
The bot will perform a Git checkout in this step.
The "gclient revert" and "update" steps are no-ops.
"""
NOT_ACTIVATED_MESSAGE = """INACTIVE.
This step does nothing. You actually want to look at the "update" step.
"""
GCLIENT_TEMPLATE = """solutions = %(solutions)s
cache_dir = r%(cache_dir)s
%(target_os)s
%(target_os_only)s
"""
internal_data = {}
if BUILD_INTERNAL_DIR:
local_vars = {}
try:
execfile(os.path.join(
BUILD_INTERNAL_DIR, 'scripts', 'slave', 'bot_update_cfg.py'),
local_vars)
except Exception:
# Same as if BUILD_INTERNAL_DIR didn't exist in the first place.
print 'Warning: unable to read internal configuration file.'
print 'If this is an internal bot, this step may be erroneously inactive.'
internal_data = local_vars
RECOGNIZED_PATHS = {
# If SVN path matches key, the entire URL is rewritten to the Git url.
'/chrome/trunk/src':
CHROMIUM_SRC_URL,
'/chrome/trunk/src/tools/cros.DEPS':
CHROMIUM_GIT_HOST + '/chromium/src/tools/cros.DEPS.git',
}
RECOGNIZED_PATHS.update(internal_data.get('RECOGNIZED_PATHS', {}))
ENABLED_MASTERS = [
'bot_update.always_on',
'chromium.android',
'chromium.angle',
'chromium.chrome',
'chromium.chromedriver',
'chromium.chromiumos',
'chromium',
'chromium.fyi',
'chromium.goma',
'chromium.gpu',
'chromium.gpu.fyi',
'chromium.infra',
'chromium.infra.cron',
'chromium.linux',
'chromium.lkgr',
'chromium.mac',
'chromium.memory',
'chromium.memory.fyi',
'chromium.perf',
'chromium.perf.fyi',
'chromium.swarm',
'chromium.webkit',
'chromium.webrtc',
'chromium.webrtc.fyi',
'chromium.win',
'client.catapult',
'client.drmemory',
'client.mojo',
'client.nacl',
'client.nacl.ports',
'client.nacl.sdk',
'client.nacl.toolchain',
'client.pdfium',
'client.skia',
'client.skia.fyi',
'client.v8',
'client.v8.branches',
'client.v8.fyi',
'client.v8.ports',
'client.webrtc',
'client.webrtc.fyi',
'tryserver.blink',
'tryserver.client.catapult',
'tryserver.client.mojo',
'tryserver.chromium.android',
'tryserver.chromium.angle',
'tryserver.chromium.linux',
'tryserver.chromium.mac',
'tryserver.chromium.perf',
'tryserver.chromium.win',
'tryserver.infra',
'tryserver.nacl',
'tryserver.v8',
'tryserver.webrtc',
]
ENABLED_MASTERS += internal_data.get('ENABLED_MASTERS', [])
ENABLED_BUILDERS = {
'client.dart.fyi': [
'v8-linux-release',
'v8-mac-release',
'v8-win-release',
],
'client.dynamorio': [
'linux-v8-dr',
],
}
ENABLED_BUILDERS.update(internal_data.get('ENABLED_BUILDERS', {}))
ENABLED_SLAVES = {}
ENABLED_SLAVES.update(internal_data.get('ENABLED_SLAVES', {}))
# Disabled filters get run AFTER enabled filters, so for example if a builder
# config is enabled, but a bot on that builder is disabled, that bot will
# be disabled.
DISABLED_BUILDERS = {}
DISABLED_BUILDERS.update(internal_data.get('DISABLED_BUILDERS', {}))
DISABLED_SLAVES = {}
DISABLED_SLAVES.update(internal_data.get('DISABLED_SLAVES', {}))
# These masters work only in Git, meaning that for got_revision they always
# output a git hash rather than an SVN revision.
GIT_MASTERS = [
'client.v8',
'client.v8.branches',
'client.v8.ports',
'tryserver.v8',
]
GIT_MASTERS += internal_data.get('GIT_MASTERS', [])
# How many times to try before giving up.
ATTEMPTS = 5
# Find deps2git
DEPS2GIT_DIR_PATH = path.join(SCRIPTS_DIR, 'tools', 'deps2git')
DEPS2GIT_PATH = path.join(DEPS2GIT_DIR_PATH, 'deps2git.py')
S2G_INTERNAL_PATH = path.join(SCRIPTS_DIR, 'tools', 'deps2git_internal',
'svn_to_git_internal.py')
# ../../cache_dir aka /b/build/slave/cache_dir
GIT_CACHE_PATH = path.join(DEPOT_TOOLS_DIR, 'git_cache.py')
CACHE_DIR = path.join(SLAVE_DIR, 'cache_dir')
# Because we print CACHE_DIR out into a .gclient file, and then later run
# eval() on it, backslashes need to be escaped, otherwise "E:\b\build" gets
# parsed as "E:[\x08][\x08]uild".
if sys.platform.startswith('win'):
CACHE_DIR = CACHE_DIR.replace('\\', '\\\\')
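# Illustrative example (hypothetical path): 'E:\b\build\slave\cache_dir'
# becomes 'E:\\b\\build\\slave\\cache_dir' after this substitution, so the
# later eval() of the .gclient file reads the backslashes literally.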
# Find the patch tool.
if sys.platform.startswith('win'):
if not BUILD_INTERNAL_DIR:
print 'Warning: could not find patch tool because there is no '
print 'build_internal present.'
PATCH_TOOL = None
else:
PATCH_TOOL = path.join(BUILD_INTERNAL_DIR, 'tools', 'patch.EXE')
else:
PATCH_TOOL = '/usr/bin/patch'
# If there is less than 100GB of disk space on the system, then we do
# a shallow checkout.
SHALLOW_CLONE_THRESHOLD = 100 * 1024 * 1024 * 1024
class SubprocessFailed(Exception):
def __init__(self, message, code, output):
Exception.__init__(self, message)
self.code = code
self.output = output
class PatchFailed(SubprocessFailed):
pass
class GclientSyncFailed(SubprocessFailed):
pass
class SVNRevisionNotFound(Exception):
pass
class InvalidDiff(Exception):
pass
class Inactive(Exception):
"""Not really an exception, just used to exit early cleanly."""
pass
RETRY = object()
OK = object()
FAIL = object()
class PsPrinter(object):
def __init__(self, interval=300):
self.interval = interval
self.active = sys.platform.startswith('linux2')
self.thread = None
@staticmethod
def print_pstree():
"""Debugging function used to print "ps auxwwf" for stuck processes."""
subprocess.call(['ps', 'auxwwf'])
def poke(self):
if self.active:
self.cancel()
self.thread = threading.Timer(self.interval, self.print_pstree)
self.thread.start()
def cancel(self):
if self.active and self.thread is not None:
self.thread.cancel()
self.thread = None
def call(*args, **kwargs): # pragma: no cover
"""Interactive subprocess call."""
kwargs['stdout'] = subprocess.PIPE
kwargs['stderr'] = subprocess.STDOUT
kwargs.setdefault('bufsize', BUF_SIZE)
cwd = kwargs.get('cwd', os.getcwd())
result_fn = kwargs.pop('result_fn', lambda code, out: RETRY if code else OK)
stdin_data = kwargs.pop('stdin_data', None)
tries = kwargs.pop('tries', ATTEMPTS)
if stdin_data:
kwargs['stdin'] = subprocess.PIPE
out = cStringIO.StringIO()
new_env = kwargs.get('env', {})
env = copy.copy(os.environ)
env.update(new_env)
kwargs['env'] = env
attempt = 0
for attempt in range(1, tries + 1):
attempt_msg = ' (attempt #%d)' % attempt if attempt else ''
if new_env:
print '===Injecting Environment Variables==='
for k, v in sorted(new_env.items()):
print '%s: %s' % (k, v)
print '===Running %s%s===' % (' '.join(args), attempt_msg)
print 'In directory: %s' % cwd
start_time = time.time()
proc = subprocess.Popen(args, **kwargs)
if stdin_data:
proc.stdin.write(stdin_data)
proc.stdin.close()
psprinter = PsPrinter()
# This is here because passing 'sys.stdout' into stdout for proc will
# produce out of order output.
hanging_cr = False
while True:
psprinter.poke()
buf = proc.stdout.read(BUF_SIZE)
if not buf:
break
if hanging_cr:
buf = '\r' + buf
hanging_cr = buf.endswith('\r')
if hanging_cr:
buf = buf[:-1]
buf = buf.replace('\r\n', '\n').replace('\r', '\n')
sys.stdout.write(buf)
out.write(buf)
if hanging_cr:
sys.stdout.write('\n')
out.write('\n')
psprinter.cancel()
code = proc.wait()
elapsed_time = ((time.time() - start_time) / 60.0)
outval = out.getvalue()
result = result_fn(code, outval)
if result in (FAIL, RETRY):
print '===Failed in %.1f mins===' % elapsed_time
print
else:
print '===Succeeded in %.1f mins===' % elapsed_time
print
return outval
if result is FAIL:
break
if result is RETRY and attempt < tries:
sleep_backoff = 4 ** attempt
sleep_time = random.randint(sleep_backoff, int(sleep_backoff * 1.2))
print '===backing off, sleeping for %d secs===' % sleep_time
time.sleep(sleep_time)
raise SubprocessFailed('%s failed with code %d in %s after %d attempts.' %
(' '.join(args), code, cwd, attempt),
code, outval)
def git(*args, **kwargs): # pragma: no cover
"""Wrapper around call specifically for Git commands."""
if args and args[0] == 'cache':
# Rewrite "git cache" calls into "python git_cache.py".
cmd = (sys.executable, '-u', GIT_CACHE_PATH) + args[1:]
else:
git_executable = 'git'
# On windows, subprocess doesn't fuzzy-match 'git' to 'git.bat', so we
# have to do it explicitly. This is better than passing shell=True.
if sys.platform.startswith('win'):
git_executable += '.bat'
cmd = (git_executable,) + args
return call(*cmd, **kwargs)
def get_gclient_spec(solutions, target_os, target_os_only):
return GCLIENT_TEMPLATE % {
'solutions': pprint.pformat(solutions, indent=4),
'cache_dir': '"%s"' % CACHE_DIR,
'target_os': ('\ntarget_os=%s' % target_os) if target_os else '',
'target_os_only': '\ntarget_os_only=%s' % target_os_only
}
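# A minimal sketch of what get_gclient_spec() might emit (all values below are
# hypothetical, and the exact pprint formatting may differ):
#
#   solutions = [   {   'deps_file': '.DEPS.git',
#                       'managed': False,
#                       'name': 'src',
#                       'url': 'https://chromium.googlesource.com/chromium/src.git'}]
#   cache_dir = r"/b/build/slave/cache_dir"
#   target_os=['android']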
def check_enabled(master, builder, slave):
if master in ENABLED_MASTERS:
return True
builder_list = ENABLED_BUILDERS.get(master)
if builder_list and builder in builder_list:
return True
slave_list = ENABLED_SLAVES.get(master)
if slave_list and slave in slave_list:
return True
return False
def check_disabled(master, builder, slave):
"""Returns True if disabled, False if not disabled."""
builder_list = DISABLED_BUILDERS.get(master)
if builder_list and builder in builder_list:
return True
slave_list = DISABLED_SLAVES.get(master)
if slave_list and slave in slave_list:
return True
return False
def check_valid_host(master, builder, slave):
return (check_enabled(master, builder, slave)
and not check_disabled(master, builder, slave))
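# Example of how the activation check composes (values are hypothetical): a bot
# on master 'chromium.linux' is active because that master is listed in
# ENABLED_MASTERS, unless its builder or slave also appears in
# DISABLED_BUILDERS / DISABLED_SLAVES:
#
#   check_valid_host('chromium.linux', 'Linux Builder', 'slave123')  # -> True
#   check_valid_host('unknown.master', 'Some Builder', 'slave123')   # -> False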
def maybe_ignore_revision(revision, buildspec):
"""Handle builders that don't care what buildbot tells them to build.
This is especially the case with branch builders that build from buildspecs
and/or trigger off multiple repositories, where the --revision passed in has
nothing to do with the solution being built. Clearing the revision in this
case causes bot_update to use HEAD rather than trying to check out an
inappropriate version of the solution.
"""
if buildspec and buildspec.container == 'branches':
return []
return revision
def solutions_printer(solutions):
"""Prints gclient solution to stdout."""
print 'Gclient Solutions'
print '================='
for solution in solutions:
name = solution.get('name')
url = solution.get('url')
print '%s (%s)' % (name, url)
if solution.get('deps_file'):
print ' Dependencies file is %s' % solution['deps_file']
if 'managed' in solution:
print ' Managed mode is %s' % ('ON' if solution['managed'] else 'OFF')
custom_vars = solution.get('custom_vars')
if custom_vars:
print ' Custom Variables:'
for var_name, var_value in sorted(custom_vars.iteritems()):
print ' %s = %s' % (var_name, var_value)
custom_deps = solution.get('custom_deps')
    if custom_deps:
print ' Custom Dependencies:'
for deps_name, deps_value in sorted(custom_deps.iteritems()):
if deps_value:
print ' %s -> %s' % (deps_name, deps_value)
else:
print ' %s: Ignore' % deps_name
for k, v in solution.iteritems():
# Print out all the keys we don't know about.
if k in ['name', 'url', 'deps_file', 'custom_vars', 'custom_deps',
'managed']:
continue
print ' %s is %s' % (k, v)
print
def solutions_to_git(input_solutions):
"""Modifies urls in solutions to point at Git repos.
  Returns: (git solutions, svn root of first solution, buildspec) tuple.
"""
assert input_solutions
solutions = copy.deepcopy(input_solutions)
first_solution = True
buildspec = None
for solution in solutions:
original_url = solution['url']
parsed_url = urlparse.urlparse(original_url)
parsed_path = parsed_url.path
# Rewrite SVN urls into Git urls.
buildspec_m = re.match(BUILDSPEC_RE, parsed_path)
if first_solution and buildspec_m:
solution['url'] = GIT_BUILDSPEC_PATH
buildspec = BUILDSPEC_TYPE(
container=buildspec_m.group(1),
version=buildspec_m.group(2),
)
solution['deps_file'] = path.join(buildspec.container, buildspec.version,
'DEPS')
elif parsed_path in RECOGNIZED_PATHS:
solution['url'] = RECOGNIZED_PATHS[parsed_path]
solution['deps_file'] = '.DEPS.git'
elif parsed_url.scheme == 'https' and 'googlesource' in parsed_url.netloc:
pass
else:
print 'Warning: %s' % ('path %r not recognized' % parsed_path,)
# Strip out deps containing $$V8_REV$$, etc.
if 'custom_deps' in solution:
new_custom_deps = {}
for deps_name, deps_value in solution['custom_deps'].iteritems():
if deps_value and '$$' in deps_value:
print 'Dropping %s:%s from custom deps' % (deps_name, deps_value)
else:
new_custom_deps[deps_name] = deps_value
solution['custom_deps'] = new_custom_deps
if first_solution:
root = parsed_path
first_solution = False
solution['managed'] = False
    # We don't want gclient to be using a safesync URL. Instead it should be
    # using the lkgr/lkcr branches/tags.
if 'safesync_url' in solution:
print 'Removing safesync url %s from %s' % (solution['safesync_url'],
parsed_path)
del solution['safesync_url']
return solutions, root, buildspec
def remove(target):
"""Remove a target by moving it into build.dead."""
dead_folder = path.join(BUILDER_DIR, 'build.dead')
if not path.exists(dead_folder):
os.makedirs(dead_folder)
os.rename(target, path.join(dead_folder, uuid.uuid4().hex))
def ensure_no_checkout(dir_names, scm_dirname):
"""Ensure that there is no undesired checkout under build/.
  If there is an incorrect checkout under build/, then move its contents
  into build.dead/.
  This function will check each directory in dir_names.
  scm_dirname is expected to be one of '.svn', '.git', or '*'.
"""
assert scm_dirname in ['.svn', '.git', '*']
has_checkout = any(path.exists(path.join(os.getcwd(), dir_name, scm_dirname))
for dir_name in dir_names)
if has_checkout or scm_dirname == '*':
build_dir = os.getcwd()
prefix = ''
if scm_dirname != '*':
prefix = '%s detected in checkout, ' % scm_dirname
for filename in os.listdir(build_dir):
deletion_target = path.join(build_dir, filename)
print '%sdeleting %s...' % (prefix, deletion_target),
remove(deletion_target)
print 'done'
def gclient_configure(solutions, target_os, target_os_only):
"""Should do the same thing as gclient --spec='...'."""
with codecs.open('.gclient', mode='w', encoding='utf-8') as f:
f.write(get_gclient_spec(solutions, target_os, target_os_only))
def gclient_sync(with_branch_heads, shallow):
# We just need to allocate a filename.
fd, gclient_output_file = tempfile.mkstemp(suffix='.json')
os.close(fd)
gclient_bin = 'gclient.bat' if sys.platform.startswith('win') else 'gclient'
cmd = [gclient_bin, 'sync', '--verbose', '--reset', '--force',
'--ignore_locks', '--output-json', gclient_output_file,
'--nohooks', '--noprehooks', '--delete_unversioned_trees']
if with_branch_heads:
cmd += ['--with_branch_heads']
if shallow:
cmd += ['--shallow']
try:
call(*cmd, tries=1)
except SubprocessFailed as e:
# Throw a GclientSyncFailed exception so we can catch this independently.
raise GclientSyncFailed(e.message, e.code, e.output)
else:
with open(gclient_output_file) as f:
return json.load(f)
finally:
os.remove(gclient_output_file)
def gclient_runhooks(gyp_envs):
gclient_bin = 'gclient.bat' if sys.platform.startswith('win') else 'gclient'
env = dict([env_var.split('=', 1) for env_var in gyp_envs])
call(gclient_bin, 'runhooks', env=env)
def gclient_revinfo():
gclient_bin = 'gclient.bat' if sys.platform.startswith('win') else 'gclient'
return call(gclient_bin, 'revinfo', '-a') or ''
def create_manifest():
manifest = {}
output = gclient_revinfo()
for line in output.strip().splitlines():
match = REVINFO_RE.match(line.strip())
if match:
manifest[match.group(1)] = {
'repository': match.group(2),
'revision': match.group(3),
}
else:
print "WARNING: Couldn't match revinfo line:\n%s" % line
return manifest
def get_commit_message_footer_map(message):
"""Returns: (dict) A dictionary of commit message footer entries.
"""
footers = {}
# Extract the lines in the footer block.
lines = []
for line in message.strip().splitlines():
line = line.strip()
if len(line) == 0:
del lines[:]
continue
lines.append(line)
# Parse the footer
for line in lines:
m = COMMIT_FOOTER_ENTRY_RE.match(line)
if not m:
# If any single line isn't valid, the entire footer is invalid.
footers.clear()
return footers
footers[m.group(1)] = m.group(2).strip()
return footers
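# Illustrative example (assumes COMMIT_FOOTER_ENTRY_RE, defined earlier in this
# file, matches "Key: value" lines): for a commit message such as
#
#   Fix the widget.
#
#   Cr-Commit-Position: refs/heads/master@{#334455}
#
# the last paragraph is treated as the footer block, and the function would
# return {'Cr-Commit-Position': 'refs/heads/master@{#334455}'}.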
def get_commit_message_footer(message, key):
"""Returns: (str/None) The footer value for 'key', or None if none was found.
"""
return get_commit_message_footer_map(message).get(key)
def get_svn_rev(git_hash, dir_name):
log = git('log', '-1', git_hash, cwd=dir_name)
git_svn_id = get_commit_message_footer(log, GIT_SVN_ID_FOOTER_KEY)
if not git_svn_id:
return None
m = GIT_SVN_ID_RE.match(git_svn_id)
if not m:
return None
return int(m.group(2))
def get_git_hash(revision, branch, sln_dir):
"""We want to search for the SVN revision on the git-svn branch.
Note that git will search backwards from origin/master.
"""
match = "^%s: [^ ]*@%s " % (GIT_SVN_ID_FOOTER_KEY, revision)
ref = branch if branch.startswith('refs/') else 'origin/%s' % branch
cmd = ['log', '-E', '--grep', match, '--format=%H', '--max-count=1', ref]
result = git(*cmd, cwd=sln_dir).strip()
if result:
return result
raise SVNRevisionNotFound('We can\'t resolve svn r%s into a git hash in %s' %
(revision, sln_dir))
def _last_commit_for_file(filename, repo_base):
cmd = ['log', '--format=%H', '--max-count=1', '--', filename]
return git(*cmd, cwd=repo_base).strip()
def need_to_run_deps2git(repo_base, deps_file, deps_git_file):
"""Checks to see if we need to run deps2git.
Returns True if there was a DEPS change after the last .DEPS.git update
or if DEPS has local modifications.
"""
# See if DEPS is dirty
deps_file_status = git(
'status', '--porcelain', deps_file, cwd=repo_base).strip()
if deps_file_status and deps_file_status.startswith('M '):
return True
last_known_deps_ref = _last_commit_for_file(deps_file, repo_base)
last_known_deps_git_ref = _last_commit_for_file(deps_git_file, repo_base)
merge_base_ref = git('merge-base', last_known_deps_ref,
last_known_deps_git_ref, cwd=repo_base).strip()
# If the merge base of the last DEPS and last .DEPS.git file is not
  # equivalent to the hash of the last DEPS file, that means the DEPS file
# was committed after the last .DEPS.git file.
return last_known_deps_ref != merge_base_ref
def ensure_deps2git(solution, shallow):
repo_base = path.join(os.getcwd(), solution['name'])
deps_file = path.join(repo_base, 'DEPS')
deps_git_file = path.join(repo_base, '.DEPS.git')
if (not git('ls-files', 'DEPS', cwd=repo_base).strip() or
not git('ls-files', '.DEPS.git', cwd=repo_base).strip()):
return
print 'Checking if %s is newer than %s' % (deps_file, deps_git_file)
if not need_to_run_deps2git(repo_base, deps_file, deps_git_file):
return
print '===DEPS file modified, need to run deps2git==='
cmd = [sys.executable, DEPS2GIT_PATH,
'--workspace', os.getcwd(),
'--cache_dir', CACHE_DIR,
'--deps', deps_file,
'--out', deps_git_file]
if 'chrome-internal.googlesource' in solution['url']:
cmd.extend(['--extra-rules', S2G_INTERNAL_PATH])
if shallow:
cmd.append('--shallow')
call(*cmd)
def emit_log_lines(name, lines):
for line in lines.splitlines():
print '@@@STEP_LOG_LINE@%s@%s@@@' % (name, line)
print '@@@STEP_LOG_END@%s@@@' % name
def emit_properties(properties):
for property_name, property_value in sorted(properties.items()):
print '@@@SET_BUILD_PROPERTY@%s@"%s"@@@' % (property_name, property_value)
# Derived from:
# http://code.activestate.com/recipes/577972-disk-usage/?in=user-4178764
def get_total_disk_space():
cwd = os.getcwd()
# Windows is the only platform that doesn't support os.statvfs, so
# we need to special case this.
if sys.platform.startswith('win'):
_, total, free = (ctypes.c_ulonglong(), ctypes.c_ulonglong(), \
ctypes.c_ulonglong())
if sys.version_info >= (3,) or isinstance(cwd, unicode):
fn = ctypes.windll.kernel32.GetDiskFreeSpaceExW
else:
fn = ctypes.windll.kernel32.GetDiskFreeSpaceExA
ret = fn(cwd, ctypes.byref(_), ctypes.byref(total), ctypes.byref(free))
if ret == 0:
# WinError() will fetch the last error code.
raise ctypes.WinError()
return (total.value, free.value)
else:
st = os.statvfs(cwd)
free = st.f_bavail * st.f_frsize
total = st.f_blocks * st.f_frsize
return (total, free)
def get_target_revision(folder_name, git_url, revisions):
normalized_name = folder_name.strip('/')
if normalized_name in revisions:
return revisions[normalized_name]
if git_url in revisions:
return revisions[git_url]
return None
def force_revision(folder_name, revision):
split_revision = revision.split(':', 1)
branch = 'master'
if len(split_revision) == 2:
# Support for "branch:revision" syntax.
branch, revision = split_revision
if revision and revision.upper() != 'HEAD':
if revision and revision.isdigit() and len(revision) < 40:
# rev_num is really a svn revision number, convert it into a git hash.
git_ref = get_git_hash(int(revision), branch, folder_name)
else:
# rev_num is actually a git hash or ref, we can just use it.
git_ref = revision
git('checkout', '--force', git_ref, cwd=folder_name)
else:
ref = branch if branch.startswith('refs/') else 'origin/%s' % branch
git('checkout', '--force', ref, cwd=folder_name)
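# Examples of revision specs handled by force_revision() (hypothetical values):
# '123456' is treated as an SVN revision and resolved via get_git_hash();
# a full git hash or ref is checked out directly; 'refs/branch-heads/2272:123456'
# uses the "branch:revision" syntax to search that branch instead of
# origin/master; 'HEAD' simply checks out origin/master.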
def git_checkout(solutions, revisions, shallow, refs):
build_dir = os.getcwd()
# Before we do anything, break all git_cache locks.
if path.isdir(CACHE_DIR):
git('cache', 'unlock', '-vv', '--force', '--all', '--cache-dir', CACHE_DIR)
for item in os.listdir(CACHE_DIR):
filename = os.path.join(CACHE_DIR, item)
if item.endswith('.lock'):
raise Exception('%s exists after cache unlock' % filename)
first_solution = True
for sln in solutions:
# This is so we can loop back and try again if we need to wait for the
# git mirrors to update from SVN.
done = False
tries_left = 60
while not done:
name = sln['name']
url = sln['url']
if url == CHROMIUM_SRC_URL or url + '.git' == CHROMIUM_SRC_URL:
# Experiments show there's little to be gained from
# a shallow clone of src.
shallow = False
sln_dir = path.join(build_dir, name)
s = ['--shallow'] if shallow else []
populate_cmd = (['cache', 'populate', '--ignore_locks', '-v',
'--cache-dir', CACHE_DIR] + s + [url])
for ref in refs:
populate_cmd.extend(['--ref', ref])
git(*populate_cmd)
mirror_dir = git(
'cache', 'exists', '--quiet', '--cache-dir', CACHE_DIR, url).strip()
clone_cmd = (
'clone', '--no-checkout', '--local', '--shared', mirror_dir, sln_dir)
try:
if not path.isdir(sln_dir):
git(*clone_cmd)
else:
git('remote', 'set-url', 'origin', mirror_dir, cwd=sln_dir)
git('fetch', 'origin', cwd=sln_dir)
for ref in refs:
refspec = '%s:%s' % (ref, ref.lstrip('+'))
git('fetch', 'origin', refspec, cwd=sln_dir)
revision = get_target_revision(name, url, revisions) or 'HEAD'
force_revision(sln_dir, revision)
done = True
except SubprocessFailed as e:
        # Exited abnormally; there's probably something wrong.
        # Let's wipe the checkout and try again.
tries_left -= 1
if tries_left > 0:
print 'Something failed: %s.' % str(e)
print 'waiting 5 seconds and trying again...'
time.sleep(5)
else:
raise
remove(sln_dir)
except SVNRevisionNotFound:
tries_left -= 1
if tries_left > 0:
# If we don't have the correct revision, wait and try again.
print 'We can\'t find revision %s.' % revision
print 'The svn to git replicator is probably falling behind.'
print 'waiting 5 seconds and trying again...'
time.sleep(5)
else:
raise
git('clean', '-dff', cwd=sln_dir)
if first_solution:
git_ref = git('log', '--format=%H', '--max-count=1',
cwd=sln_dir).strip()
first_solution = False
return git_ref
def _download(url):
"""Fetch url and return content, with retries for flake."""
for attempt in xrange(ATTEMPTS):
try:
return urllib2.urlopen(url).read()
except Exception:
if attempt == ATTEMPTS - 1:
raise
def parse_diff(diff):
"""Takes a unified diff and returns a list of diffed files and their diffs.
The return format is a list of pairs of:
(<filename>, <diff contents>)
<diff contents> is inclusive of the diff line.
"""
result = []
current_diff = ''
current_header = None
for line in diff.splitlines():
# "diff" is for git style patches, and "Index: " is for SVN style patches.
if line.startswith('diff') or line.startswith('Index: '):
if current_header:
# If we are in a diff portion, then save the diff.
result.append((current_header, '%s\n' % current_diff))
git_header_match = re.match(r'diff (?:--git )?(\S+) (\S+)', line)
svn_header_match = re.match(r'Index: (.*)', line)
if git_header_match:
        # First, see if it's a git-style header.
from_file = git_header_match.group(1)
to_file = git_header_match.group(2)
if from_file != to_file and from_file.startswith('a/'):
# Sometimes git prepends 'a/' and 'b/' in front of file paths.
from_file = from_file[2:]
current_header = from_file
elif svn_header_match:
        # Otherwise, check if it's an SVN-style header.
current_header = svn_header_match.group(1)
else:
# Otherwise... I'm not really sure what to do with this.
raise InvalidDiff('Can\'t process header: %s\nFull diff:\n%s' %
(line, diff))
current_diff = ''
current_diff += '%s\n' % line
if current_header:
# We hit EOF, gotta save the last diff.
result.append((current_header, current_diff))
return result
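# A minimal sketch of parse_diff() input/output (hypothetical diff): given
#
#   Index: chrome/foo.cc
#   ===================================================================
#   --- chrome/foo.cc
#   +++ chrome/foo.cc
#   @@ -1,2 +1,2 @@
#   ...
#
# it returns [('chrome/foo.cc', '<the full hunk text, including the header line>')].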
def get_svn_patch(patch_url):
"""Fetch patch from patch_url, return list of (filename, diff)"""
svn_exe = 'svn.bat' if sys.platform.startswith('win') else 'svn'
patch_data = call(svn_exe, 'cat', patch_url)
return parse_diff(patch_data)
def apply_svn_patch(patch_root, patches, whitelist=None, blacklist=None):
"""Expects a list of (filename, diff), applies it on top of patch_root."""
if whitelist:
patches = [(name, diff) for name, diff in patches if name in whitelist]
elif blacklist:
patches = [(name, diff) for name, diff in patches if name not in blacklist]
diffs = [diff for _, diff in patches]
patch = ''.join(diffs)
if patch:
print '===Patching files==='
for filename, _ in patches:
print 'Patching %s' % filename
try:
call(PATCH_TOOL, '-p0', '--remove-empty-files', '--force', '--forward',
stdin_data=patch, cwd=patch_root, tries=1)
for filename, _ in patches:
full_filename = path.abspath(path.join(patch_root, filename))
git('add', full_filename, cwd=path.dirname(full_filename))
except SubprocessFailed as e:
raise PatchFailed(e.message, e.code, e.output)
def apply_rietveld_issue(issue, patchset, root, server, _rev_map, _revision,
email_file, key_file, whitelist=None, blacklist=None):
apply_issue_bin = ('apply_issue.bat' if sys.platform.startswith('win')
else 'apply_issue')
cmd = [apply_issue_bin,
# The patch will be applied on top of this directory.
'--root_dir', root,
# Tell apply_issue how to fetch the patch.
'--issue', issue,
'--server', server,
# Always run apply_issue.py, otherwise it would see update.flag
# and then bail out.
'--force',
# Don't run gclient sync when it sees a DEPS change.
'--ignore_deps',
# TODO(tandrii): remove after http://crbug.com/537417 is resolved.
# Temporary enable verbosity to see if Rietveld requests are actually
# retried.
'-v', '-v', # = logging.DEBUG level.
]
# Use an oauth key file if specified.
if email_file and key_file:
cmd.extend(['--email-file', email_file, '--private-key-file', key_file])
else:
cmd.append('--no-auth')
if patchset:
cmd.extend(['--patchset', patchset])
if whitelist:
for item in whitelist:
cmd.extend(['--whitelist', item])
elif blacklist:
for item in blacklist:
cmd.extend(['--blacklist', item])
# Only try once, since subsequent failures hide the real failure.
try:
call(*cmd, tries=1)
except SubprocessFailed as e:
raise PatchFailed(e.message, e.code, e.output)
def apply_gerrit_ref(gerrit_repo, gerrit_ref, root):
gerrit_repo = gerrit_repo or 'origin'
assert gerrit_ref
try:
base_rev = git('rev-parse', 'HEAD', cwd=root).strip()
git('retry', 'fetch', gerrit_repo, gerrit_ref, cwd=root, tries=1)
git('checkout', 'FETCH_HEAD', cwd=root)
git('reset', '--soft', base_rev, cwd=root)
except SubprocessFailed as e:
raise PatchFailed(e.message, e.code, e.output)
def check_flag(flag_file):
"""Returns True if the flag file is present."""
return os.path.isfile(flag_file)
def delete_flag(flag_file):
"""Remove bot update flag."""
if os.path.isfile(flag_file):
os.remove(flag_file)
def emit_flag(flag_file):
"""Deposit a bot update flag on the system to tell gclient not to run."""
print 'Emitting flag file at %s' % flag_file
with open(flag_file, 'wb') as f:
f.write('Success!')
def get_commit_position_for_git_svn(url, revision):
"""Generates a commit position string for a 'git-svn' URL/revision.
If the 'git-svn' URL maps to a known project, we will construct a commit
position branch value by applying substitution on the SVN URL.
"""
# Identify the base URL so we can strip off trunk/branch name
project_config = branch = None
for _, project_config in GIT_SVN_PROJECT_MAP.iteritems():
if url.startswith(project_config['svn_url']):
branch = url[len(project_config['svn_url']):]
break
if branch:
# Strip any leading slashes
branch = branch.lstrip('/')
# Try and map the branch
for pattern, repl in project_config.get('branch_map', ()):
nbranch, subn = re.subn(pattern, repl, branch, count=1)
if subn:
print 'INFO: Mapped SVN branch to Git branch [%s] => [%s]' % (
branch, nbranch)
branch = nbranch
break
else:
# Use generic 'svn' branch
print 'INFO: Could not resolve project for SVN URL %r' % (url,)
branch = 'svn'
return '%s@{#%s}' % (branch, revision)
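# Illustrative example (assumes GIT_SVN_PROJECT_MAP, defined earlier in this
# file, maps the Chromium trunk URL to refs/heads/master): a git-svn URL of
# 'svn://svn.chromium.org/chrome/trunk/src' with revision '200000' would yield
# 'refs/heads/master@{#200000}', while an unrecognized URL falls back to the
# generic 'svn@{#200000}'.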
def get_commit_position(git_path, revision='HEAD'):
"""Dumps the 'git' log for a specific revision and parses out the commit
position.
If a commit position metadata key is found, its value will be returned.
Otherwise, we will search for a 'git-svn' metadata entry. If one is found,
we will compose a commit position from it, using its SVN revision value as
the revision.
If the 'git-svn' URL maps to a known project, we will construct a commit
position branch value by truncating the URL, mapping 'trunk' to
"refs/heads/master". Otherwise, we will return the generic branch, 'svn'.
"""
git_log = git('log', '--format=%B', '-n1', revision, cwd=git_path)
footer_map = get_commit_message_footer_map(git_log)
# Search for commit position metadata
value = (footer_map.get(COMMIT_POSITION_FOOTER_KEY) or
footer_map.get(COMMIT_ORIGINAL_POSITION_FOOTER_KEY))
if value:
return value
# Compose a commit position from 'git-svn' metadata
value = footer_map.get(GIT_SVN_ID_FOOTER_KEY)
if value:
m = GIT_SVN_ID_RE.match(value)
if not m:
raise ValueError("Invalid 'git-svn' value: [%s]" % (value,))
return get_commit_position_for_git_svn(m.group(1), m.group(2))
return None
def parse_got_revision(gclient_output, got_revision_mapping, use_svn_revs):
"""Translate git gclient revision mapping to build properties.
If use_svn_revs is True, then translate git hashes in the revision mapping
to svn revision numbers.
"""
properties = {}
solutions_output = {
# Make sure path always ends with a single slash.
'%s/' % path.rstrip('/') : solution_output for path, solution_output
in gclient_output['solutions'].iteritems()
}
for dir_name, property_name in got_revision_mapping.iteritems():
# Make sure dir_name always ends with a single slash.
dir_name = '%s/' % dir_name.rstrip('/')
if dir_name not in solutions_output:
continue
solution_output = solutions_output[dir_name]
if solution_output.get('scm') is None:
# This is an ignored DEPS, so the output got_revision should be 'None'.
git_revision = revision = commit_position = None
else:
# Since we are using .DEPS.git, everything had better be git.
assert solution_output.get('scm') == 'git'
git_revision = git('rev-parse', 'HEAD', cwd=dir_name).strip()
if use_svn_revs:
revision = get_svn_rev(git_revision, dir_name)
if not revision:
revision = git_revision
else:
revision = git_revision
commit_position = get_commit_position(dir_name)
properties[property_name] = revision
if revision != git_revision:
properties['%s_git' % property_name] = git_revision
if commit_position:
properties['%s_cp' % property_name] = commit_position
return properties
def emit_json(out_file, did_run, gclient_output=None, **kwargs):
"""Write run information into a JSON file."""
output = {}
output.update(gclient_output if gclient_output else {})
output.update({'did_run': did_run})
output.update(kwargs)
with open(out_file, 'wb') as f:
f.write(json.dumps(output, sort_keys=True))
def ensure_deps_revisions(deps_url_mapping, solutions, revisions):
"""Ensure correct DEPS revisions, ignores solutions."""
for deps_name, deps_data in sorted(deps_url_mapping.items()):
if deps_name.strip('/') in solutions:
# This has already been forced to the correct solution by git_checkout().
continue
revision = get_target_revision(deps_name, deps_data.get('url', None),
revisions)
if not revision:
continue
# TODO(hinoka): Catch SVNRevisionNotFound error maybe?
git('fetch', 'origin', cwd=deps_name)
force_revision(deps_name, revision)
def ensure_checkout(solutions, revisions, first_sln, target_os, target_os_only,
patch_root, issue, patchset, patch_url, rietveld_server,
gerrit_repo, gerrit_ref, revision_mapping,
apply_issue_email_file, apply_issue_key_file, buildspec,
gyp_env, shallow, runhooks, refs):
# Get a checkout of each solution, without DEPS or hooks.
# Calling git directly because there is no way to run Gclient without
# invoking DEPS.
print 'Fetching Git checkout'
git_ref = git_checkout(solutions, revisions, shallow, refs)
patches = None
if patch_url:
patches = get_svn_patch(patch_url)
already_patched = []
patch_root = patch_root or ''
for solution in solutions:
if (patch_root == solution['name'] or
solution['name'].startswith(patch_root + '/')):
relative_root = solution['name'][len(patch_root) + 1:]
target = '/'.join([relative_root, 'DEPS']).lstrip('/')
if patches:
apply_svn_patch(patch_root, patches, whitelist=[target])
already_patched.append(target)
elif issue:
apply_rietveld_issue(issue, patchset, patch_root, rietveld_server,
revision_mapping, git_ref, apply_issue_email_file,
apply_issue_key_file, whitelist=[target])
already_patched.append(target)
if not buildspec:
# Run deps2git if there is a DEPS change after the last .DEPS.git commit.
for solution in solutions:
ensure_deps2git(solution, shallow)
# Ensure our build/ directory is set up with the correct .gclient file.
gclient_configure(solutions, target_os, target_os_only)
# backward compatibility DEPS patch
  replacements = {
      'http://gyp.googlecode.com/svn/trunk':
          'https://github.com/svn2github/gyp/trunk',
      'http://googlemock.googlecode.com/svn/trunk':
          'https://github.com/google/googlemock/trunk',
      'http://googletest.googlecode.com/svn/trunk':
          'https://github.com/google/googletest/trunk',
  }
lines = []
with open('%s/DEPS'%(patch_root)) as infile:
for line in infile:
for src, target in replacements.iteritems():
line = line.replace(src, target)
lines.append(line)
with open('%s/DEPS'%(patch_root), 'w') as outfile:
for line in lines:
outfile.write(line)
# Let gclient do the DEPS syncing.
  # The branch-head refspec is a special case because it's possible Chrome
# src, which contains the branch-head refspecs, is DEPSed in.
gclient_output = gclient_sync(buildspec or BRANCH_HEADS_REFSPEC in refs,
shallow)
# Now that gclient_sync has finished, we should revert any .DEPS.git so that
# presubmit doesn't complain about it being modified.
if (not buildspec and
git('ls-files', '.DEPS.git', cwd=first_sln).strip()):
git('checkout', 'HEAD', '--', '.DEPS.git', cwd=first_sln)
if buildspec and runhooks:
# Run gclient runhooks if we're on an official builder.
# TODO(hinoka): Remove this when the official builders run their own
# runhooks step.
gclient_runhooks(gyp_env)
# Finally, ensure that all DEPS are pinned to the correct revision.
dir_names = [sln['name'] for sln in solutions]
ensure_deps_revisions(gclient_output.get('solutions', {}),
dir_names, revisions)
# Apply the rest of the patch here (sans DEPS)
if patches:
apply_svn_patch(patch_root, patches, blacklist=already_patched)
elif issue:
apply_rietveld_issue(issue, patchset, patch_root, rietveld_server,
revision_mapping, git_ref, apply_issue_email_file,
apply_issue_key_file, blacklist=already_patched)
elif gerrit_ref:
apply_gerrit_ref(gerrit_repo, gerrit_ref, patch_root)
# Reset the deps_file point in the solutions so that hooks get run properly.
for sln in solutions:
sln['deps_file'] = sln.get('deps_file', 'DEPS').replace('.DEPS.git', 'DEPS')
gclient_configure(solutions, target_os, target_os_only)
return gclient_output
def parse_revisions(revisions, root):
"""Turn a list of revision specs into a nice dictionary.
  We will always return a dict with {root: something}. If revisions is [] or
  contains no entry for root, the root's revision defaults to 'HEAD'.
"""
results = {root.strip('/'): 'HEAD'}
expanded_revisions = []
for revision in revisions:
# Allow rev1,rev2,rev3 format.
# TODO(hinoka): Delete this when webkit switches to recipes.
expanded_revisions.extend(revision.split(','))
for revision in expanded_revisions:
split_revision = revision.split('@')
if len(split_revision) == 1:
# This is just a plain revision, set it as the revision for root.
results[root] = split_revision[0]
elif len(split_revision) == 2:
# This is an alt_root@revision argument.
current_root, current_rev = split_revision
# We want to normalize svn/git urls into .git urls.
parsed_root = urlparse.urlparse(current_root)
if parsed_root.scheme == 'svn':
if parsed_root.path in RECOGNIZED_PATHS:
normalized_root = RECOGNIZED_PATHS[parsed_root.path]
else:
print 'WARNING: SVN path %s not recognized, ignoring' % current_root
continue
elif parsed_root.scheme in ['http', 'https']:
normalized_root = 'https://%s/%s' % (parsed_root.netloc,
parsed_root.path)
if not normalized_root.endswith('.git'):
normalized_root = '%s.git' % normalized_root
elif parsed_root.scheme:
print 'WARNING: Unrecognized scheme %s, ignoring' % parsed_root.scheme
continue
else:
# This is probably a local path.
normalized_root = current_root.strip('/')
results[normalized_root] = current_rev
else:
      print ('WARNING: %r is not recognized as a valid revision specification, '
'skipping' % revision)
return results
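# Examples of parse_revisions() behavior (hypothetical values):
#
#   parse_revisions([], 'src')              # -> {'src': 'HEAD'}
#   parse_revisions(['123456'], 'src')      # -> {'src': '123456'}
#   parse_revisions(['src@f00dcafe'], 'src') # -> {'src': 'f00dcafe'}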
def parse_args():
parse = optparse.OptionParser()
parse.add_option('--issue', help='Issue number to patch from.')
parse.add_option('--patchset',
help='Patchset from issue to patch from, if applicable.')
parse.add_option('--apply_issue_email_file',
help='--email-file option passthrough for apply_patch.py.')
parse.add_option('--apply_issue_key_file',
help='--private-key-file option passthrough for '
'apply_patch.py.')
parse.add_option('--patch_url', help='Optional URL to SVN patch.')
parse.add_option('--root', dest='patch_root',
help='DEPRECATED: Use --patch_root.')
parse.add_option('--patch_root', help='Directory to patch on top of.')
parse.add_option('--rietveld_server',
default='codereview.chromium.org',
help='Rietveld server.')
parse.add_option('--gerrit_repo',
help='Gerrit repository to pull the ref from.')
parse.add_option('--gerrit_ref', help='Gerrit ref to apply.')
  parse.add_option('--specs', help='Gclient spec.')
parse.add_option('--master', help='Master name.')
parse.add_option('-f', '--force', action='store_true',
help='Bypass check to see if we want to be run. '
'Should ONLY be used locally or by smart recipes.')
parse.add_option('--revision_mapping',
help='{"path/to/repo/": "property_name"}')
parse.add_option('--revision_mapping_file',
                   help=('Same as revision_mapping, except it is a path to a JSON'
' file containing that format.'))
parse.add_option('--revision', action='append', default=[],
help='Revision to check out. Can be an SVN revision number, '
'git hash, or any form of git ref. Can prepend '
'root@<rev> to specify which repository, where root '
'is either a filesystem path, git https url, or '
                        'svn url. To specify Tip of Tree, set rev to HEAD. '
'To specify a git branch and an SVN rev, <rev> can be '
'set to <branch>:<revision>.')
parse.add_option('--output_manifest', action='store_true',
help=('Add manifest json to the json output.'))
parse.add_option('--slave_name', default=socket.getfqdn().split('.')[0],
help='Hostname of the current machine, '
'used for determining whether or not to activate.')
parse.add_option('--builder_name', help='Name of the builder, '
'used for determining whether or not to activate.')
parse.add_option('--build_dir', default=os.getcwd())
parse.add_option('--flag_file', default=path.join(os.getcwd(),
'update.flag'))
parse.add_option('--shallow', action='store_true',
help='Use shallow clones for cache repositories.')
parse.add_option('--gyp_env', action='append', default=[],
help='Environment variables to pass into gclient runhooks.')
parse.add_option('--clobber', action='store_true',
help='Delete checkout first, always')
parse.add_option('--bot_update_clobber', action='store_true', dest='clobber',
help='(synonym for --clobber)')
parse.add_option('-o', '--output_json',
help='Output JSON information into a specified file')
parse.add_option('--no_shallow', action='store_true',
help='Bypass disk detection and never shallow clone. '
'Does not override the --shallow flag')
parse.add_option('--no_runhooks', action='store_true',
help='Do not run hooks on official builder.')
parse.add_option('--refs', action='append',
help='Also fetch this refspec for the main solution(s). '
'Eg. +refs/branch-heads/*')
parse.add_option('--with_branch_heads', action='store_true',
help='Always pass --with_branch_heads to gclient. This '
'does the same thing as --refs +refs/branch-heads/*')
options, args = parse.parse_args()
if not options.refs:
options.refs = []
if options.with_branch_heads:
options.refs.append(BRANCH_HEADS_REFSPEC)
del options.with_branch_heads
try:
if options.revision_mapping_file:
if options.revision_mapping:
print ('WARNING: Ignoring --revision_mapping: --revision_mapping_file '
'was set at the same time as --revision_mapping?')
with open(options.revision_mapping_file, 'r') as f:
options.revision_mapping = json.load(f)
elif options.revision_mapping:
options.revision_mapping = json.loads(options.revision_mapping)
except Exception as e:
print (
        'WARNING: Caught exception while parsing revision_mapping*: %s'
% (str(e),)
)
return options, args
def prepare(options, git_slns, active):
"""Prepares the target folder before we checkout."""
dir_names = [sln.get('name') for sln in git_slns if 'name' in sln]
# If we're active now, but the flag file doesn't exist (we weren't active
# last run) or vice versa, blow away all checkouts.
if bool(active) != bool(check_flag(options.flag_file)):
ensure_no_checkout(dir_names, '*')
if options.output_json:
# Make sure we tell recipes that we didn't run if the script exits here.
emit_json(options.output_json, did_run=active)
if active:
if options.clobber:
ensure_no_checkout(dir_names, '*')
else:
ensure_no_checkout(dir_names, '.svn')
emit_flag(options.flag_file)
else:
delete_flag(options.flag_file)
raise Inactive # This is caught in main() and we exit cleanly.
# Do a shallow checkout if the disk is less than 100GB.
total_disk_space, free_disk_space = get_total_disk_space()
total_disk_space_gb = int(total_disk_space / (1024 * 1024 * 1024))
used_disk_space_gb = int((total_disk_space - free_disk_space)
/ (1024 * 1024 * 1024))
percent_used = int(used_disk_space_gb * 100 / total_disk_space_gb)
step_text = '[%dGB/%dGB used (%d%%)]' % (used_disk_space_gb,
total_disk_space_gb,
percent_used)
if not options.output_json:
print '@@@STEP_TEXT@%s@@@' % step_text
if not options.shallow:
options.shallow = (total_disk_space < SHALLOW_CLONE_THRESHOLD
and not options.no_shallow)
# The first solution is where the primary DEPS file resides.
first_sln = dir_names[0]
# Split all the revision specifications into a nice dict.
print 'Revisions: %s' % options.revision
revisions = parse_revisions(options.revision, first_sln)
print 'Fetching Git checkout at %s@%s' % (first_sln, revisions[first_sln])
return revisions, step_text
def checkout(options, git_slns, specs, buildspec, master,
svn_root, revisions, step_text):
first_sln = git_slns[0]['name']
dir_names = [sln.get('name') for sln in git_slns if 'name' in sln]
try:
# Outer try is for catching patch failures and exiting gracefully.
# Inner try is for catching gclient failures and retrying gracefully.
try:
checkout_parameters = dict(
# First, pass in the base of what we want to check out.
solutions=git_slns,
revisions=revisions,
first_sln=first_sln,
# Also, target os variables for gclient.
target_os=specs.get('target_os', []),
target_os_only=specs.get('target_os_only', False),
# Then, pass in information about how to patch.
patch_root=options.patch_root,
issue=options.issue,
patchset=options.patchset,
patch_url=options.patch_url,
rietveld_server=options.rietveld_server,
gerrit_repo=options.gerrit_repo,
gerrit_ref=options.gerrit_ref,
revision_mapping=options.revision_mapping,
apply_issue_email_file=options.apply_issue_email_file,
apply_issue_key_file=options.apply_issue_key_file,
# For official builders.
buildspec=buildspec,
gyp_env=options.gyp_env,
runhooks=not options.no_runhooks,
# Finally, extra configurations such as shallowness of the clone.
shallow=options.shallow,
refs=options.refs)
gclient_output = ensure_checkout(**checkout_parameters)
except GclientSyncFailed:
print 'We failed gclient sync, lets delete the checkout and retry.'
ensure_no_checkout(dir_names, '*')
gclient_output = ensure_checkout(**checkout_parameters)
except PatchFailed as e:
if options.output_json:
# Tell recipes information such as root, got_revision, etc.
emit_json(options.output_json,
did_run=True,
root=first_sln,
log_lines=[('patch error', e.output),],
patch_apply_return_code=e.code,
patch_root=options.patch_root,
patch_failure=True,
step_text='%s PATCH FAILED' % step_text,
fixed_revisions=revisions)
else:
# If we're not on recipes, tell annotator about our got_revisions.
emit_log_lines('patch error', e.output)
print '@@@STEP_TEXT@%s PATCH FAILED@@@' % step_text
raise
# Revision is an svn revision, unless it's a git master.
use_svn_rev = master not in GIT_MASTERS
# Take care of got_revisions outputs.
revision_mapping = dict(GOT_REVISION_MAPPINGS.get(svn_root, {}))
if options.revision_mapping:
revision_mapping.update(options.revision_mapping)
# If the repo is not in the default GOT_REVISION_MAPPINGS and no
  # revision_mapping was specified on the command line, then
# default to setting 'got_revision' based on the first solution.
if not revision_mapping:
revision_mapping[first_sln] = 'got_revision'
got_revisions = parse_got_revision(gclient_output, revision_mapping,
use_svn_rev)
if not got_revisions:
# TODO(hinoka): We should probably bail out here, but in the interest
    # of giving mis-configured bots some time to get fixed, use a dummy
    # revision here.
got_revisions = { 'got_revision': 'BOT_UPDATE_NO_REV_FOUND' }
#raise Exception('No got_revision(s) found in gclient output')
if options.output_json:
manifest = create_manifest() if options.output_manifest else None
# Tell recipes information such as root, got_revision, etc.
emit_json(options.output_json,
did_run=True,
root=first_sln,
patch_root=options.patch_root,
step_text=step_text,
fixed_revisions=revisions,
properties=got_revisions,
manifest=manifest)
else:
# If we're not on recipes, tell annotator about our got_revisions.
emit_properties(got_revisions)
def print_help_text(force, output_json, active, master, builder, slave):
"""Print helpful messages to tell devs whats going on."""
if force and output_json:
recipe_force = 'Forced on by recipes'
elif active and output_json:
recipe_force = 'Off by recipes, but forced on by bot update'
elif not active and output_json:
recipe_force = 'Forced off by recipes'
else:
recipe_force = 'N/A. Was not called by recipes'
print BOT_UPDATE_MESSAGE % {
'master': master or 'Not specified',
'builder': builder or 'Not specified',
'slave': slave or 'Not specified',
'recipe': recipe_force,
'CURRENT_DIR': CURRENT_DIR,
'BUILDER_DIR': BUILDER_DIR,
'SLAVE_DIR': SLAVE_DIR,
'THIS_DIR': THIS_DIR,
'SCRIPTS_DIR': SCRIPTS_DIR,
'BUILD_DIR': BUILD_DIR,
'ROOT_DIR': ROOT_DIR,
'DEPOT_TOOLS_DIR': DEPOT_TOOLS_DIR,
},
print ACTIVATED_MESSAGE if active else NOT_ACTIVATED_MESSAGE
def main():
# Get inputs.
options, _ = parse_args()
builder = options.builder_name
slave = options.slave_name
master = options.master
# Check if this script should activate or not.
active = check_valid_host(master, builder, slave) or options.force or False
# Print a helpful message to tell developers whats going on with this step.
print_help_text(
options.force, options.output_json, active, master, builder, slave)
  # Parse, manipulate, and print the gclient solutions.
specs = {}
exec(options.specs, specs)
svn_solutions = specs.get('solutions', [])
git_slns, svn_root, buildspec = solutions_to_git(svn_solutions)
options.revision = maybe_ignore_revision(options.revision, buildspec)
solutions_printer(git_slns)
try:
# Dun dun dun, the main part of bot_update.
revisions, step_text = prepare(options, git_slns, active)
checkout(options, git_slns, specs, buildspec, master, svn_root, revisions,
step_text)
except Inactive:
# Not active, should count as passing.
pass
except PatchFailed as e:
emit_flag(options.flag_file)
# Return a specific non-zero exit code for patch failure (because it is
# a failure), but make it different than other failures to distinguish
# between infra failures (independent from patch author), and patch
# failures (that patch author can fix). However, PatchFailure due to
# download patch failure is still an infra problem.
if e.code == 3:
# Patch download problem.
return 87
# Genuine patch problem.
return 88
except Exception:
# Unexpected failure.
emit_flag(options.flag_file)
raise
else:
emit_flag(options.flag_file)
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
ingadhoc/odoo-addons | base_currency_inverse_rate/models/res_currency.py | 7 | 1413 | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, fields, api
import logging
_logger = logging.getLogger(__name__)
class res_currency(models.Model):
_inherit = "res.currency"
inverse_rate_silent = fields.Float(
'Current Inverse Rate', digits=(12, 4),
compute='get_inverse_rate_silent',
help='The rate of the currency from the currency of rate 1 (0 if no '
'rate defined).'
)
@api.one
@api.depends('rate_silent')
def get_inverse_rate_silent(self):
self.inverse_rate_silent = self.rate_silent and (
1.0 / (self.rate_silent))
class res_currency_rate(models.Model):
_inherit = "res.currency.rate"
inverse_rate = fields.Float(
'Inverse Rate', digits=(12, 4),
compute='get_inverse_rate',
inverse='set_inverse_rate',
help='The rate of the currency from the currency of rate 1',
)
@api.one
@api.depends('rate')
def get_inverse_rate(self):
self.inverse_rate = self.rate and (1.0 / (self.rate))
@api.one
def set_inverse_rate(self):
self.rate = self.inverse_rate and (1.0 / (self.inverse_rate))
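    # Worked example (hypothetical figures): if a stored rate is 0.05, the
    # computed inverse_rate is 1.0 / 0.05 = 20.0; writing an inverse_rate of
    # 20.0 stores the rate back as 1.0 / 20.0 = 0.05.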
| agpl-3.0 |
dancingdan/tensorflow | tensorflow/python/data/experimental/ops/resampling.py | 13 | 11963 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Resampling dataset transformations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.data.experimental.ops import batching
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.experimental.ops import scan_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util.tf_export import tf_export
@tf_export("data.experimental.rejection_resample")
def rejection_resample(class_func, target_dist, initial_dist=None, seed=None):
"""A transformation that resamples a dataset to achieve a target distribution.
**NOTE** Resampling is performed via rejection sampling; some fraction
of the input values will be dropped.
Args:
class_func: A function mapping an element of the input dataset to a scalar
`tf.int32` tensor. Values should be in `[0, num_classes)`.
target_dist: A floating point type tensor, shaped `[num_classes]`.
initial_dist: (Optional.) A floating point type tensor, shaped
`[num_classes]`. If not provided, the true class distribution is
estimated live in a streaming fashion.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""
def _apply_fn(dataset):
"""Function from `Dataset` to `Dataset` that applies the transformation."""
target_dist_t = ops.convert_to_tensor(target_dist, name="target_dist")
class_values_ds = dataset.map(class_func)
# Get initial distribution.
if initial_dist is not None:
initial_dist_t = ops.convert_to_tensor(initial_dist, name="initial_dist")
acceptance_dist, prob_of_original = (
_calculate_acceptance_probs_with_mixing(initial_dist_t,
target_dist_t))
initial_dist_ds = dataset_ops.Dataset.from_tensors(
initial_dist_t).repeat()
acceptance_dist_ds = dataset_ops.Dataset.from_tensors(
acceptance_dist).repeat()
prob_of_original_ds = dataset_ops.Dataset.from_tensors(
prob_of_original).repeat()
else:
initial_dist_ds = _estimate_initial_dist_ds(
target_dist_t, class_values_ds)
acceptance_and_original_prob_ds = initial_dist_ds.map(
lambda initial: _calculate_acceptance_probs_with_mixing( # pylint: disable=g-long-lambda
initial, target_dist_t))
acceptance_dist_ds = acceptance_and_original_prob_ds.map(
lambda accept_prob, _: accept_prob)
prob_of_original_ds = acceptance_and_original_prob_ds.map(
lambda _, prob_original: prob_original)
filtered_ds = _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds,
class_values_ds, seed)
# Prefetch filtered dataset for speed.
filtered_ds = filtered_ds.prefetch(3)
prob_original_static = _get_prob_original_static(
initial_dist_t, target_dist_t) if initial_dist is not None else None
if prob_original_static == 1:
return dataset_ops.Dataset.zip((class_values_ds, dataset))
elif prob_original_static == 0:
return filtered_ds
else:
return interleave_ops.sample_from_datasets(
[dataset_ops.Dataset.zip((class_values_ds, dataset)), filtered_ds],
weights=prob_of_original_ds.map(lambda prob: [(prob, 1.0 - prob)]),
seed=seed)
return _apply_fn
def _get_prob_original_static(initial_dist_t, target_dist_t):
"""Returns the static probability of sampling from the original.
`tensor_util.constant_value(prob_of_original)` returns `None` if it encounters
an Op that it isn't defined for. We have some custom logic to avoid this.
Args:
initial_dist_t: A tensor of the initial distribution.
target_dist_t: A tensor of the target distribution.
Returns:
The probability of sampling from the original distribution as a constant,
if it is a constant, or `None`.
"""
init_static = tensor_util.constant_value(initial_dist_t)
target_static = tensor_util.constant_value(target_dist_t)
if init_static is None or target_static is None:
return None
else:
return np.min(target_static / init_static)
def _filter_ds(dataset, acceptance_dist_ds, initial_dist_ds, class_values_ds,
seed):
"""Filters a dataset based on per-class acceptance probabilities.
Args:
dataset: The dataset to be filtered.
acceptance_dist_ds: A dataset of acceptance probabilities.
initial_dist_ds: A dataset of the initial probability distribution, given or
estimated.
class_values_ds: A dataset of the corresponding classes.
seed: (Optional.) Python integer seed for the resampler.
Returns:
A dataset of (class value, data) after filtering.
"""
def maybe_warn_on_large_rejection(accept_dist, initial_dist):
proportion_rejected = math_ops.reduce_sum((1 - accept_dist) * initial_dist)
return control_flow_ops.cond(
math_ops.less(proportion_rejected, .5),
lambda: accept_dist,
lambda: logging_ops.Print( # pylint: disable=g-long-lambda
accept_dist, [proportion_rejected, initial_dist, accept_dist],
message="Proportion of examples rejected by sampler is high: ",
summarize=100,
first_n=10))
acceptance_dist_ds = (dataset_ops.Dataset.zip((acceptance_dist_ds,
initial_dist_ds))
.map(maybe_warn_on_large_rejection))
def _gather_and_copy(class_val, acceptance_prob, data):
return class_val, array_ops.gather(acceptance_prob, class_val), data
current_probabilities_and_class_and_data_ds = dataset_ops.Dataset.zip(
(class_values_ds, acceptance_dist_ds, dataset)).map(_gather_and_copy)
filtered_ds = (
current_probabilities_and_class_and_data_ds
.filter(lambda _1, p, _2: random_ops.random_uniform([], seed=seed) < p))
return filtered_ds.map(lambda class_value, _, data: (class_value, data))
def _estimate_initial_dist_ds(
target_dist_t, class_values_ds, dist_estimation_batch_size=32,
smoothing_constant=10):
num_classes = (target_dist_t.shape[0].value or
array_ops.shape(target_dist_t)[0])
initial_examples_per_class_seen = array_ops.fill(
[num_classes], np.int64(smoothing_constant))
def update_estimate_and_tile(num_examples_per_class_seen, c):
updated_examples_per_class_seen, dist = _estimate_data_distribution(
c, num_examples_per_class_seen)
tiled_dist = array_ops.tile(
array_ops.expand_dims(dist, 0), [dist_estimation_batch_size, 1])
return updated_examples_per_class_seen, tiled_dist
initial_dist_ds = (class_values_ds.batch(dist_estimation_batch_size)
.apply(scan_ops.scan(initial_examples_per_class_seen,
update_estimate_and_tile))
.apply(batching.unbatch()))
return initial_dist_ds
def _get_target_to_initial_ratio(initial_probs, target_probs):
# Add tiny to initial_probs to avoid divide by zero.
denom = (initial_probs + np.finfo(initial_probs.dtype.as_numpy_dtype).tiny)
return target_probs / denom
def _estimate_data_distribution(c, num_examples_per_class_seen):
"""Estimate data distribution as labels are seen.
Args:
c: The class labels. Type `int32`, shape `[batch_size]`.
num_examples_per_class_seen: Type `int64`, shape `[num_classes]`,
containing counts.
Returns:
    num_examples_per_class_seen: Updated counts. Type `int64`, shape
`[num_classes]`.
dist: The updated distribution. Type `float32`, shape `[num_classes]`.
"""
num_classes = num_examples_per_class_seen.get_shape()[0].value
# Update the class-count based on what labels are seen in batch.
num_examples_per_class_seen = math_ops.add(
num_examples_per_class_seen, math_ops.reduce_sum(
array_ops.one_hot(c, num_classes, dtype=dtypes.int64), 0))
init_prob_estimate = math_ops.truediv(
num_examples_per_class_seen,
math_ops.reduce_sum(num_examples_per_class_seen))
dist = math_ops.cast(init_prob_estimate, dtypes.float32)
return num_examples_per_class_seen, dist
def _calculate_acceptance_probs_with_mixing(initial_probs, target_probs):
"""Calculates the acceptance probabilities and mixing ratio.
In this case, we assume that we can *either* sample from the original data
distribution with probability `m`, or sample from a reshaped distribution
that comes from rejection sampling on the original distribution. This
rejection sampling is done on a per-class basis, with `a_i` representing the
probability of accepting data from class `i`.
This method is based on solving the following analysis for the reshaped
distribution:
Let F be the probability of a rejection (on any example).
Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i be the rate at which the rejection sampler should *accept* class i
  Let t_i be the target proportion in the minibatches for class i (target_probs)
```
F = sum_i(p_i * (1-a_i))
= 1 - sum_i(p_i * a_i) using sum_i(p_i) = 1
```
An example with class `i` will be accepted if `k` rejections occur, then an
example with class `i` is seen by the rejector, and it is accepted. This can
be written as follows:
```
t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_i / (1 - F) using geometric series identity, since 0 <= F < 1
= p_i * a_i / sum_j(p_j * a_j) using F from above
```
Note that the following constraints hold:
```
0 <= p_i <= 1, sum_i(p_i) = 1
0 <= a_i <= 1
0 <= t_i <= 1, sum_i(t_i) = 1
```
A solution for a_i in terms of the other variables is the following:
```a_i = (t_i / p_i) / max_i[t_i / p_i]```
If we try to minimize the amount of data rejected, we get the following:
M_max = max_i [ t_i / p_i ]
M_min = min_i [ t_i / p_i ]
The desired probability of accepting data if it comes from class `i`:
a_i = (t_i/p_i - m) / (M_max - m)
The desired probability of pulling a data element from the original dataset,
rather than the filtered one:
m = M_min
Args:
initial_probs: A Tensor of the initial probability distribution, given or
estimated.
    target_probs: A Tensor of the target probability distribution over classes.
Returns:
(A 1D Tensor with the per-class acceptance probabilities, the desired
probability of pull from the original distribution.)
"""
ratio_l = _get_target_to_initial_ratio(initial_probs, target_probs)
max_ratio = math_ops.reduce_max(ratio_l)
min_ratio = math_ops.reduce_min(ratio_l)
# Target prob to sample from original distribution.
m = min_ratio
# TODO(joelshor): Simplify fraction, if possible.
a_i = (ratio_l - m) / (max_ratio - m)
return a_i, m
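# Illustrative worked example (editorial note, not part of the original
# module): with initial_probs=[0.9, 0.1] and target_probs=[0.5, 0.5], the
# ratios t_i/p_i are [0.556, 5.0], so m = 0.556 and a_i = [0.0, 1.0]; class 0
# examples are then drawn only via the original-distribution branch (with
# probability m), while class 1 examples are always accepted by the rejector.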
| apache-2.0 |
cctaylor/googleads-python-lib | examples/dfa/v1_20/create_content_category.py | 4 | 1709 | #!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a content category with the given name and description.
The LoadFromStorage method is pulling credentials and properties from a
"googleads.yaml" file. By default, it looks for this file in your home
directory. For more information, see the "Caching authentication information"
section of our README.
Tags: contentcategory.saveContentCategory
"""
__author__ = 'Joseph DiLallo'
import uuid
# Import appropriate modules from the client library.
from googleads import dfa
def main(client):
# Initialize appropriate service.
content_category_service = client.GetService(
'contentcategory', 'v1.20', 'https://advertisersapitest.doubleclick.net')
# Construct and save content category.
content_category = {
'name': 'Category %s' % uuid.uuid4()
}
result = content_category_service.saveContentCategory(
content_category)
# Display results.
print 'Content category with ID \'%s\' was created.' % result['id']
if __name__ == '__main__':
# Initialize client object.
dfa_client = dfa.DfaClient.LoadFromStorage()
main(dfa_client)
| apache-2.0 |
torchingloom/edx-platform | common/lib/xmodule/xmodule/modulestore/xml_importer.py | 2 | 36277 | import logging
import os
import mimetypes
from path import path
import json
from .xml import XMLModuleStore, ImportSystem, ParentTracker
from xmodule.modulestore import Location
from xblock.runtime import KvsFieldData, DictKeyValueStore
from xmodule.x_module import XModuleDescriptor
from xblock.fields import Scope, Reference, ReferenceList, ReferenceValueDict
from xmodule.contentstore.content import StaticContent
from .inheritance import own_metadata
from xmodule.errortracker import make_error_tracker
from .store_utilities import rewrite_nonportable_content_links
import xblock
log = logging.getLogger(__name__)
def import_static_content(
modules, course_loc, course_data_path, static_content_store,
target_location_namespace, subpath='static', verbose=False):
remap_dict = {}
# now import all static assets
static_dir = course_data_path / subpath
try:
with open(course_data_path / 'policies/assets.json') as f:
policy = json.load(f)
except (IOError, ValueError) as err:
# xml backed courses won't have this file, only exported courses;
# so, its absence is not really an exception.
policy = {}
verbose = True
mimetypes_list = mimetypes.types_map.values()
for dirname, _, filenames in os.walk(static_dir):
for filename in filenames:
content_path = os.path.join(dirname, filename)
if filename.endswith('~'):
if verbose:
log.debug('skipping static content %s...', content_path)
continue
if verbose:
log.debug('importing static content %s...', content_path)
try:
with open(content_path, 'rb') as f:
data = f.read()
except IOError:
if filename.startswith('._'):
# OS X "companion files". See
# http://www.diigo.com/annotated/0c936fda5da4aa1159c189cea227e174
continue
# Not a 'hidden file', then re-raise exception
raise
# strip away leading path from the name
fullname_with_subpath = content_path.replace(static_dir, '')
if fullname_with_subpath.startswith('/'):
fullname_with_subpath = fullname_with_subpath[1:]
content_loc = StaticContent.compute_location(
target_location_namespace.org, target_location_namespace.course,
fullname_with_subpath
)
policy_ele = policy.get(content_loc.name, {})
displayname = policy_ele.get('displayname', filename)
locked = policy_ele.get('locked', False)
mime_type = policy_ele.get('contentType')
# Check extracted contentType in list of all valid mimetypes
if not mime_type or mime_type not in mimetypes_list:
mime_type = mimetypes.guess_type(filename)[0] # Assign guessed mimetype
content = StaticContent(
content_loc, displayname, mime_type, data,
import_path=fullname_with_subpath, locked=locked
)
# first let's save a thumbnail so we can get back a thumbnail location
thumbnail_content, thumbnail_location = static_content_store.generate_thumbnail(content)
if thumbnail_content is not None:
content.thumbnail_location = thumbnail_location
# then commit the content
try:
static_content_store.save(content)
except Exception as err:
log.exception('Error importing {0}, error={1}'.format(
fullname_with_subpath, err
))
# store the remapping information which will be needed
            # to substitute in the module data
remap_dict[fullname_with_subpath] = content_loc.name
return remap_dict
def import_from_xml(
store, data_dir, course_dirs=None,
default_class='xmodule.raw_module.RawDescriptor',
load_error_modules=True, static_content_store=None,
target_location_namespace=None, verbose=False, draft_store=None,
do_import_static=True):
"""
Import the specified xml data_dir into the "store" modulestore,
using org and course as the location org and course.
course_dirs: If specified, the list of course_dirs to load. Otherwise, load
all course dirs
target_location_namespace is the namespace [passed as Location]
    (i.e. {tag},{org},{course}) that all modules in the course should be remapped to
after import off disk. We do this remapping as a post-processing step
because there's logic in the importing which expects a 'url_name' as an
identifier to where things are on disk
e.g. ../policies/<url_name>/policy.json as well as metadata keys in
the policy.json. so we need to keep the original url_name during import
:param do_import_static:
if False, then static files are not imported into the static content
store. This can be employed for courses which have substantial
        unchanging static content, which is too inefficient to import every
time the course is loaded. Static content for some courses may also be
served directly by nginx, instead of going through django.
"""
xml_module_store = XMLModuleStore(
data_dir,
default_class=default_class,
course_dirs=course_dirs,
load_error_modules=load_error_modules,
xblock_mixins=store.xblock_mixins,
xblock_select=store.xblock_select,
)
# NOTE: the XmlModuleStore does not implement get_items()
# which would be a preferable means to enumerate the entire collection
# of course modules. It will be left as a TBD to implement that
# method on XmlModuleStore.
course_items = []
for course_id in xml_module_store.modules.keys():
if target_location_namespace is not None:
pseudo_course_id = u'{0.org}/{0.course}'.format(target_location_namespace)
else:
course_id_components = Location.parse_course_id(course_id)
pseudo_course_id = u'{org}/{course}'.format(**course_id_components)
try:
# turn off all write signalling while importing as this
# is a high volume operation on stores that need it
if (hasattr(store, 'ignore_write_events_on_courses') and
pseudo_course_id not in store.ignore_write_events_on_courses):
store.ignore_write_events_on_courses.append(pseudo_course_id)
course_data_path = None
course_location = None
if verbose:
log.debug("Scanning {0} for course module...".format(course_id))
# Quick scan to get course module as we need some info from there.
# Also we need to make sure that the course module is committed
# first into the store
for module in xml_module_store.modules[course_id].itervalues():
if module.scope_ids.block_type == 'course':
course_data_path = path(data_dir) / module.data_dir
course_location = module.location
log.debug('======> IMPORTING course to location {loc}'.format(
loc=course_location
))
module = remap_namespace(module, target_location_namespace)
if not do_import_static:
# for old-style xblock where this was actually linked to kvs
module.static_asset_path = module.data_dir
module.save()
log.debug('course static_asset_path={path}'.format(
path=module.static_asset_path
))
log.debug('course data_dir={0}'.format(module.data_dir))
# cdodge: more hacks (what else). Seems like we have a
# problem when importing a course (like 6.002) which
# does not have any tabs defined in the policy file.
# The import goes fine and then displays fine in LMS,
# but if someone tries to add a new tab in the CMS, then
# the LMS barfs because it expects that -- if there are
# *any* tabs -- then there at least needs to be
# some predefined ones
if module.tabs is None or len(module.tabs) == 0:
module.tabs = [
{"type": "courseware"},
{"type": "course_info", "name": "Course Info"},
{"type": "discussion", "name": "Discussion"},
{"type": "wiki", "name": "Wiki"},
# note, add 'progress' when we can support it on Edge
]
import_module(
module, store, course_data_path, static_content_store,
course_location,
target_location_namespace or course_location,
do_import_static=do_import_static
)
course_items.append(module)
# then import all the static content
if static_content_store is not None and do_import_static:
if target_location_namespace is not None:
_namespace_rename = target_location_namespace
else:
_namespace_rename = course_location
# first pass to find everything in /static/
import_static_content(
xml_module_store.modules[course_id], course_location,
course_data_path, static_content_store,
_namespace_rename, subpath='static', verbose=verbose
)
elif verbose and not do_import_static:
log.debug(
"Skipping import of static content, "
"since do_import_static={0}".format(do_import_static)
)
# no matter what do_import_static is, import "static_import" directory
# This is needed because the "about" pages (eg "overview") are
# loaded via load_extra_content, and do not inherit the lms
# metadata from the course module, and thus do not get
# "static_content_store" properly defined. Static content
# referenced in those extra pages thus need to come through the
# c4x:// contentstore, unfortunately. Tell users to copy that
# content into the "static_import" subdir.
simport = 'static_import'
if os.path.exists(course_data_path / simport):
if target_location_namespace is not None:
_namespace_rename = target_location_namespace
else:
_namespace_rename = course_location
import_static_content(
xml_module_store.modules[course_id], course_location,
course_data_path, static_content_store,
_namespace_rename, subpath=simport, verbose=verbose
)
# finally loop through all the modules
for module in xml_module_store.modules[course_id].itervalues():
if module.scope_ids.block_type == 'course':
# we've already saved the course module up at the top
# of the loop so just skip over it in the inner loop
continue
# remap module to the new namespace
if target_location_namespace is not None:
module = remap_namespace(module, target_location_namespace)
if verbose:
log.debug('importing module location {loc}'.format(
loc=module.location
))
import_module(
module, store, course_data_path, static_content_store,
course_location,
target_location_namespace if target_location_namespace else course_location,
do_import_static=do_import_static
)
# now import any 'draft' items
if draft_store is not None:
import_course_draft(
xml_module_store,
store,
draft_store,
course_data_path,
static_content_store,
course_location,
target_location_namespace if target_location_namespace else course_location
)
finally:
# turn back on all write signalling on stores that need it
if (hasattr(store, 'ignore_write_events_on_courses') and
pseudo_course_id in store.ignore_write_events_on_courses):
store.ignore_write_events_on_courses.remove(pseudo_course_id)
store.refresh_cached_metadata_inheritance_tree(
target_location_namespace if target_location_namespace is not None else course_location
)
return xml_module_store, course_items
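# Illustrative usage sketch (editorial note, not part of the original module).
# The store objects, data directory and course directory below are
# hypothetical placeholders:
#
#   xml_store, courses = import_from_xml(
#       store, '/edx/var/data', course_dirs=['MITx-6.002x'],
#       static_content_store=content_store, verbose=True)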
def import_module(
module, store, course_data_path, static_content_store,
source_course_location, dest_course_location, allow_not_found=False,
do_import_static=True):
logging.debug(u'processing import of module {}...'.format(module.location.url()))
if do_import_static and 'data' in module.fields and isinstance(module.fields['data'], xblock.fields.String):
# we want to convert all 'non-portable' links in the module_data
# (if it is a string) to portable strings (e.g. /static/)
module.data = rewrite_nonportable_content_links(
source_course_location.course_id,
dest_course_location.course_id, module.data
)
# remove any export/import only xml_attributes
# which are used to wire together draft imports
if 'parent_sequential_url' in getattr(module, 'xml_attributes', []):
del module.xml_attributes['parent_sequential_url']
if 'index_in_children_list' in getattr(module, 'xml_attributes', []):
del module.xml_attributes['index_in_children_list']
store.update_item(module, '**replace_user**', allow_not_found=allow_not_found)
def import_course_draft(
xml_module_store, store, draft_store, course_data_path,
static_content_store, source_location_namespace,
target_location_namespace):
'''
This will import all the content inside of the 'drafts' folder, if it exists
NOTE: This is not a full course import, basically in our current
application only verticals (and downwards) can be in draft.
Therefore, we need to use slightly different call points into
the import process_xml as we can't simply call XMLModuleStore() constructor
(like we do for importing public content)
'''
draft_dir = course_data_path + "/drafts"
if not os.path.exists(draft_dir):
return
# create a new 'System' object which will manage the importing
errorlog = make_error_tracker()
# The course_dir as passed to ImportSystem is expected to just be relative, not
# the complete path including data_dir. ImportSystem will concatenate the two together.
data_dir = xml_module_store.data_dir
# Whether or not data_dir ends with a "/" differs in production vs. test.
if not data_dir.endswith("/"):
data_dir += "/"
draft_course_dir = draft_dir.replace(data_dir, '', 1)
system = ImportSystem(
xmlstore=xml_module_store,
course_id=target_location_namespace.course_id,
course_dir=draft_course_dir,
error_tracker=errorlog.tracker,
parent_tracker=ParentTracker(),
load_error_modules=False,
mixins=xml_module_store.xblock_mixins,
field_data=KvsFieldData(kvs=DictKeyValueStore()),
)
# now walk the /vertical directory where each file in there
# will be a draft copy of the Vertical
# First it is necessary to order the draft items by their desired index in the child list
# (order os.walk returns them in is not guaranteed).
drafts = dict()
for dirname, _dirnames, filenames in os.walk(draft_dir + "/vertical"):
for filename in filenames:
module_path = os.path.join(dirname, filename)
with open(module_path, 'r') as f:
try:
# note, on local dev it seems like OSX will put
# some extra files in the directory with "quarantine"
# information. These files are binary files and will
# throw exceptions when we try to parse the file
# as an XML string. Let's make sure we're
# dealing with a string before ingesting
data = f.read()
try:
xml = data.decode('utf-8')
except UnicodeDecodeError, err:
# seems like on OSX localdev, the OS is making
# quarantine files in the unzip directory
# when importing courses so if we blindly try to
# enumerate through the directory, we'll try
# to process a bunch of binary quarantine files
# (which are prefixed with a '._' character which
# will dump a bunch of exceptions to the output,
# although they are harmless.
#
# Reading online docs there doesn't seem to be
# a good means to detect a 'hidden' file that works
# well across all OS environments. So for now, I'm using
# OSX's utilization of a leading '.' in the filename
# to indicate a system hidden file.
#
# Better yet would be a way to figure out if this is
# a binary file, but I haven't found a good way
# to do this yet.
if filename.startswith('._'):
continue
# Not a 'hidden file', then re-raise exception
raise err
descriptor = system.process_xml(xml)
# HACK: since we are doing partial imports of drafts
# the vertical doesn't have the 'url-name' set in the
# attributes (they are normally in the parent object,
# aka sequential), so we have to replace the location.name
# with the XML filename that is part of the pack
fn, fileExtension = os.path.splitext(filename)
descriptor.location = descriptor.location.replace(name=fn)
index = int(descriptor.xml_attributes['index_in_children_list'])
if index in drafts:
drafts[index].append(descriptor)
else:
drafts[index] = [descriptor]
except Exception, e:
logging.exception('There was an error. {err}'.format(
err=unicode(e)
))
# For each index_in_children_list key, there is a list of vertical descriptors.
for key in sorted(drafts.iterkeys()):
for descriptor in drafts[key]:
try:
def _import_module(module):
# Update the module's location to "draft" revision
# We need to call this method (instead of updating the location directly)
# to ensure that pure XBlock field data is updated correctly.
_update_module_location(module, module.location.replace(revision='draft'))
# make sure our parent has us in its list of children
# this is to make sure private only verticals show up
# in the list of children since they would have been
# filtered out from the non-draft store export
if module.location.category == 'vertical':
non_draft_location = module.location.replace(revision=None)
sequential_url = module.xml_attributes['parent_sequential_url']
index = int(module.xml_attributes['index_in_children_list'])
seq_location = Location(sequential_url)
# IMPORTANT: Be sure to update the sequential
# in the NEW namespace
seq_location = seq_location.replace(
org=target_location_namespace.org,
course=target_location_namespace.course
)
sequential = store.get_item(seq_location, depth=0)
if non_draft_location.url() not in sequential.children:
sequential.children.insert(index, non_draft_location.url())
store.update_item(sequential, '**replace_user**')
import_module(
module, draft_store, course_data_path,
static_content_store, source_location_namespace,
target_location_namespace, allow_not_found=True
)
for child in module.get_children():
_import_module(child)
_import_module(descriptor)
except Exception, e:
logging.exception('There was an error. {err}'.format(
err=unicode(e)
))
def remap_namespace(module, target_location_namespace):
if target_location_namespace is None:
return module
original_location = module.location
# This looks a bit wonky as we need to also change the 'name' of the
# imported course to be what the caller passed in
if module.location.category != 'course':
_update_module_location(
module,
module.location.replace(
tag=target_location_namespace.tag,
org=target_location_namespace.org,
course=target_location_namespace.course
)
)
else:
#
# module is a course module
#
module.location = module.location.replace(
tag=target_location_namespace.tag,
org=target_location_namespace.org,
course=target_location_namespace.course,
name=target_location_namespace.name
)
# There is more re-namespacing work we have to do when
# importing course modules
# remap pdf_textbook urls
for entry in module.pdf_textbooks:
for chapter in entry.get('chapters', []):
if StaticContent.is_c4x_path(chapter.get('url', '')):
chapter['url'] = StaticContent.renamespace_c4x_path(
chapter['url'], target_location_namespace
)
# Original wiki_slugs had value location.course. To make them unique this was changed to 'org.course.name'.
# If we are importing into a course with a different course_id and wiki_slug is equal to either of these default
# values then remap it so that the wiki does not point to the old wiki.
if original_location.course_id != target_location_namespace.course_id:
original_unique_wiki_slug = u'{0}.{1}.{2}'.format(
original_location.org,
original_location.course,
original_location.name
)
if module.wiki_slug == original_unique_wiki_slug or module.wiki_slug == original_location.course:
module.wiki_slug = u'{0}.{1}.{2}'.format(
target_location_namespace.org,
target_location_namespace.course,
target_location_namespace.name,
)
module.save()
all_fields = module.get_explicitly_set_fields_by_scope(Scope.content)
all_fields.update(module.get_explicitly_set_fields_by_scope(Scope.settings))
if hasattr(module, 'children'):
all_fields['children'] = module.children
def convert_ref(reference):
"""
Convert a reference to the new namespace, but only
if the original namespace matched the original course.
Otherwise, returns the input value.
"""
new_ref = reference
ref = Location(reference)
in_original_namespace = (original_location.tag == ref.tag and
original_location.org == ref.org and
original_location.course == ref.course)
if in_original_namespace:
new_ref = ref.replace(
tag=target_location_namespace.tag,
org=target_location_namespace.org,
course=target_location_namespace.course
).url()
return new_ref
for field_name in all_fields:
field_object = module.fields.get(field_name)
if isinstance(field_object, Reference):
new_ref = convert_ref(getattr(module, field_name))
setattr(module, field_name, new_ref)
module.save()
elif isinstance(field_object, ReferenceList):
references = getattr(module, field_name)
new_references = [convert_ref(reference) for reference in references]
setattr(module, field_name, new_references)
module.save()
elif isinstance(field_object, ReferenceValueDict):
reference_dict = getattr(module, field_name)
new_reference_dict = {
key: convert_ref(reference)
for key, reference
in reference_dict.items()
}
setattr(module, field_name, new_reference_dict)
module.save()
return module
def allowed_metadata_by_category(category):
# should this be in the descriptors?!?
return {
'vertical': [],
'chapter': ['start'],
'sequential': ['due', 'format', 'start', 'graded']
}.get(category, ['*'])
def check_module_metadata_editability(module):
'''
Assert that there is no metadata within a particular module that
we can't support editing. However we always allow 'display_name'
and 'xml_attributes'
'''
allowed = allowed_metadata_by_category(module.location.category)
if '*' in allowed:
# everything is allowed
return 0
allowed = allowed + ['xml_attributes', 'display_name']
err_cnt = 0
illegal_keys = set(own_metadata(module).keys()) - set(allowed)
if len(illegal_keys) > 0:
err_cnt = err_cnt + 1
print(
": found non-editable metadata on {url}. "
"These metadata keys are not supported = {keys}".format(
url=module.location.url(), keys=illegal_keys
)
)
return err_cnt
def validate_no_non_editable_metadata(module_store, course_id, category):
err_cnt = 0
for module_loc in module_store.modules[course_id]:
module = module_store.modules[course_id][module_loc]
if module.location.category == category:
err_cnt = err_cnt + check_module_metadata_editability(module)
return err_cnt
def validate_category_hierarchy(
module_store, course_id, parent_category, expected_child_category):
err_cnt = 0
parents = []
# get all modules of parent_category
for module in module_store.modules[course_id].itervalues():
if module.location.category == parent_category:
parents.append(module)
for parent in parents:
for child_loc in [Location(child) for child in parent.children]:
if child_loc.category != expected_child_category:
err_cnt += 1
print(
"ERROR: child {child} of parent {parent} was expected to be "
"category of {expected} but was {actual}".format(
child=child_loc, parent=parent.location,
expected=expected_child_category,
actual=child_loc.category
)
)
return err_cnt
def validate_data_source_path_existence(path, is_err=True, extra_msg=None):
_cnt = 0
if not os.path.exists(path):
print(
"{type}: Expected folder at {path}. {extra}".format(
type='ERROR' if is_err else 'WARNING',
path=path,
extra=extra_msg or "",
)
)
_cnt = 1
return _cnt
def validate_data_source_paths(data_dir, course_dir):
# check that there is a '/static/' directory
course_path = data_dir / course_dir
err_cnt = 0
warn_cnt = 0
err_cnt += validate_data_source_path_existence(course_path / 'static')
warn_cnt += validate_data_source_path_existence(
course_path / 'static/subs', is_err=False,
        extra_msg='Video captions (if they are used) will not work unless they are in static/subs.'
)
return err_cnt, warn_cnt
def validate_course_policy(module_store, course_id):
"""
Validate that the course explicitly sets values for any fields
whose defaults may have changed between the export and the import.
Does not add to error count as these are just warnings.
"""
# is there a reliable way to get the module location just given the course_id?
warn_cnt = 0
for module in module_store.modules[course_id].itervalues():
if module.location.category == 'course':
if not module._field_data.has(module, 'rerandomize'):
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"rerandomize" whose default is now "never". '
'The behavior of your course may change.'
)
if not module._field_data.has(module, 'showanswer'):
warn_cnt += 1
print(
'WARN: course policy does not specify value for '
'"showanswer" whose default is now "finished". '
'The behavior of your course may change.'
)
return warn_cnt
def perform_xlint(
data_dir, course_dirs,
default_class='xmodule.raw_module.RawDescriptor',
load_error_modules=True):
err_cnt = 0
warn_cnt = 0
module_store = XMLModuleStore(
data_dir,
default_class=default_class,
course_dirs=course_dirs,
load_error_modules=load_error_modules
)
# check all data source path information
for course_dir in course_dirs:
_err_cnt, _warn_cnt = validate_data_source_paths(path(data_dir), course_dir)
err_cnt += _err_cnt
warn_cnt += _warn_cnt
# first count all errors and warnings as part of the XMLModuleStore import
for err_log in module_store._location_errors.itervalues():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
# then count outright all courses that failed to load at all
for err_log in module_store.errored_courses.itervalues():
for err_log_entry in err_log.errors:
msg = err_log_entry[0]
print(msg)
if msg.startswith('ERROR:'):
err_cnt += 1
else:
warn_cnt += 1
for course_id in module_store.modules.keys():
# constrain that courses only have 'chapter' children
err_cnt += validate_category_hierarchy(
module_store, course_id, "course", "chapter"
)
# constrain that chapters only have 'sequentials'
err_cnt += validate_category_hierarchy(
module_store, course_id, "chapter", "sequential"
)
# constrain that sequentials only have 'verticals'
err_cnt += validate_category_hierarchy(
module_store, course_id, "sequential", "vertical"
)
# validate the course policy overrides any defaults
# which have changed over time
warn_cnt += validate_course_policy(module_store, course_id)
# don't allow metadata on verticals, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "vertical"
)
# don't allow metadata on chapters, since we can't edit them in studio
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "chapter"
)
# don't allow metadata on sequences that we can't edit
err_cnt += validate_no_non_editable_metadata(
module_store, course_id, "sequential"
)
# check for a presence of a course marketing video
location_elements = Location.parse_course_id(course_id)
location_elements['tag'] = 'i4x'
location_elements['category'] = 'about'
location_elements['name'] = 'video'
loc = Location(location_elements)
if loc not in module_store.modules[course_id]:
print(
"WARN: Missing course marketing video. It is recommended "
"that every course have a marketing video."
)
warn_cnt += 1
print("\n")
print("------------------------------------------")
print("VALIDATION SUMMARY: {err} Errors {warn} Warnings".format(
err=err_cnt, warn=warn_cnt)
)
if err_cnt > 0:
print(
"This course is not suitable for importing. Please fix courseware "
"according to specifications before importing."
)
elif warn_cnt > 0:
print(
"This course can be imported, but some errors may occur "
"during the run of the course. It is recommend that you fix "
"your courseware before importing"
)
else:
print("This course can be imported successfully.")
return err_cnt
def _update_module_location(module, new_location):
"""
Update a module's location.
If the module is a pure XBlock (not an XModule), then its field data
keys will need to be updated to include the new location.
Args:
module (XModuleMixin): The module to update.
new_location (Location): The new location of the module.
Returns:
None
"""
# Retrieve the content and settings fields that have been explicitly set
# to ensure that they are properly re-keyed in the XBlock field data.
if isinstance(module, XModuleDescriptor):
rekey_fields = []
else:
rekey_fields = (
module.get_explicitly_set_fields_by_scope(Scope.content).keys() +
module.get_explicitly_set_fields_by_scope(Scope.settings).keys()
)
module.location = new_location
# Pure XBlocks store the field data in a key-value store
# in which one component of the key is the XBlock's location (equivalent to "scope_ids").
# Since we've changed the XBlock's location, we need to re-save
# all the XBlock's fields so they will be stored using the new location in the key.
# However, since XBlocks only save "dirty" fields, we need to first
# explicitly set each field to its current value before triggering the save.
if len(rekey_fields) > 0:
for rekey_field_name in rekey_fields:
setattr(module, rekey_field_name, getattr(module, rekey_field_name))
module.save()
| agpl-3.0 |
DavidNorman/tensorflow | tensorflow/lite/python/convert_saved_model.py | 17 | 7669 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to convert SavedModel to frozen GraphDefs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.python import util
from tensorflow.core.framework import types_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import loader
def _log_tensor_details(tensor_info):
"""Log tensor details: name, shape, and type."""
for key in tensor_info:
val = tensor_info[key]
dtype = types_pb2.DataType.Name(val.dtype)
if val.tensor_shape.unknown_rank:
shape = "unknown_rank"
else:
dims = [str(dim.size) for dim in val.tensor_shape.dim]
shape = "({})".format(", ".join(dims))
logging.info("Tensor's key in saved_model's tensor_map: %s", key)
logging.info(" tensor name: %s, shape: %s, type: %s", val.name, shape,
dtype)
def get_meta_graph_def(saved_model_dir, tag_set):
"""Validate saved_model and extract MetaGraphDef.
Args:
saved_model_dir: saved_model path to convert.
tag_set: Set of tag(s) of the MetaGraphDef to load.
Returns:
The meta_graph_def used for tflite conversion.
Raises:
ValueError: No valid MetaGraphDef for given tag_set.
"""
with session.Session(graph=ops.Graph()) as sess:
return loader.load(sess, tag_set, saved_model_dir)
def get_signature_def(meta_graph, signature_key):
"""Get the signature def from meta_graph with given signature_key.
Args:
meta_graph: meta_graph_def.
signature_key: signature_def in the meta_graph_def.
Returns:
The signature_def used for tflite conversion.
Raises:
ValueError: Given signature_key is not valid for this meta_graph.
"""
signature_def_map = meta_graph.signature_def
signature_def_keys = set(signature_def_map.keys())
logging.info(
"The given SavedModel MetaGraphDef contains SignatureDefs with the "
"following keys: %s", signature_def_keys)
if signature_key not in signature_def_keys:
raise ValueError("No '{}' in the SavedModel\'s SignatureDefs. Possible "
"values are '{}'.".format(signature_key,
",".join(signature_def_keys)))
return signature_def_map[signature_key]
def get_inputs_outputs(signature_def):
"""Get inputs and outputs from SignatureDef.
Args:
signature_def: SignatureDef in the meta_graph_def for conversion.
Returns:
The inputs and outputs in the graph for conversion.
"""
inputs_tensor_info = signature_def.inputs
outputs_tensor_info = signature_def.outputs
logging.info("input tensors info: ")
_log_tensor_details(inputs_tensor_info)
logging.info("output tensors info: ")
_log_tensor_details(outputs_tensor_info)
def gather_names(tensor_info):
return [tensor_info[key].name for key in tensor_info]
inputs = gather_names(inputs_tensor_info)
outputs = gather_names(outputs_tensor_info)
return inputs, outputs
def _get_tensors(graph, signature_def_tensor_names=None,
user_tensor_names=None):
"""Gets the tensors associated with the tensor names.
Either signature_def_tensor_names or user_tensor_names should be provided. If
  the user provides tensor names, the tensors associated with those names are
  returned. Otherwise, the tensors associated with the names in the
  SignatureDef are returned.
Args:
graph: GraphDef representing graph.
signature_def_tensor_names: Tensor names stored in either the inputs or
outputs of a SignatureDef. (default None)
user_tensor_names: Tensor names provided by the user. (default None)
Returns:
List of tensors.
Raises:
ValueError:
signature_def_tensors and user_tensor_names are undefined or empty.
user_tensor_names are not valid.
"""
tensors = []
if user_tensor_names:
# Sort the tensor names.
user_tensor_names = sorted(user_tensor_names)
tensors = util.get_tensors_from_tensor_names(graph, user_tensor_names)
elif signature_def_tensor_names:
tensors = [
graph.get_tensor_by_name(name)
for name in sorted(signature_def_tensor_names)
]
else:
# Throw ValueError if signature_def_tensors and user_tensor_names are both
# either undefined or empty.
raise ValueError(
"Specify either signature_def_tensor_names or user_tensor_names")
return tensors
def freeze_saved_model(saved_model_dir, input_arrays, input_shapes,
output_arrays, tag_set, signature_key):
"""Converts a SavedModel to a frozen graph.
Args:
saved_model_dir: SavedModel directory to convert.
input_arrays: List of input tensors to freeze graph with. Uses input arrays
from SignatureDef when none are provided.
input_shapes: Dict of strings representing input tensor names to list of
      integers representing input shapes (e.g., {"foo" : [1, 16, 16, 3]}).
Automatically determined when input shapes is None (e.g., {"foo" : None}).
output_arrays: List of output tensors to freeze graph with. Uses output
arrays from SignatureDef when none are provided.
tag_set: Set of tags identifying the MetaGraphDef within the SavedModel to
analyze. All tags in the tag set must be present.
signature_key: Key identifying SignatureDef containing inputs and outputs.
Returns:
frozen_graph_def: Frozen GraphDef.
in_tensors: List of input tensors for the graph.
out_tensors: List of output tensors for the graph.
graph: `Graph` object.
Raises:
ValueError:
SavedModel doesn't contain a MetaGraphDef identified by tag_set.
signature_key is not in the MetaGraphDef.
assets/ directory is in the MetaGraphDef.
input_shapes does not match the length of input_arrays.
input_arrays or output_arrays are not valid.
"""
# Read SignatureDef.
meta_graph = get_meta_graph_def(saved_model_dir, tag_set)
signature_def = get_signature_def(meta_graph, signature_key)
inputs, outputs = get_inputs_outputs(signature_def)
# Check SavedModel for assets directory.
collection_def = meta_graph.collection_def
if constants.ASSETS_KEY in collection_def:
raise ValueError("SavedModels with assets/ directory are not supported.")
graph = ops.Graph()
with session.Session(graph=graph) as sess:
loader.load(sess, meta_graph.meta_info_def.tags, saved_model_dir)
# Gets input and output tensors.
# TODO(zhixianyan): Use TFLite supported Op list to filter outputs.
in_tensors = _get_tensors(graph, inputs, input_arrays)
out_tensors = _get_tensors(graph, outputs, output_arrays)
util.set_tensor_shapes(in_tensors, input_shapes)
frozen_graph_def = util.freeze_graph(sess, in_tensors, out_tensors)
return frozen_graph_def, in_tensors, out_tensors, sess.graph
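# Illustrative usage sketch (editorial note, not part of the original module).
# The SavedModel path, tag and signature values below are hypothetical
# placeholders:
#
#   graph_def, in_tensors, out_tensors, graph = freeze_saved_model(
#       saved_model_dir='/tmp/saved_model', input_arrays=None,
#       input_shapes=None, output_arrays=None, tag_set=set(['serve']),
#       signature_key='serving_default')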
| apache-2.0 |
AtonLerin/pymel | pymel/core/rendering.py | 8 | 4108 | """Functions related to rendering"""
import pymel.util as _util
import pymel.internal.factories as _factories
import general as _general
import language as _language
import pymel.internal.pmcmds as cmds
def shadingNode(*args, **kwargs):
res = cmds.shadingNode(*args, **kwargs)
if res is not None:
return _general.PyNode(res)
def createSurfaceShader(shadertype, name=None):
"""
create a shader and shading group
"""
classification = _general.getClassification(shadertype)
# print classification
newShader = None
import nodetypes
# if 'shader/surface' in classification:
if 'rendernode/mentalray/material' in classification:
newShader = nodetypes.DependNode(_language.mel.mrCreateCustomNode("-asShader", "", shadertype))
else:
newShader = nodetypes.DependNode(_language.mel.renderCreateNode("-asShader", "surfaceShader", shadertype, "", 0, 0, 0, 1, 0, ""))
# else:
# raise TypeError, "%s is not a valid surface shader type. shader must be classified as 'shader/surface'" % shadertype
sg = newShader.shadingGroups()[0]
if name:
newShader = newShader.rename(name)
sg = sg.rename(name + 'SG')
return newShader, sg
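# Illustrative usage sketch (editorial note, not part of the original module);
# the shader type and name below are arbitrary examples:
#
#   shader, sg = createSurfaceShader('blinn', name='myCharacterShader')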
def lsThroughFilter(*args, **kwargs):
"""
Modifications:
- returns an empty list when the result is None
- returns wrapped classes
"""
return map(_general.PyNode, _util.listForNone(cmds.lsThroughFilter(*args, **kwargs)))
def pointLight(*args, **kwargs):
"""
Maya Bug Fix:
- name flag was ignored
"""
if kwargs.get('query', kwargs.get('q', False)) or kwargs.get('edit', kwargs.get('e', False)):
return cmds.pointLight(*args, **kwargs)
else:
name = kwargs.pop('name', kwargs.pop('n', False))
if name:
tmp = cmds.pointLight(*args, **kwargs)
tmp = cmds.rename(cmds.listRelatives(tmp, parent=1)[0], name)
return _general.PyNode(cmds.listRelatives(tmp, shapes=1)[0])
return _general.PyNode(cmds.pointLight(*args, **kwargs))
def spotLight(*args, **kwargs):
"""
Maya Bug Fix:
- name flag was ignored
"""
if kwargs.get('query', kwargs.get('q', False)) or kwargs.get('edit', kwargs.get('e', False)):
return cmds.spotLight(*args, **kwargs)
else:
name = kwargs.pop('name', kwargs.pop('n', False))
if name:
tmp = cmds.spotLight(*args, **kwargs)
tmp = cmds.rename(cmds.listRelatives(tmp, parent=1)[0], name)
return _general.PyNode(cmds.listRelatives(tmp, shapes=1)[0])
return _general.PyNode(cmds.spotLight(*args, **kwargs))
def directionalLight(*args, **kwargs):
"""
Maya Bug Fix:
- name flag was ignored
"""
if kwargs.get('query', kwargs.get('q', False)) or kwargs.get('edit', kwargs.get('e', False)):
return cmds.directionalLight(*args, **kwargs)
else:
name = kwargs.pop('name', kwargs.pop('n', False))
if name:
tmp = cmds.directionalLight(*args, **kwargs)
tmp = cmds.rename(cmds.listRelatives(tmp, parent=1)[0], name)
return _general.PyNode(cmds.listRelatives(tmp, shapes=1)[0])
return _general.PyNode(cmds.directionalLight(*args, **kwargs))
def ambientLight(*args, **kwargs):
"""
Maya Bug Fix:
- name flag was ignored
"""
if kwargs.get('query', kwargs.get('q', False)) or kwargs.get('edit', kwargs.get('e', False)):
return cmds.ambientLight(*args, **kwargs)
else:
name = kwargs.pop('name', kwargs.pop('n', False))
if name:
tmp = cmds.ambientLight(*args, **kwargs)
tmp = cmds.rename(cmds.listRelatives(tmp, parent=1)[0], name)
return _general.PyNode(cmds.listRelatives(tmp, shapes=1)[0])
return _general.PyNode(cmds.ambientLight(*args, **kwargs))
# def createRenderLayer(*args, **kwargs):
# return _general.PyNode( cmds.createRenderLayer(*args, **kwargs) )
#
# def createDisplayLayer(*args, **kwargs):
# return _general.PyNode( cmds.createDisplayLayer(*args, **kwargs) )
_factories.createFunctions(__name__, _general.PyNode)
| bsd-3-clause |
byakuinss/spark | python/pyspark/mllib/stat/_statistics.py | 66 | 13383 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
if sys.version >= '3':
basestring = str
from pyspark.rdd import RDD, ignore_unicode_prefix
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
from pyspark.mllib.stat.test import ChiSqTestResult, KolmogorovSmirnovTestResult
__all__ = ['MultivariateStatisticalSummary', 'Statistics']
class MultivariateStatisticalSummary(JavaModelWrapper):
"""
Trait for multivariate statistical summary of a data matrix.
"""
def mean(self):
return self.call("mean").toArray()
def variance(self):
return self.call("variance").toArray()
def count(self):
return int(self.call("count"))
def numNonzeros(self):
return self.call("numNonzeros").toArray()
def max(self):
return self.call("max").toArray()
def min(self):
return self.call("min").toArray()
def normL1(self):
return self.call("normL1").toArray()
def normL2(self):
return self.call("normL2").toArray()
class Statistics(object):
@staticmethod
def colStats(rdd):
"""
Computes column-wise summary statistics for the input RDD[Vector].
:param rdd: an RDD[Vector] for which column-wise summary statistics
are to be computed.
:return: :class:`MultivariateStatisticalSummary` object containing
column-wise summary statistics.
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
... Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8])])
>>> cStats = Statistics.colStats(rdd)
>>> cStats.mean()
array([ 4., 4., 0., 3.])
>>> cStats.variance()
array([ 4., 13., 0., 25.])
>>> cStats.count()
3
>>> cStats.numNonzeros()
array([ 3., 2., 0., 3.])
>>> cStats.max()
array([ 6., 7., 0., 8.])
>>> cStats.min()
array([ 2., 0., 0., -2.])
"""
cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
return MultivariateStatisticalSummary(cStats)
@staticmethod
def corr(x, y=None, method=None):
"""
Compute the correlation (matrix) for the input RDD(s) using the
specified method.
Methods currently supported: I{pearson (default), spearman}.
If a single RDD of Vectors is passed in, a correlation matrix
comparing the columns in the input RDD is returned. Use C{method=}
        to specify the method to be used for single RDD input.
If two RDDs of floats are passed in, a single float is returned.
:param x: an RDD of vector for which the correlation matrix is to be computed,
or an RDD of float of the same cardinality as y when y is specified.
:param y: an RDD of float of the same cardinality as x.
:param method: String specifying the method to use for computing correlation.
Supported: `pearson` (default), `spearman`
:return: Correlation matrix comparing columns in x.
>>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
>>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
>>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
>>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
True
>>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
True
>>> Statistics.corr(x, y, "spearman")
0.5
>>> from math import isnan
>>> isnan(Statistics.corr(x, zeros))
True
>>> from pyspark.mllib.linalg import Vectors
>>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
... Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
>>> pearsonCorr = Statistics.corr(rdd)
>>> print(str(pearsonCorr).replace('nan', 'NaN'))
[[ 1. 0.05564149 NaN 0.40047142]
[ 0.05564149 1. NaN 0.91359586]
[ NaN NaN 1. NaN]
[ 0.40047142 0.91359586 NaN 1. ]]
>>> spearmanCorr = Statistics.corr(rdd, method="spearman")
>>> print(str(spearmanCorr).replace('nan', 'NaN'))
[[ 1. 0.10540926 NaN 0.4 ]
[ 0.10540926 1. NaN 0.9486833 ]
[ NaN NaN 1. NaN]
[ 0.4 0.9486833 NaN 1. ]]
>>> try:
... Statistics.corr(rdd, "spearman")
... print("Method name as second argument without 'method=' shouldn't be allowed.")
... except TypeError:
... pass
"""
# Check inputs to determine whether a single value or a matrix is needed for output.
# Since it's legal for users to use the method name as the second argument, we need to
# check if y is used to specify the method name instead.
if type(y) == str:
raise TypeError("Use 'method=' to specify method name.")
if not y:
return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
else:
return callMLlibFunc("corr", x.map(float), y.map(float), method)
@staticmethod
@ignore_unicode_prefix
def chiSqTest(observed, expected=None):
"""
If `observed` is Vector, conduct Pearson's chi-squared goodness
of fit test of the observed data against the expected distribution,
        or against the uniform distribution (by default), with each category
having an expected frequency of `1 / len(observed)`.
If `observed` is matrix, conduct Pearson's independence test on the
input contingency matrix, which cannot contain negative entries or
columns or rows that sum up to 0.
If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
test for every feature against the label across the input RDD.
For each feature, the (feature, label) pairs are converted into a
contingency matrix for which the chi-squared statistic is computed.
All label and feature values must be categorical.
.. note:: `observed` cannot contain negative values
:param observed: it could be a vector containing the observed categorical
counts/relative frequencies, or the contingency matrix
(containing either counts or relative frequencies),
or an RDD of LabeledPoint containing the labeled dataset
with categorical features. Real-valued features will be
treated as categorical for each distinct value.
:param expected: Vector containing the expected categorical counts/relative
frequencies. `expected` is rescaled if the `expected` sum
differs from the `observed` sum.
:return: ChiSquaredTest object containing the test statistic, degrees
of freedom, p-value, the method used, and the null hypothesis.
>>> from pyspark.mllib.linalg import Vectors, Matrices
>>> observed = Vectors.dense([4, 6, 5])
>>> pearson = Statistics.chiSqTest(observed)
>>> print(pearson.statistic)
0.4
>>> pearson.degreesOfFreedom
2
>>> print(round(pearson.pValue, 4))
0.8187
>>> pearson.method
u'pearson'
>>> pearson.nullHypothesis
u'observed follows the same distribution as expected.'
>>> observed = Vectors.dense([21, 38, 43, 80])
>>> expected = Vectors.dense([3, 5, 7, 20])
>>> pearson = Statistics.chiSqTest(observed, expected)
>>> print(round(pearson.pValue, 4))
0.0027
>>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
>>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
>>> print(round(chi.statistic, 4))
21.9958
>>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
... LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
... LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
... LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
... LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
>>> rdd = sc.parallelize(data, 4)
>>> chi = Statistics.chiSqTest(rdd)
>>> print(chi[0].statistic)
0.75
>>> print(chi[1].statistic)
1.5
"""
if isinstance(observed, RDD):
if not isinstance(observed.first(), LabeledPoint):
raise ValueError("observed should be an RDD of LabeledPoint")
jmodels = callMLlibFunc("chiSqTest", observed)
return [ChiSqTestResult(m) for m in jmodels]
if isinstance(observed, Matrix):
jmodel = callMLlibFunc("chiSqTest", observed)
else:
if expected and len(expected) != len(observed):
raise ValueError("`expected` should have same length with `observed`")
jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
return ChiSqTestResult(jmodel)
@staticmethod
@ignore_unicode_prefix
def kolmogorovSmirnovTest(data, distName="norm", *params):
"""
Performs the Kolmogorov-Smirnov (KS) test for data sampled from
a continuous distribution. It tests the null hypothesis that
the data is generated from a particular distribution.
The given data is sorted and the Empirical Cumulative
Distribution Function (ECDF) is calculated
which for a given point is the number of points having a CDF
value lesser than it divided by the total number of points.
Since the data is sorted, this is a step function
that rises by (1 / length of data) for every ordered point.
The KS statistic gives us the maximum distance between the
        ECDF and the CDF. Intuitively, if this statistic is large, the
        probability that the null hypothesis is true becomes small.
For specific details of the implementation, please have a look
at the Scala documentation.
:param data: RDD, samples from the data
:param distName: string, currently only "norm" is supported.
(Normal distribution) to calculate the
theoretical distribution of the data.
:param params: additional values which need to be provided for
a certain distribution.
If not provided, the default values are used.
:return: KolmogorovSmirnovTestResult object containing the test
statistic, degrees of freedom, p-value,
the method used, and the null hypothesis.
>>> kstest = Statistics.kolmogorovSmirnovTest
>>> data = sc.parallelize([-1.0, 0.0, 1.0])
>>> ksmodel = kstest(data, "norm")
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
>>> ksmodel.nullHypothesis
u'Sample follows theoretical distribution'
>>> data = sc.parallelize([2.0, 3.0, 4.0])
>>> ksmodel = kstest(data, "norm", 3.0, 1.0)
>>> print(round(ksmodel.pValue, 3))
1.0
>>> print(round(ksmodel.statistic, 3))
0.175
"""
if not isinstance(data, RDD):
raise TypeError("data should be an RDD, got %s." % type(data))
if not isinstance(distName, basestring):
raise TypeError("distName should be a string, got %s." % type(distName))
params = [float(param) for param in params]
return KolmogorovSmirnovTestResult(
callMLlibFunc("kolmogorovSmirnovTest", data, distName, params))
def _test():
import doctest
from pyspark.sql import SparkSession
globs = globals().copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("mllib.stat.statistics tests")\
.getOrCreate()
globs['sc'] = spark.sparkContext
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
spark.stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
atmchrispark/linux-at91 | tools/perf/tests/attr.py | 3174 | 9441 | #! /usr/bin/python
import os
import sys
import glob
import optparse
import tempfile
import logging
import shutil
import ConfigParser
class Fail(Exception):
def __init__(self, test, msg):
self.msg = msg
self.test = test
def getMsg(self):
return '\'%s\' - %s' % (self.test.path, self.msg)
class Unsup(Exception):
def __init__(self, test):
self.test = test
def getMsg(self):
return '\'%s\'' % self.test.path
class Event(dict):
terms = [
'cpu',
'flags',
'type',
'size',
'config',
'sample_period',
'sample_type',
'read_format',
'disabled',
'inherit',
'pinned',
'exclusive',
'exclude_user',
'exclude_kernel',
'exclude_hv',
'exclude_idle',
'mmap',
'comm',
'freq',
'inherit_stat',
'enable_on_exec',
'task',
'watermark',
'precise_ip',
'mmap_data',
'sample_id_all',
'exclude_host',
'exclude_guest',
'exclude_callchain_kernel',
'exclude_callchain_user',
'wakeup_events',
'bp_type',
'config1',
'config2',
'branch_sample_type',
'sample_regs_user',
'sample_stack_user',
]
def add(self, data):
for key, val in data:
log.debug(" %s = %s" % (key, val))
self[key] = val
def __init__(self, name, data, base):
log.debug(" Event %s" % name);
self.name = name;
self.group = ''
self.add(base)
self.add(data)
def compare_data(self, a, b):
# Allow multiple values in assignment separated by '|'
a_list = a.split('|')
b_list = b.split('|')
for a_item in a_list:
for b_item in b_list:
if (a_item == b_item):
return True
elif (a_item == '*') or (b_item == '*'):
return True
return False
def equal(self, other):
for t in Event.terms:
log.debug(" [%s] %s %s" % (t, self[t], other[t]));
if not self.has_key(t) or not other.has_key(t):
return False
if not self.compare_data(self[t], other[t]):
return False
return True
def diff(self, other):
for t in Event.terms:
if not self.has_key(t) or not other.has_key(t):
continue
if not self.compare_data(self[t], other[t]):
log.warning("expected %s=%s, got %s" % (t, self[t], other[t]))
# Test file description needs to have the following sections:
# [config]
# - just single instance in file
# - needs to specify:
# 'command' - perf command name
# 'args' - special command arguments
# 'ret' - expected command return value (0 by default)
#
# [eventX:base]
# - one or multiple instances in file
# - expected values assignments
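# An illustrative test file (editorial example, not shipped with this script)
# might look like:
#
#   [config]
#   command = record
#   args    = kill >/dev/null 2>&1
#   ret     = 1
#
#   [event:base-record]
#   fd = 1
#   group_fd = -1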
class Test(object):
def __init__(self, path, options):
parser = ConfigParser.SafeConfigParser()
parser.read(path)
log.warning("running '%s'" % path)
self.path = path
self.test_dir = options.test_dir
self.perf = options.perf
self.command = parser.get('config', 'command')
self.args = parser.get('config', 'args')
try:
self.ret = parser.get('config', 'ret')
except:
self.ret = 0
self.expect = {}
self.result = {}
log.debug(" loading expected events");
self.load_events(path, self.expect)
def is_event(self, name):
if name.find("event") == -1:
return False
else:
return True
def load_events(self, path, events):
parser_event = ConfigParser.SafeConfigParser()
parser_event.read(path)
        # The event record section header contains the word 'event',
        # optionally followed by ':', which allows a 'parent
        # event' to be loaded first as a base.
for section in filter(self.is_event, parser_event.sections()):
parser_items = parser_event.items(section);
base_items = {}
# Read parent event if there's any
if (':' in section):
base = section[section.index(':') + 1:]
parser_base = ConfigParser.SafeConfigParser()
parser_base.read(self.test_dir + '/' + base)
base_items = parser_base.items('event')
e = Event(section, parser_items, base_items)
events[section] = e
def run_cmd(self, tempdir):
cmd = "PERF_TEST_ATTR=%s %s %s -o %s/perf.data %s" % (tempdir,
self.perf, self.command, tempdir, self.args)
ret = os.WEXITSTATUS(os.system(cmd))
log.info(" '%s' ret %d " % (cmd, ret))
if ret != int(self.ret):
raise Unsup(self)
def compare(self, expect, result):
match = {}
log.debug(" compare");
# For each expected event find all matching
# events in result. Fail if there's not any.
for exp_name, exp_event in expect.items():
exp_list = []
log.debug(" matching [%s]" % exp_name)
for res_name, res_event in result.items():
log.debug(" to [%s]" % res_name)
if (exp_event.equal(res_event)):
exp_list.append(res_name)
log.debug(" ->OK")
else:
log.debug(" ->FAIL");
log.debug(" match: [%s] matches %s" % (exp_name, str(exp_list)))
            # we did not find any matching event - fail
if (not exp_list):
exp_event.diff(res_event)
raise Fail(self, 'match failure');
match[exp_name] = exp_list
# For each defined group in the expected events
# check we match the same group in the result.
for exp_name, exp_event in expect.items():
group = exp_event.group
if (group == ''):
continue
for res_name in match[exp_name]:
res_group = result[res_name].group
if res_group not in match[group]:
raise Fail(self, 'group failure')
log.debug(" group: [%s] matches group leader %s" %
(exp_name, str(match[group])))
log.debug(" matched")
def resolve_groups(self, events):
for name, event in events.items():
group_fd = event['group_fd'];
if group_fd == '-1':
continue;
for iname, ievent in events.items():
if (ievent['fd'] == group_fd):
event.group = iname
log.debug('[%s] has group leader [%s]' % (name, iname))
break;
def run(self):
tempdir = tempfile.mkdtemp();
try:
# run the test script
self.run_cmd(tempdir);
# load events expectation for the test
log.debug(" loading result events");
for f in glob.glob(tempdir + '/event*'):
self.load_events(f, self.result);
# resolve group_fd to event names
self.resolve_groups(self.expect);
self.resolve_groups(self.result);
# do the expectation - results matching - both ways
self.compare(self.expect, self.result)
self.compare(self.result, self.expect)
finally:
# cleanup
shutil.rmtree(tempdir)
def run_tests(options):
for f in glob.glob(options.test_dir + '/' + options.test):
try:
Test(f, options).run()
except Unsup, obj:
log.warning("unsupp %s" % obj.getMsg())
def setup_log(verbose):
global log
level = logging.CRITICAL
if verbose == 1:
level = logging.WARNING
if verbose == 2:
level = logging.INFO
if verbose >= 3:
level = logging.DEBUG
log = logging.getLogger('test')
log.setLevel(level)
ch = logging.StreamHandler()
ch.setLevel(level)
formatter = logging.Formatter('%(message)s')
ch.setFormatter(formatter)
log.addHandler(ch)
USAGE = '''%s [OPTIONS]
-d dir # tests dir
-p path # perf binary
-t test # single test
-v # verbose level
''' % sys.argv[0]
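# For illustration, a typical invocation might look like the following
# (the script name, directories and test name are assumptions, not fixed
# by this file):
#
#   python ./attr.py -d ./tests/attr -p ./perf -vv -t test-record-basic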
def main():
parser = optparse.OptionParser(usage=USAGE)
parser.add_option("-t", "--test",
action="store", type="string", dest="test")
parser.add_option("-d", "--test-dir",
action="store", type="string", dest="test_dir")
parser.add_option("-p", "--perf",
action="store", type="string", dest="perf")
parser.add_option("-v", "--verbose",
action="count", dest="verbose")
options, args = parser.parse_args()
if args:
parser.error('FAILED wrong arguments %s' % ' '.join(args))
return -1
setup_log(options.verbose)
if not options.test_dir:
print 'FAILED no -d option specified'
sys.exit(-1)
if not options.test:
options.test = 'test*'
try:
run_tests(options)
except Fail, obj:
print "FAILED %s" % obj.getMsg();
sys.exit(-1)
sys.exit(0)
if __name__ == '__main__':
main()
| gpl-2.0 |
openvapour/ryu | ryu/app/ofctl_rest.py | 10 | 26827 | # Copyright (C) 2012 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import json
import ast
from webob import Response
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller import dpset
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_0
from ryu.ofproto import ofproto_v1_2
from ryu.ofproto import ofproto_v1_3
from ryu.lib import ofctl_v1_0
from ryu.lib import ofctl_v1_2
from ryu.lib import ofctl_v1_3
from ryu.app.wsgi import ControllerBase, WSGIApplication
LOG = logging.getLogger('ryu.app.ofctl_rest')
# supported ofctl versions in this restful app
supported_ofctl = {
ofproto_v1_0.OFP_VERSION: ofctl_v1_0,
ofproto_v1_2.OFP_VERSION: ofctl_v1_2,
ofproto_v1_3.OFP_VERSION: ofctl_v1_3,
}
# REST API
#
# Retrieve the switch stats
#
# get the list of all switches
# GET /stats/switches
#
# get the desc stats of the switch
# GET /stats/desc/<dpid>
#
# get flow stats of the switch
# GET /stats/flow/<dpid>
#
# get flow stats of the switch filtered by the fields
# POST /stats/flow/<dpid>
#
# get aggregate flow stats of the switch
# GET /stats/aggregateflow/<dpid>
#
# get aggregate flow stats of the switch filtered by the fields
# POST /stats/aggregateflow/<dpid>
#
# get port stats of the switch
# GET /stats/port/<dpid>
#
# get queue stats of the switch
# GET /stats/queue/<dpid>
#
# get meter features stats of the switch
# GET /stats/meterfeatures/<dpid>
#
# get meter config stats of the switch
# GET /stats/meterconfig/<dpid>
#
# get meters stats of the switch
# GET /stats/meter/<dpid>
#
# get group features stats of the switch
# GET /stats/groupfeatures/<dpid>
#
# get group desc stats of the switch
# GET /stats/groupdesc/<dpid>
#
# get group stats of the switch
# GET /stats/group/<dpid>
#
# get port description of the switch
# GET /stats/portdesc/<dpid>
# Update the switch stats
#
# add a flow entry
# POST /stats/flowentry/add
#
# modify all matching flow entries
# POST /stats/flowentry/modify
#
# modify flow entry strictly matching wildcards and priority
# POST /stats/flowentry/modify_strict
#
# delete all matching flow entries
# POST /stats/flowentry/delete
#
# delete flow entry strictly matching wildcards and priority
# POST /stats/flowentry/delete_strict
#
# delete all flow entries of the switch
# DELETE /stats/flowentry/clear/<dpid>
#
# add a meter entry
# POST /stats/meterentry/add
#
# modify a meter entry
# POST /stats/meterentry/modify
#
# delete a meter entry
# POST /stats/meterentry/delete
#
# add a group entry
# POST /stats/groupentry/add
#
# modify a group entry
# POST /stats/groupentry/modify
#
# delete a group entry
# POST /stats/groupentry/delete
#
# modify behavior of the physical port
# POST /stats/portdesc/modify
#
#
# send an experimenter message
# POST /stats/experimenter/<dpid>
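#
# Rough usage sketch (the host, port and datapath id below are assumptions;
# they depend on how the WSGI application is actually deployed, commonly
# ryu-manager listening on port 8080):
#
#   # list the connected switches
#   curl -X GET http://localhost:8080/stats/switches
#
#   # retrieve the flow table of datapath 1
#   curl -X GET http://localhost:8080/stats/flow/1
#
#   # add a flow entry to datapath 1
#   curl -X POST -d '{"dpid": 1, "match": {"in_port": 1},
#                     "actions": [{"type": "OUTPUT", "port": 2}]}' \
#        http://localhost:8080/stats/flowentry/add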
class StatsController(ControllerBase):
def __init__(self, req, link, data, **config):
super(StatsController, self).__init__(req, link, data, **config)
self.dpset = data['dpset']
self.waiters = data['waiters']
def get_dpids(self, req, **_kwargs):
dps = self.dpset.dps.keys()
body = json.dumps(dps)
return Response(content_type='application/json', body=body)
def get_desc_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
desc = _ofctl.get_desc_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(desc)
return Response(content_type='application/json', body=body)
def get_flow_stats(self, req, dpid, **_kwargs):
if req.body == '':
flow = {}
else:
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
flows = _ofctl.get_flow_stats(dp, self.waiters, flow)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(flows)
return Response(content_type='application/json', body=body)
def get_aggregate_flow_stats(self, req, dpid, **_kwargs):
if req.body == '':
flow = {}
else:
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
flows = _ofctl.get_aggregate_flow_stats(dp, self.waiters, flow)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(flows)
return Response(content_type='application/json', body=body)
def get_port_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
ports = _ofctl.get_port_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(ports)
return Response(content_type='application/json', body=body)
def get_queue_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
queues = _ofctl.get_queue_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(queues)
return Response(content_type='application/json', body=body)
def get_meter_features(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_meter_features'):
meters = _ofctl.get_meter_features(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(meters)
return Response(content_type='application/json', body=body)
def get_meter_config(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_meter_config'):
meters = _ofctl.get_meter_config(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(meters)
return Response(content_type='application/json', body=body)
def get_meter_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_meter_stats'):
meters = _ofctl.get_meter_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(meters)
return Response(content_type='application/json', body=body)
def get_group_features(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_group_features'):
groups = _ofctl.get_group_features(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
def get_group_desc(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_group_desc'):
groups = _ofctl.get_group_desc(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
def get_group_stats(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'get_group_stats'):
groups = _ofctl.get_group_stats(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
def get_port_desc(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
groups = _ofctl.get_port_desc(dp, self.waiters)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
body = json.dumps(groups)
return Response(content_type='application/json', body=body)
def mod_flow_entry(self, req, cmd, **_kwargs):
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = flow.get('dpid')
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd == 'add':
cmd = dp.ofproto.OFPFC_ADD
elif cmd == 'modify':
cmd = dp.ofproto.OFPFC_MODIFY
elif cmd == 'modify_strict':
cmd = dp.ofproto.OFPFC_MODIFY_STRICT
elif cmd == 'delete':
cmd = dp.ofproto.OFPFC_DELETE
elif cmd == 'delete_strict':
cmd = dp.ofproto.OFPFC_DELETE_STRICT
else:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
_ofctl.mod_flow_entry(dp, flow, cmd)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
def delete_flow_entry(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
flow = {'table_id': dp.ofproto.OFPTT_ALL}
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
_ofctl.mod_flow_entry(dp, flow, dp.ofproto.OFPFC_DELETE)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
def mod_meter_entry(self, req, cmd, **_kwargs):
try:
flow = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = flow.get('dpid')
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd == 'add':
cmd = dp.ofproto.OFPMC_ADD
elif cmd == 'modify':
cmd = dp.ofproto.OFPMC_MODIFY
elif cmd == 'delete':
cmd = dp.ofproto.OFPMC_DELETE
else:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'mod_meter_entry'):
_ofctl.mod_meter_entry(dp, flow, cmd)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
return Response(status=200)
def mod_group_entry(self, req, cmd, **_kwargs):
try:
group = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = group.get('dpid')
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd == 'add':
cmd = dp.ofproto.OFPGC_ADD
elif cmd == 'modify':
cmd = dp.ofproto.OFPGC_MODIFY
elif cmd == 'delete':
cmd = dp.ofproto.OFPGC_DELETE
else:
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'mod_group_entry'):
_ofctl.mod_group_entry(dp, group, cmd)
else:
LOG.debug('Unsupported OF protocol or \
request not supported in this OF protocol version')
return Response(status=501)
return Response(status=200)
def mod_port_behavior(self, req, cmd, **_kwargs):
try:
port_config = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
dpid = port_config.get('dpid')
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
port_no = port_config.get('port_no', 0)
if type(port_no) == str and not port_no.isdigit():
LOG.debug('invalid port_no %s', port_no)
return Response(status=400)
port_info = self.dpset.port_state[int(dpid)].get(port_no)
if port_info:
port_config.setdefault('hw_addr', port_info.hw_addr)
port_config.setdefault('advertise', port_info.advertised)
else:
return Response(status=404)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
if cmd != 'modify':
return Response(status=404)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None:
_ofctl.mod_port_behavior(dp, port_config)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
def send_experimenter(self, req, dpid, **_kwargs):
if type(dpid) == str and not dpid.isdigit():
LOG.debug('invalid dpid %s', dpid)
return Response(status=400)
dp = self.dpset.get(int(dpid))
if dp is None:
return Response(status=404)
try:
exp = ast.literal_eval(req.body)
except SyntaxError:
LOG.debug('invalid syntax %s', req.body)
return Response(status=400)
_ofp_version = dp.ofproto.OFP_VERSION
_ofctl = supported_ofctl.get(_ofp_version, None)
if _ofctl is not None and hasattr(_ofctl, 'send_experimenter'):
_ofctl.send_experimenter(dp, exp)
else:
LOG.debug('Unsupported OF protocol')
return Response(status=501)
return Response(status=200)
class RestStatsApi(app_manager.RyuApp):
OFP_VERSIONS = [ofproto_v1_0.OFP_VERSION,
ofproto_v1_2.OFP_VERSION,
ofproto_v1_3.OFP_VERSION]
_CONTEXTS = {
'dpset': dpset.DPSet,
'wsgi': WSGIApplication
}
def __init__(self, *args, **kwargs):
super(RestStatsApi, self).__init__(*args, **kwargs)
self.dpset = kwargs['dpset']
wsgi = kwargs['wsgi']
self.waiters = {}
self.data = {}
self.data['dpset'] = self.dpset
self.data['waiters'] = self.waiters
mapper = wsgi.mapper
wsgi.registory['StatsController'] = self.data
path = '/stats'
uri = path + '/switches'
mapper.connect('stats', uri,
controller=StatsController, action='get_dpids',
conditions=dict(method=['GET']))
uri = path + '/desc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_desc_stats',
conditions=dict(method=['GET']))
uri = path + '/flow/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_flow_stats',
conditions=dict(method=['GET', 'POST']))
uri = path + '/aggregateflow/{dpid}'
mapper.connect('stats', uri,
controller=StatsController,
action='get_aggregate_flow_stats',
conditions=dict(method=['GET', 'POST']))
uri = path + '/port/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_port_stats',
conditions=dict(method=['GET']))
uri = path + '/queue/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_queue_stats',
conditions=dict(method=['GET']))
uri = path + '/meterfeatures/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_features',
conditions=dict(method=['GET']))
uri = path + '/meterconfig/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_config',
conditions=dict(method=['GET']))
uri = path + '/meter/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_meter_stats',
conditions=dict(method=['GET']))
uri = path + '/groupfeatures/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_features',
conditions=dict(method=['GET']))
uri = path + '/groupdesc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_desc',
conditions=dict(method=['GET']))
uri = path + '/group/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_group_stats',
conditions=dict(method=['GET']))
uri = path + '/portdesc/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='get_port_desc',
conditions=dict(method=['GET']))
uri = path + '/flowentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_flow_entry',
conditions=dict(method=['POST']))
uri = path + '/flowentry/clear/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='delete_flow_entry',
conditions=dict(method=['DELETE']))
uri = path + '/meterentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_meter_entry',
conditions=dict(method=['POST']))
uri = path + '/groupentry/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_group_entry',
conditions=dict(method=['POST']))
uri = path + '/portdesc/{cmd}'
mapper.connect('stats', uri,
controller=StatsController, action='mod_port_behavior',
conditions=dict(method=['POST']))
uri = path + '/experimenter/{dpid}'
mapper.connect('stats', uri,
controller=StatsController, action='send_experimenter',
conditions=dict(method=['POST']))
@set_ev_cls([ofp_event.EventOFPStatsReply,
ofp_event.EventOFPDescStatsReply,
ofp_event.EventOFPFlowStatsReply,
ofp_event.EventOFPAggregateStatsReply,
ofp_event.EventOFPPortStatsReply,
ofp_event.EventOFPQueueStatsReply,
ofp_event.EventOFPMeterStatsReply,
ofp_event.EventOFPMeterFeaturesStatsReply,
ofp_event.EventOFPMeterConfigStatsReply,
ofp_event.EventOFPGroupStatsReply,
ofp_event.EventOFPGroupFeaturesStatsReply,
ofp_event.EventOFPGroupDescStatsReply,
ofp_event.EventOFPPortDescStatsReply
], MAIN_DISPATCHER)
def stats_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
flags = 0
if dp.ofproto.OFP_VERSION == ofproto_v1_0.OFP_VERSION:
flags = dp.ofproto.OFPSF_REPLY_MORE
elif dp.ofproto.OFP_VERSION == ofproto_v1_2.OFP_VERSION:
flags = dp.ofproto.OFPSF_REPLY_MORE
elif dp.ofproto.OFP_VERSION == ofproto_v1_3.OFP_VERSION:
flags = dp.ofproto.OFPMPF_REPLY_MORE
if msg.flags & flags:
return
del self.waiters[dp.id][msg.xid]
lock.set()
@set_ev_cls([ofp_event.EventOFPSwitchFeatures], MAIN_DISPATCHER)
def features_reply_handler(self, ev):
msg = ev.msg
dp = msg.datapath
if dp.id not in self.waiters:
return
if msg.xid not in self.waiters[dp.id]:
return
lock, msgs = self.waiters[dp.id][msg.xid]
msgs.append(msg)
del self.waiters[dp.id][msg.xid]
lock.set()
| apache-2.0 |
hmgaudecker/econ-project-templates | docs/bld/example/python/python_example/.mywaflib/waflib/extras/pytest.py | 53 | 8289 | #! /usr/bin/env python
# encoding: utf-8
# Calle Rosenquist, 2016-2018 (xbreak)
"""
Provides Python unit test support using :py:class:`waflib.Tools.waf_unit_test.utest`
task via the **pytest** feature.
To use pytest the following is needed:
1. Load `pytest` and the dependency `waf_unit_test` tools.
2. Create a task generator with feature `pytest` (not `test`) and customize behaviour with
the following attributes:
- `pytest_source`: Test input files.
- `ut_str`: Test runner command, e.g. ``${PYTHON} -B -m unittest discover`` or
if nose is used: ``${NOSETESTS} --no-byte-compile ${SRC}``.
- `ut_shell`: Determines if ``ut_str`` is executed in a shell. Default: False.
- `ut_cwd`: Working directory for test runner. Defaults to directory of
first ``pytest_source`` file.
Additionally the following `pytest` specific attributes are used in dependent taskgens:
- `pytest_path`: Node or string list of additional Python paths.
- `pytest_libpath`: Node or string list of additional library paths.
The `use` dependencies are used for both update calculation and to populate
the following environment variables for the `pytest` test runner:
1. `PYTHONPATH` (`sys.path`) of any dependent taskgen that has the feature `py`:
- `install_from` attribute is used to determine where the root of the Python sources
are located. If `install_from` is not specified the default is to use the taskgen path
as the root.
- `pytest_path` attribute is used to manually specify additional Python paths.
2. Dynamic linker search path variable (e.g. `LD_LIBRARY_PATH`) of any dependent taskgen with
non-static link_task.
- `pytest_libpath` attribute is used to manually specify additional linker paths.
Note: `pytest` cannot automatically determine the correct `PYTHONPATH` for `pyext` taskgens
because the extension might be part of a Python package or used standalone:
- When used as part of another `py` package, the `PYTHONPATH` is provided by
that taskgen so no additional action is required.
- When used as a standalone module, the user needs to specify the `PYTHONPATH` explicitly
via the `pytest_path` attribute on the `pyext` taskgen.
For details, see the pytest playground examples.
For example::
# A standalone Python C extension that demonstrates unit test environment population
# of PYTHONPATH and LD_LIBRARY_PATH/PATH/DYLD_LIBRARY_PATH.
#
# Note: `pytest_path` is provided here because pytest cannot automatically determine
# if the extension is part of another Python package or is used standalone.
bld(name = 'foo_ext',
features = 'c cshlib pyext',
source = 'src/foo_ext.c',
target = 'foo_ext',
pytest_path = [ bld.path.get_bld() ])
# Python package under test that also depend on the Python module `foo_ext`
#
# Note: `install_from` is added automatically to `PYTHONPATH`.
bld(name = 'foo',
features = 'py',
use = 'foo_ext',
source = bld.path.ant_glob('src/foo/*.py'),
install_from = 'src')
# Unit test example using the built in module unittest and let that discover
# any test cases.
bld(name = 'foo_test',
features = 'pytest',
use = 'foo',
pytest_source = bld.path.ant_glob('test/*.py'),
ut_str = '${PYTHON} -B -m unittest discover')
"""
import os
from waflib import Task, TaskGen, Errors, Utils, Logs
from waflib.Tools import ccroot
def _process_use_rec(self, name):
"""
Recursively process ``use`` for the task generator with name ``name``.
Used by pytest_process_use.
"""
if name in self.pytest_use_not or name in self.pytest_use_seen:
return
try:
tg = self.bld.get_tgen_by_name(name)
except Errors.WafError:
self.pytest_use_not.add(name)
return
self.pytest_use_seen.append(name)
tg.post()
for n in self.to_list(getattr(tg, 'use', [])):
_process_use_rec(self, n)
@TaskGen.feature('pytest')
@TaskGen.after_method('process_source', 'apply_link')
def pytest_process_use(self):
"""
Process the ``use`` attribute which contains a list of task generator names and store
paths that later is used to populate the unit test runtime environment.
"""
self.pytest_use_not = set()
self.pytest_use_seen = []
self.pytest_paths = [] # strings or Nodes
self.pytest_libpaths = [] # strings or Nodes
self.pytest_dep_nodes = []
names = self.to_list(getattr(self, 'use', []))
for name in names:
_process_use_rec(self, name)
def extend_unique(lst, varlst):
ext = []
for x in varlst:
if x not in lst:
ext.append(x)
lst.extend(ext)
# Collect type specific info needed to construct a valid runtime environment
# for the test.
for name in self.pytest_use_seen:
tg = self.bld.get_tgen_by_name(name)
extend_unique(self.pytest_paths, Utils.to_list(getattr(tg, 'pytest_path', [])))
extend_unique(self.pytest_libpaths, Utils.to_list(getattr(tg, 'pytest_libpath', [])))
if 'py' in tg.features:
# Python dependencies are added to PYTHONPATH
pypath = getattr(tg, 'install_from', tg.path)
if 'buildcopy' in tg.features:
# Since buildcopy is used we assume that PYTHONPATH in build should be used,
# not source
extend_unique(self.pytest_paths, [pypath.get_bld().abspath()])
# Add buildcopy output nodes to dependencies
extend_unique(self.pytest_dep_nodes, [o for task in getattr(tg, 'tasks', []) \
for o in getattr(task, 'outputs', [])])
else:
# If buildcopy is not used, depend on sources instead
extend_unique(self.pytest_dep_nodes, tg.source)
extend_unique(self.pytest_paths, [pypath.abspath()])
if getattr(tg, 'link_task', None):
# For tasks with a link_task (C, C++, D etc.) include their library paths:
if not isinstance(tg.link_task, ccroot.stlink_task):
extend_unique(self.pytest_dep_nodes, tg.link_task.outputs)
extend_unique(self.pytest_libpaths, tg.link_task.env.LIBPATH)
if 'pyext' in tg.features:
# If the taskgen is extending Python we also want to add the interpreter libpath.
extend_unique(self.pytest_libpaths, tg.link_task.env.LIBPATH_PYEXT)
else:
# Only add to libpath if the link task is not a Python extension
extend_unique(self.pytest_libpaths, [tg.link_task.outputs[0].parent.abspath()])
@TaskGen.feature('pytest')
@TaskGen.after_method('pytest_process_use')
def make_pytest(self):
"""
Creates a ``utest`` task with a populated environment for Python if not specified in ``ut_env``:
- Paths in `pytest_paths` attribute are used to populate PYTHONPATH
- Paths in `pytest_libpaths` attribute are used to populate the system library path (e.g. LD_LIBRARY_PATH)
"""
nodes = self.to_nodes(self.pytest_source)
tsk = self.create_task('utest', nodes)
tsk.dep_nodes.extend(self.pytest_dep_nodes)
if getattr(self, 'ut_str', None):
self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False))
tsk.vars = lst + tsk.vars
if getattr(self, 'ut_cwd', None):
if isinstance(self.ut_cwd, str):
# we want a Node instance
if os.path.isabs(self.ut_cwd):
self.ut_cwd = self.bld.root.make_node(self.ut_cwd)
else:
self.ut_cwd = self.path.make_node(self.ut_cwd)
else:
if tsk.inputs:
self.ut_cwd = tsk.inputs[0].parent
else:
raise Errors.WafError("no valid input files for pytest task, check pytest_source value")
if not self.ut_cwd.exists():
self.ut_cwd.mkdir()
if not hasattr(self, 'ut_env'):
self.ut_env = dict(os.environ)
def add_paths(var, lst):
# Add list of paths to a variable, lst can contain strings or nodes
lst = [ str(n) for n in lst ]
Logs.debug("ut: %s: Adding paths %s=%s", self, var, lst)
self.ut_env[var] = os.pathsep.join(lst) + os.pathsep + self.ut_env.get(var, '')
# Prepend dependency paths to PYTHONPATH and LD_LIBRARY_PATH
add_paths('PYTHONPATH', self.pytest_paths)
if Utils.is_win32:
add_paths('PATH', self.pytest_libpaths)
elif Utils.unversioned_sys_platform() == 'darwin':
add_paths('DYLD_LIBRARY_PATH', self.pytest_libpaths)
add_paths('LD_LIBRARY_PATH', self.pytest_libpaths)
else:
add_paths('LD_LIBRARY_PATH', self.pytest_libpaths)
| bsd-3-clause |
again4you/retext | ReText/tablemode.py | 3 | 5647 | # This file is part of ReText
# Copyright: 2014 Maurice van der Pot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
from ReText import DOCTYPE_MARKDOWN, DOCTYPE_REST
from PyQt5.QtGui import QTextCursor
LARGER_THAN_ANYTHING = sys.maxsize
class Row:
def __init__(self, block=None, text=None, separatorline=False, paddingchar=' '):
self.block = block
self.text = text
self.separatorline = separatorline
self.paddingchar = paddingchar
def __repr__(self):
return "<Row '%s' %s '%s'>" % (self.text, self.separatorline, self.paddingchar)
def _getTableLines(doc, pos, docType):
startblock = doc.findBlock(pos)
editedlineindex = 0
offset = pos - startblock.position()
rows = [ Row(block = startblock,
text = startblock.text()) ]
block = startblock.previous()
while any(c in block.text() for c in '+|'):
rows.insert(0, Row(block = block,
text = block.text()))
editedlineindex += 1
block = block.previous()
block = startblock.next()
while any(c in block.text() for c in '+|'):
rows.append(Row(block = block,
text = block.text()))
block = block.next()
if docType == DOCTYPE_MARKDOWN:
for i, row in enumerate(rows):
if i == 1:
row.separatorline = True
row.paddingchar = '-'
elif docType == DOCTYPE_REST:
for i, row in enumerate(rows):
if i & 1 == 0: # i is even
row.separatorline = True
row.paddingchar = '=' if (i == 2) else '-'
row.text = row.text.replace('+', '|')
return rows, editedlineindex, offset
def _sortaUndoEdit(rows, editedlineindex, editsize):
aftertext = rows[editedlineindex].text
if editsize < 0:
beforetext = ' ' * -editsize + aftertext
else:
beforetext = aftertext[editsize:]
rows[editedlineindex].text = beforetext
def _determineRoomInCell(row, edge, shrinking, startposition=0):
if edge >= len(row.text) or row.text[edge] != '|':
room = LARGER_THAN_ANYTHING
else:
clearance = 0
cellwidth = 0
afterContent = True
for i in range(edge - 1, startposition - 1, -1):
if row.text[i] == '|':
break
else:
if row.text[i] == row.paddingchar and afterContent:
clearance += 1
else:
afterContent = False
cellwidth += 1
if row.separatorline:
if shrinking:
# do not shrink separator cells below 3
room = max(0, cellwidth - 3)
else:
# start expanding the cell if only the space for a right-align marker is left
room = max(0, cellwidth - 1)
else:
room = clearance
return room
def _performShift(row, rowShift, edge, shift):
editlist = []
if len(row.text) > edge and row.text[edge] == '|' and rowShift != shift:
editsize = -(rowShift - shift)
rowShift = shift
# Insert one position further to the left on separator lines, because
# there may be a space (for aesthetic reasons) or an alignment marker
# on the last position before the edge and that should stay next to the
# edge.
if row.separatorline:
edge -= 1
editlist.append((edge, editsize))
return editlist, rowShift
def _determineNextEdge(rows, rowShifts, offset):
nextedge = None
for row, rowShift in zip(rows, rowShifts):
if rowShift != 0:
edge = row.text.find('|', offset)
if edge != -1 and (nextedge == None or edge < nextedge):
nextedge = edge
return nextedge
def _determineEditLists(rows, editedlineindex, offset, editsize):
rowShifts = [0 for _ in rows]
rowShifts[editedlineindex] = editsize
editLists = [[] for _ in rows]
currentedge = _determineNextEdge(rows, rowShifts, offset)
firstEdge = True
while currentedge:
if editsize < 0:
leastLeftShift = min((-rowShift + _determineRoomInCell(row, currentedge, True)
for row, rowShift in zip(rows, rowShifts)))
shift = max(editsize, -leastLeftShift)
else:
if firstEdge:
room = _determineRoomInCell(rows[editedlineindex], currentedge, False, offset)
shift = max(0, editsize - room)
for i, row in enumerate(rows):
editList, newRowShift = _performShift(row, rowShifts[i], currentedge, shift)
rowShifts[i] = newRowShift
editLists[i].extend(editList)
currentedge = _determineNextEdge(rows, rowShifts, currentedge + 1)
firstEdge = False
return editLists
def _performEdits(cursor, rows, editLists, linewithoffset, offset):
cursor.joinPreviousEditBlock()
for i, (row, editList) in enumerate(zip(rows, editLists)):
for editpos, editsize in sorted(editList, reverse=True):
if i == linewithoffset:
editpos += offset
cursor.setPosition(row.block.position() + editpos)
if editsize > 0:
cursor.insertText(editsize * row.paddingchar)
else:
for _ in range(-editsize):
cursor.deletePreviousChar()
cursor.endEditBlock()
def adjustTableToChanges(doc, pos, editsize, docType):
if docType in (DOCTYPE_MARKDOWN, DOCTYPE_REST):
rows, editedlineindex, offset = _getTableLines(doc, pos, docType)
_sortaUndoEdit(rows, editedlineindex, editsize)
editLists = _determineEditLists(rows, editedlineindex, offset, editsize)
cursor = QTextCursor(doc)
_performEdits(cursor, rows, editLists, editedlineindex, editsize)
| gpl-3.0 |
curiosityio/taiga-docker | taiga-back/taiga-back/tests/integration/test_webhooks_tasks.py | 1 | 10425 | # Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2016 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
from unittest.mock import patch
from unittest.mock import Mock
from .. import factories as f
from taiga.projects.history import services
pytestmark = pytest.mark.django_db(transaction=True)
from taiga.base.utils import json
def test_webhooks_when_create_task(settings):
settings.WEBHOOKS_ENABLED = True
project = f.ProjectFactory()
f.WebhookFactory.create(project=project)
f.WebhookFactory.create(project=project)
obj = f.TaskFactory.create(project=project)
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner)
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "create"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert data["data"]["id"] == obj.id
def test_webhooks_when_update_task(settings):
settings.WEBHOOKS_ENABLED = True
project = f.ProjectFactory()
f.WebhookFactory.create(project=project)
f.WebhookFactory.create(project=project)
obj = f.TaskFactory.create(project=project)
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner)
assert send_request_mock.call_count == 2
obj.subject = "test webhook update"
obj.save()
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner, comment="test_comment")
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "change"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert data["data"]["id"] == obj.id
assert data["data"]["subject"] == obj.subject
assert data["change"]["comment"] == "test_comment"
assert data["change"]["diff"]["subject"]["to"] == data["data"]["subject"]
assert data["change"]["diff"]["subject"]["from"] != data["data"]["subject"]
def test_webhooks_when_delete_task(settings):
settings.WEBHOOKS_ENABLED = True
project = f.ProjectFactory()
f.WebhookFactory.create(project=project)
f.WebhookFactory.create(project=project)
obj = f.TaskFactory.create(project=project)
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner, delete=True)
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "delete"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert "data" in data
def test_webhooks_when_update_task_attachments(settings):
settings.WEBHOOKS_ENABLED = True
project = f.ProjectFactory()
f.WebhookFactory.create(project=project)
f.WebhookFactory.create(project=project)
obj = f.TaskFactory.create(project=project)
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner)
assert send_request_mock.call_count == 2
# Create attachments
attachment1 = f.TaskAttachmentFactory(project=obj.project, content_object=obj, owner=obj.owner)
attachment2 = f.TaskAttachmentFactory(project=obj.project, content_object=obj, owner=obj.owner)
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner, comment="test_comment")
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "change"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert data["data"]["id"] == obj.id
assert data["change"]["comment"] == "test_comment"
assert len(data["change"]["diff"]["attachments"]["new"]) == 2
assert len(data["change"]["diff"]["attachments"]["changed"]) == 0
assert len(data["change"]["diff"]["attachments"]["deleted"]) == 0
# Update attachment
attachment1.description = "new attachment description"
attachment1.save()
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner, comment="test_comment")
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "change"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert data["data"]["id"] == obj.id
assert data["change"]["comment"] == "test_comment"
assert len(data["change"]["diff"]["attachments"]["new"]) == 0
assert len(data["change"]["diff"]["attachments"]["changed"]) == 1
assert len(data["change"]["diff"]["attachments"]["deleted"]) == 0
# Delete attachment
attachment2.delete()
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner, comment="test_comment")
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "change"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert data["data"]["id"] == obj.id
assert data["change"]["comment"] == "test_comment"
assert len(data["change"]["diff"]["attachments"]["new"]) == 0
assert len(data["change"]["diff"]["attachments"]["changed"]) == 0
assert len(data["change"]["diff"]["attachments"]["deleted"]) == 1
def test_webhooks_when_update_task_custom_attributes(settings):
settings.WEBHOOKS_ENABLED = True
project = f.ProjectFactory()
f.WebhookFactory.create(project=project)
f.WebhookFactory.create(project=project)
obj = f.TaskFactory.create(project=project)
custom_attr_1 = f.TaskCustomAttributeFactory(project=obj.project)
ct1_id = "{}".format(custom_attr_1.id)
custom_attr_2 = f.TaskCustomAttributeFactory(project=obj.project)
ct2_id = "{}".format(custom_attr_2.id)
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner)
assert send_request_mock.call_count == 2
# Create custom attributes
obj.custom_attributes_values.attributes_values = {
ct1_id: "test_1_updated",
ct2_id: "test_2_updated"
}
obj.custom_attributes_values.save()
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner, comment="test_comment")
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "change"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert data["data"]["id"] == obj.id
assert data["change"]["comment"] == "test_comment"
assert len(data["change"]["diff"]["custom_attributes"]["new"]) == 2
assert len(data["change"]["diff"]["custom_attributes"]["changed"]) == 0
assert len(data["change"]["diff"]["custom_attributes"]["deleted"]) == 0
# Update custom attributes
obj.custom_attributes_values.attributes_values[ct1_id] = "test_2_updated"
obj.custom_attributes_values.save()
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner, comment="test_comment")
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "change"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert data["data"]["id"] == obj.id
assert data["change"]["comment"] == "test_comment"
assert len(data["change"]["diff"]["custom_attributes"]["new"]) == 0
assert len(data["change"]["diff"]["custom_attributes"]["changed"]) == 1
assert len(data["change"]["diff"]["custom_attributes"]["deleted"]) == 0
# Delete custom attributes
del obj.custom_attributes_values.attributes_values[ct1_id]
obj.custom_attributes_values.save()
with patch('taiga.webhooks.tasks._send_request') as send_request_mock:
services.take_snapshot(obj, user=obj.owner, comment="test_comment")
assert send_request_mock.call_count == 2
(webhook_id, url, key, data) = send_request_mock.call_args[0]
assert data["action"] == "change"
assert data["type"] == "task"
assert data["by"]["id"] == obj.owner.id
assert "date" in data
assert data["data"]["id"] == obj.id
assert data["change"]["comment"] == "test_comment"
assert len(data["change"]["diff"]["custom_attributes"]["new"]) == 0
assert len(data["change"]["diff"]["custom_attributes"]["changed"]) == 0
assert len(data["change"]["diff"]["custom_attributes"]["deleted"]) == 1
| mit |
diverted247/signer | libs/xhtml2pdf/xhtml2pdf/w3c/css.py | 25 | 28101 | #!/usr/bin/env python
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##~ Copyright (C) 2002-2004 TechGame Networks, LLC.
##~
##~ This library is free software; you can redistribute it and/or
##~ modify it under the terms of the BSD style License as found in the
##~ LICENSE file included with this distribution.
##
## Modified by Dirk Holtwick <[email protected]>, 2007-2008
##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""CSS-2.1 engine
Primary classes:
* CSSElementInterfaceAbstract
Provide a concrete implementation for the XML element model used.
* CSSCascadeStrategy
Implements the CSS-2.1 engine's attribute lookup rules.
* CSSParser
Parses CSS source forms into usable results using CSSBuilder and
CSSMutableSelector. You may want to override parseExternal()
* CSSBuilder (and CSSMutableSelector)
A concrete implementation for cssParser.CSSBuilderAbstract (and
cssParser.CSSSelectorAbstract) to provide usable results to
CSSParser requests.
Dependencies:
python 2.3 (or greater)
sets, cssParser, re (via cssParser)
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Helper used to replace a "for" loop with a list comprehension
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def stopIter(value):
raise StopIteration, value
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Imports
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
import copy
try:
set
except NameError:
from sets import Set as set
import cssParser
import cssSpecial
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Constants / Variables / Etc.
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
CSSParseError = cssParser.CSSParseError
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ Definitions
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSElementInterfaceAbstract(object):
def getAttr(self, name, default=NotImplemented):
raise NotImplementedError('Subclass responsibility')
def getIdAttr(self):
return self.getAttr('id', '')
def getClassAttr(self):
return self.getAttr('class', '')
def getInlineStyle(self):
raise NotImplementedError('Subclass responsibility')
def matchesNode(self):
raise NotImplementedError('Subclass responsibility')
def inPseudoState(self, name, params=()):
raise NotImplementedError('Subclass responsibility')
def iterXMLParents(self):
"""Results must be compatible with CSSElementInterfaceAbstract"""
raise NotImplementedError('Subclass responsibility')
def getPreviousSibling(self):
raise NotImplementedError('Subclass responsibility')
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSCascadeStrategy(object):
author = None
user = None
userAgenr = None
def __init__(self, author=None, user=None, userAgent=None):
if author is not None:
self.author = author
if user is not None:
self.user = user
if userAgent is not None:
self.userAgenr = userAgent
def copyWithUpdate(self, author=None, user=None, userAgent=None):
if author is None:
author = self.author
if user is None:
user = self.user
if userAgent is None:
userAgent = self.userAgenr
return self.__class__(author, user, userAgent)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def iterCSSRulesets(self, inline=None):
if self.userAgenr is not None:
yield self.userAgenr[0]
yield self.userAgenr[1]
if self.user is not None:
yield self.user[0]
if self.author is not None:
yield self.author[0]
yield self.author[1]
if inline:
yield inline[0]
yield inline[1]
if self.user is not None:
yield self.user[1]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def findStyleFor(self, element, attrName, default=NotImplemented):
"""Attempts to find the style setting for attrName in the CSSRulesets.
Note: This method does not attempt to resolve rules that return
"inherited", "default", or values that have units (including "%").
This is left up to the client app to re-query the CSS in order to
implement these semantics.
"""
rule = self.findCSSRulesFor(element, attrName)
return self._extractStyleForRule(rule, attrName, default)
def findStylesForEach(self, element, attrNames, default=NotImplemented):
"""Attempts to find the style setting for attrName in the CSSRulesets.
Note: This method does not attempt to resolve rules that return
"inherited", "default", or values that have units (including "%").
This is left up to the client app to re-query the CSS in order to
implement these semantics.
"""
rules = self.findCSSRulesForEach(element, attrNames)
return [(attrName, self._extractStyleForRule(rule, attrName, default))
for attrName, rule in rules.iteritems()]
def findCSSRulesFor(self, element, attrName):
rules = []
inline = element.getInlineStyle()
# Generators are wonderful but sometimes slow...
#for ruleset in self.iterCSSRulesets(inline):
# rules += ruleset.findCSSRuleFor(element, attrName)
if self.userAgenr is not None:
rules += self.userAgenr[0].findCSSRuleFor(element, attrName)
rules += self.userAgenr[1].findCSSRuleFor(element, attrName)
if self.user is not None:
rules += self.user[0].findCSSRuleFor(element, attrName)
if self.author is not None:
rules += self.author[0].findCSSRuleFor(element, attrName)
rules += self.author[1].findCSSRuleFor(element, attrName)
if inline:
rules += inline[0].findCSSRuleFor(element, attrName)
rules += inline[1].findCSSRuleFor(element, attrName)
if self.user is not None:
rules += self.user[1].findCSSRuleFor(element, attrName)
rules.sort()
return rules
def findCSSRulesForEach(self, element, attrNames):
rules = dict((name, []) for name in attrNames)
inline = element.getInlineStyle()
for ruleset in self.iterCSSRulesets(inline):
for attrName, attrRules in rules.iteritems():
attrRules += ruleset.findCSSRuleFor(element, attrName)
for attrRules in rules.itervalues():
attrRules.sort()
return rules
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _extractStyleForRule(self, rule, attrName, default=NotImplemented):
if rule:
# rule is packed in a list to differentiate from "no rule" vs "rule
# whose value evaluates as False"
style = rule[-1][1]
return style[attrName]
elif default is not NotImplemented:
return default
raise LookupError("Could not find style for '%s' in %r" % (attrName, rule))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Selectors
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSSelectorBase(object):
inline = False
_hash = None
_specificity = None
def __init__(self, completeName='*'):
if not isinstance(completeName, tuple):
completeName = (None, '*', completeName)
self.completeName = completeName
def _updateHash(self):
self._hash = hash((self.fullName, self.specificity(), self.qualifiers))
def __hash__(self):
if self._hash is None:
return object.__hash__(self)
return self._hash
def getNSPrefix(self):
return self.completeName[0]
nsPrefix = property(getNSPrefix)
def getName(self):
return self.completeName[2]
name = property(getName)
def getNamespace(self):
return self.completeName[1]
namespace = property(getNamespace)
def getFullName(self):
return self.completeName[1:3]
fullName = property(getFullName)
def __repr__(self):
strArgs = (self.__class__.__name__,) + self.specificity() + (self.asString(),)
return '<%s %d:%d:%d:%d %s >' % strArgs
def __str__(self):
return self.asString()
def __cmp__(self, other):
result = cmp(self.specificity(), other.specificity())
if result != 0:
return result
result = cmp(self.fullName, other.fullName)
if result != 0:
return result
result = cmp(self.qualifiers, other.qualifiers)
return result
def specificity(self):
if self._specificity is None:
self._specificity = self._calcSpecificity()
return self._specificity
def _calcSpecificity(self):
"""from http://www.w3.org/TR/CSS21/cascade.html#specificity"""
hashCount = 0
qualifierCount = 0
elementCount = int(self.name != '*')
for q in self.qualifiers:
if q.isHash():
hashCount += 1
elif q.isClass():
qualifierCount += 1
elif q.isAttr():
qualifierCount += 1
elif q.isPseudo():
elementCount += 1
elif q.isCombiner():
i, h, q, e = q.selector.specificity()
hashCount += h
qualifierCount += q
elementCount += e
return self.inline, hashCount, qualifierCount, elementCount
def matches(self, element=None):
if element is None:
return False
# with CSSDOMElementInterface.matchesNode(self, (namespace, tagName)) replacement:
if self.fullName[1] not in ('*', element.domElement.tagName):
return False
if self.fullName[0] not in (None, '', '*') and self.fullName[0] != element.domElement.namespaceURI:
return False
for qualifier in self.qualifiers:
if not qualifier.matches(element):
return False
else:
return True
def asString(self):
result = []
if self.nsPrefix is not None:
result.append('%s|%s' % (self.nsPrefix, self.name))
else:
result.append(self.name)
for q in self.qualifiers:
if q.isCombiner():
result.insert(0, q.asString())
else:
result.append(q.asString())
return ''.join(result)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSInlineSelector(CSSSelectorBase):
inline = True
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSMutableSelector(CSSSelectorBase, cssParser.CSSSelectorAbstract):
qualifiers = []
def asImmutable(self):
return CSSImmutableSelector(self.completeName, [q.asImmutable() for q in self.qualifiers])
def combineSelectors(klass, selectorA, op, selectorB):
selectorB.addCombination(op, selectorA)
return selectorB
combineSelectors = classmethod(combineSelectors)
def addCombination(self, op, other):
self._addQualifier(CSSSelectorCombinationQualifier(op, other))
def addHashId(self, hashId):
self._addQualifier(CSSSelectorHashQualifier(hashId))
def addClass(self, class_):
self._addQualifier(CSSSelectorClassQualifier(class_))
def addAttribute(self, attrName):
self._addQualifier(CSSSelectorAttributeQualifier(attrName))
def addAttributeOperation(self, attrName, op, attrValue):
self._addQualifier(CSSSelectorAttributeQualifier(attrName, op, attrValue))
def addPseudo(self, name):
self._addQualifier(CSSSelectorPseudoQualifier(name))
def addPseudoFunction(self, name, params):
self._addQualifier(CSSSelectorPseudoQualifier(name, params))
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _addQualifier(self, qualifier):
if self.qualifiers:
self.qualifiers.append(qualifier)
else:
self.qualifiers = [qualifier]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSImmutableSelector(CSSSelectorBase):
def __init__(self, completeName='*', qualifiers=()):
# print completeName, qualifiers
self.qualifiers = tuple(qualifiers)
CSSSelectorBase.__init__(self, completeName)
self._updateHash()
def fromSelector(klass, selector):
return klass(selector.completeName, selector.qualifiers)
fromSelector = classmethod(fromSelector)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Selector Qualifiers -- see CSSImmutableSelector
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSSelectorQualifierBase(object):
def isHash(self):
return False
def isClass(self):
return False
def isAttr(self):
return False
def isPseudo(self):
return False
def isCombiner(self):
return False
def asImmutable(self):
return self
def __str__(self):
return self.asString()
class CSSSelectorHashQualifier(CSSSelectorQualifierBase):
def __init__(self, hashId):
self.hashId = hashId
def isHash(self):
return True
def __hash__(self):
return hash((self.hashId,))
def asString(self):
return '#' + self.hashId
def matches(self, element):
return element.getIdAttr() == self.hashId
class CSSSelectorClassQualifier(CSSSelectorQualifierBase):
def __init__(self, classId):
self.classId = classId
def isClass(self):
return True
def __hash__(self):
return hash((self.classId,))
def asString(self):
return '.' + self.classId
def matches(self, element):
#return self.classId in element.getClassAttr().split()
attrValue = element.domElement.attributes.get('class')
if attrValue is not None:
return self.classId in attrValue.value.split()
return False
class CSSSelectorAttributeQualifier(CSSSelectorQualifierBase):
name, op, value = None, None, NotImplemented
def __init__(self, attrName, op=None, attrValue=NotImplemented):
self.name = attrName
if op is not self.op:
self.op = op
if attrValue is not self.value:
self.value = attrValue
def isAttr(self):
return True
def __hash__(self):
return hash((self.name, self.op, self.value))
def asString(self):
if self.value is NotImplemented:
return '[%s]' % (self.name,)
return '[%s%s%s]' % (self.name, self.op, self.value)
def matches(self, element):
if self.op is None:
return element.getAttr(self.name, NotImplemented) != NotImplemented
elif self.op == '=':
return self.value == element.getAttr(self.name, NotImplemented)
elif self.op == '~=':
#return self.value in element.getAttr(self.name, '').split()
attrValue = element.domElement.attributes.get(self.name)
if attrValue is not None:
return self.value in attrValue.value.split()
return False
elif self.op == '|=':
#return self.value in element.getAttr(self.name, '').split('-')
attrValue = element.domElement.attributes.get(self.name)
if attrValue is not None:
return self.value in attrValue.value.split('-')
return False
raise RuntimeError("Unknown operator %r for %r" % (self.op, self))
class CSSSelectorPseudoQualifier(CSSSelectorQualifierBase):
def __init__(self, attrName, params=()):
self.name = attrName
self.params = tuple(params)
def isPseudo(self):
return True
def __hash__(self):
return hash((self.name, self.params))
def asString(self):
if self.params:
return ':%s(%s)' % (self.name, self.params)
else:
return ':' + self.name
def matches(self, element):
return element.inPseudoState(self.name, self.params)
class CSSSelectorCombinationQualifier(CSSSelectorQualifierBase):
def __init__(self, op, selector):
self.op = op
self.selector = selector
def isCombiner(self):
return True
def __hash__(self):
return hash((self.op, self.selector))
def asImmutable(self):
return self.__class__(self.op, self.selector.asImmutable())
def asString(self):
return '%s%s' % (self.selector.asString(), self.op)
def matches(self, element):
if self.op == ' ':
if element is not None:
if element.matchesNode(self.selector.fullName):
try:
[None for qualifier in self.selector.qualifiers if
qualifier.matches(element) and stopIter(None)]
except StopIteration:
return True
return False
elif self.op == '>':
if element is not None:
if element.matchesNode(self.selector.fullName):
if self.selector.qualifiers[0].matches(element):
return True
return False
elif self.op == '+':
return self.selector.matches(element.getPreviousSibling())
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Misc
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSTerminalFunction(object):
def __init__(self, name, params):
self.name = name
self.params = params
def __repr__(self):
return '<CSS function: %s(%s)>' % (self.name, ', '.join(self.params))
class CSSTerminalOperator(tuple):
def __new__(klass, *args):
return tuple.__new__(klass, args)
def __repr__(self):
return 'op' + tuple.__repr__(self)
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Objects
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSDeclarations(dict):
pass
class CSSRuleset(dict):
def findCSSRulesFor(self, element, attrName):
ruleResults = [(nodeFilter, declarations) for nodeFilter, declarations in self.iteritems() if
(attrName in declarations) and (nodeFilter.matches(element))]
ruleResults.sort()
return ruleResults
def findCSSRuleFor(self, element, attrName):
try:
return [None for nodeFilter, declarations in self.iteritems() if
(attrName in declarations) and (nodeFilter.matches(element)) and stopIter(
(nodeFilter, declarations))]
except StopIteration, value:
return [value]
def mergeStyles(self, styles):
" XXX Bugfix for use in PISA "
for k, v in styles.iteritems():
if k in self and self[k]:
self[k] = copy.copy(self[k])
self[k].update(v)
else:
self[k] = v
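# Illustrative sketch (not part of the original source): the stopIter() calls
# in findCSSRuleFor() are just a trick for breaking out of a list
# comprehension with the first match it encounters; written as a plain loop
# the lookup is roughly:
#
#     def findCSSRuleFor(self, element, attrName):
#         for nodeFilter, declarations in self.iteritems():
#             if attrName in declarations and nodeFilter.matches(element):
#                 return [(nodeFilter, declarations)]
#         return []
#
# findCSSRulesFor() instead returns every matching rule, sorted by selector
# specificity, so callers can take the last entry as the winning declaration.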
class CSSInlineRuleset(CSSRuleset, CSSDeclarations):
def findCSSRulesFor(self, element, attrName):
if attrName in self:
return [(CSSInlineSelector(), self)]
return []
def findCSSRuleFor(self, *args, **kw):
# rule is packed in a list to differentiate "no rule" from a rule
# whose value evaluates as False
return self.findCSSRulesFor(*args, **kw)[-1:]
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Builder
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSBuilder(cssParser.CSSBuilderAbstract):
RulesetFactory = CSSRuleset
SelectorFactory = CSSMutableSelector
MediumSetFactory = set
DeclarationsFactory = CSSDeclarations
TermFunctionFactory = CSSTerminalFunction
TermOperatorFactory = CSSTerminalOperator
xmlnsSynonyms = {}
mediumSet = None
trackImportance = True
charset = None
def __init__(self, mediumSet=mediumSet, trackImportance=trackImportance):
self.setMediumSet(mediumSet)
self.setTrackImportance(trackImportance)
def isValidMedium(self, mediums):
if not mediums:
return False
if 'all' in mediums:
return True
mediums = self.MediumSetFactory(mediums)
return bool(self.getMediumSet().intersection(mediums))
def getMediumSet(self):
return self.mediumSet
def setMediumSet(self, mediumSet):
self.mediumSet = self.MediumSetFactory(mediumSet)
def updateMediumSet(self, mediumSet):
self.getMediumSet().update(mediumSet)
def getTrackImportance(self):
return self.trackImportance
def setTrackImportance(self, trackImportance=True):
self.trackImportance = trackImportance
#~ helpers ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def _pushState(self):
_restoreState = self.__dict__
self.__dict__ = self.__dict__.copy()
self._restoreState = _restoreState
self.namespaces = {}
def _popState(self):
self.__dict__ = self._restoreState
def _declarations(self, declarations, DeclarationsFactory=None):
DeclarationsFactory = DeclarationsFactory or self.DeclarationsFactory
if self.trackImportance:
normal, important = [], []
for d in declarations:
if d[-1]:
important.append(d[:-1])
else:
normal.append(d[:-1])
return DeclarationsFactory(normal), DeclarationsFactory(important)
else:
return DeclarationsFactory(declarations)
def _xmlnsGetSynonym(self, uri):
# Don't forget to substitute our namespace synonyms!
return self.xmlnsSynonyms.get(uri or None, uri) or None
#~ css results ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def beginStylesheet(self):
self._pushState()
def endStylesheet(self):
self._popState()
def stylesheet(self, stylesheetElements, stylesheetImports):
# XXX Updated for PISA
if self.trackImportance:
normal, important = self.RulesetFactory(), self.RulesetFactory()
for normalStylesheet, importantStylesheet in stylesheetImports:
normal.mergeStyles(normalStylesheet)
important.mergeStyles(importantStylesheet)
for normalStyleElement, importantStyleElement in stylesheetElements:
normal.mergeStyles(normalStyleElement)
important.mergeStyles(importantStyleElement)
return normal, important
else:
result = self.RulesetFactory()
for stylesheet in stylesheetImports:
result.mergeStyles(stylesheet)
for styleElement in stylesheetElements:
result.mergeStyles(styleElement)
return result
def beginInline(self):
self._pushState()
def endInline(self):
self._popState()
def specialRules(self, declarations):
return cssSpecial.parseSpecialRules(declarations)
def inline(self, declarations):
declarations = self.specialRules(declarations)
return self._declarations(declarations, CSSInlineRuleset)
def ruleset(self, selectors, declarations):
# XXX Modified for pisa!
declarations = self.specialRules(declarations)
# XXX Modified for pisa!
if self.trackImportance:
normalDecl, importantDecl = self._declarations(declarations)
normal, important = self.RulesetFactory(), self.RulesetFactory()
for s in selectors:
s = s.asImmutable()
if normalDecl:
normal[s] = normalDecl
if importantDecl:
important[s] = importantDecl
return normal, important
else:
declarations = self._declarations(declarations)
result = [(s.asImmutable(), declarations) for s in selectors]
return self.RulesetFactory(result)
#~ css namespaces ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def resolveNamespacePrefix(self, nsPrefix, name):
if nsPrefix == '*':
return (nsPrefix, '*', name)
xmlns = self.namespaces.get(nsPrefix, None)
xmlns = self._xmlnsGetSynonym(xmlns)
return (nsPrefix, xmlns, name)
#~ css @ directives ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def atCharset(self, charset):
self.charset = charset
def atImport(self, import_, mediums, cssParser):
if self.isValidMedium(mediums):
return cssParser.parseExternal(import_)
return None
def atNamespace(self, nsprefix, uri):
self.namespaces[nsprefix] = uri
def atMedia(self, mediums, ruleset):
if self.isValidMedium(mediums):
return ruleset
return None
def atPage(self, page, pseudopage, declarations):
"""
This is overridden by xhtml2pdf.context.pisaCSSBuilder
"""
return self.ruleset([self.selector('*')], declarations)
def atFontFace(self, declarations):
"""
This is overridden by xhtml2pdf.context.pisaCSSBuilder
"""
return self.ruleset([self.selector('*')], declarations)
def atIdent(self, atIdent, cssParser, src):
return src, NotImplemented
#~ css selectors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def selector(self, name):
return self.SelectorFactory(name)
def combineSelectors(self, selectorA, op, selectorB):
return self.SelectorFactory.combineSelectors(selectorA, op, selectorB)
#~ css declarations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def property(self, name, value, important=False):
if self.trackImportance:
return (name, value, important)
return (name, value)
def combineTerms(self, termA, op, termB):
if op in (',', ' '):
if isinstance(termA, list):
termA.append(termB)
return termA
return [termA, termB]
elif op is None and termB is None:
return [termA]
else:
if isinstance(termA, list):
# Bind these "closer" than the list operators -- i.e. work on
# the (recursively) last element of the list
termA[-1] = self.combineTerms(termA[-1], op, termB)
return termA
return self.TermOperatorFactory(termA, op, termB)
def termIdent(self, value):
return value
def termNumber(self, value, units=None):
if units:
return value, units
return value
def termRGB(self, value):
return value
def termURI(self, value):
return value
def termString(self, value):
return value
def termUnicodeRange(self, value):
return value
def termFunction(self, name, value):
return self.TermFunctionFactory(name, value)
def termUnknown(self, src):
return src, NotImplemented
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#~ CSS Parser -- finally!
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class CSSParser(cssParser.CSSParser):
CSSBuilderFactory = CSSBuilder
def __init__(self, cssBuilder=None, create=True, **kw):
if not cssBuilder and create:
assert cssBuilder is None
cssBuilder = self.createCSSBuilder(**kw)
cssParser.CSSParser.__init__(self, cssBuilder)
def createCSSBuilder(self, **kw):
return self.CSSBuilderFactory(**kw)
def parseExternal(self, cssResourceName):
if os.path.isfile(cssResourceName):
cssFile = file(cssResourceName, 'r')
return self.parseFile(cssFile, True)
raise RuntimeError("Cannot resolve external CSS file: \"%s\"" % cssResourceName)
| mit |
zachriggle/idapython | pywraps/sidc.py | 15 | 15035 | # ----------------------------------------------------------------------
#
# Misc constants
#
UA_MAXOP = 6
# ----------------------------------------------------------------------
# instruc_t related constants
#
# instruc_t.feature
#
CF_STOP = 0x00001 # Instruction doesn't pass execution to the next instruction
CF_CALL = 0x00002 # CALL instruction (should make a procedure here)
CF_CHG1 = 0x00004 # The instruction modifies the first operand
CF_CHG2 = 0x00008 # The instruction modifies the second operand
CF_CHG3 = 0x00010 # The instruction modifies the third operand
CF_CHG4 = 0x00020 # The instruction modifies the fourth operand
CF_CHG5 = 0x00040 # The instruction modifies the fifth operand
CF_CHG6 = 0x00080 # The instruction modifies the sixth operand
CF_USE1 = 0x00100 # The instruction uses value of the first operand
CF_USE2 = 0x00200 # The instruction uses value of the second operand
CF_USE3 = 0x00400 # The instruction uses value of the third operand
CF_USE4 = 0x00800 # The instruction uses value of the fourth operand
CF_USE5 = 0x01000 # The instruction uses value of the fifth operand
CF_USE6 = 0x02000 # The instruction uses value of the sixth operand
CF_JUMP = 0x04000 # The instruction passes execution using indirect jump or call (thus needs additional analysis)
CF_SHFT = 0x08000 # Bit-shift instruction (shl,shr...)
CF_HLL = 0x10000 # Instruction may be present in a high level language function.
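# Illustrative example (not part of the original file): a processor module
# ORs these bits together to describe each entry of its instruction table,
# e.g. a call that reads its first operand could be declared with the
# feature value
#
#     CF_USE1 | CF_CALL
#
# and an unconditional "stop" style instruction (no fall-through) with CF_STOP.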
# ----------------------------------------------------------------------
# op_t related constants
#
# op_t.type
# Description Data field
o_void = 0 # No Operand ----------
o_reg = 1 # General Register (al,ax,es,ds...) reg
o_mem = 2 # Direct Memory Reference (DATA) addr
o_phrase = 3 # Memory Ref [Base Reg + Index Reg] phrase
o_displ = 4 # Memory Reg [Base Reg + Index Reg + Displacement] phrase+addr
o_imm = 5 # Immediate Value value
o_far = 6 # Immediate Far Address (CODE) addr
o_near = 7 # Immediate Near Address (CODE) addr
o_idpspec0 = 8 # Processor specific type
o_idpspec1 = 9 # Processor specific type
o_idpspec2 = 10 # Processor specific type
o_idpspec3 = 11 # Processor specific type
o_idpspec4 = 12 # Processor specific type
o_idpspec5 = 13 # Processor specific type
# There can be more processor specific types
#
# op_t.dtyp
#
dt_byte = 0 # 8 bit
dt_word = 1 # 16 bit
dt_dword = 2 # 32 bit
dt_float = 3 # 4 byte
dt_double = 4 # 8 byte
dt_tbyte = 5 # variable size (ph.tbyte_size)
dt_packreal = 6 # packed real format for mc68040
dt_qword = 7 # 64 bit
dt_byte16 = 8 # 128 bit
dt_code = 9 # ptr to code (not used?)
dt_void = 10 # none
dt_fword = 11 # 48 bit
dt_bitfild = 12 # bit field (mc680x0)
dt_string = 13 # pointer to asciiz string
dt_unicode = 14 # pointer to unicode string
dt_3byte = 15 # 3-byte data
dt_ldbl = 16 # long double (which may be different from tbyte)
dt_byte32 = 17 # 256 bit
dt_byte64 = 18 # 512 bit
#
# op_t.flags
#
OF_NO_BASE_DISP = 0x80 # o_displ: if set, the base displacement (x.addr) doesn't exist; meaningful only for o_displ type
OF_OUTER_DISP = 0x40 # o_displ: if set, an outer displacement (x.value) exists; meaningful only for o_displ type
PACK_FORM_DEF = 0x20 # !o_reg + dt_packreal: packed factor defined
OF_NUMBER = 0x10 # if set, the operand can only be output as a number
OF_SHOW = 0x08 # if clear, the operand is hidden and should not be displayed
#
# insn_t.flags
#
INSN_MACRO = 0x01 # macro instruction
INSN_MODMAC = 0x02 # macros: may modify the database to make room for the macro insn
# ----------------------------------------------------------------------
# asm_t related constants
#
# asm_t.flag
#
AS_OFFST = 0x00000001 # offsets are 'offset xxx' ?
AS_COLON = 0x00000002 # create colons after data names ?
AS_UDATA = 0x00000004 # can use '?' in data directives
AS_2CHRE = 0x00000008 # double char constants are: "xy
AS_NCHRE = 0x00000010 # char constants are: 'x
AS_N2CHR = 0x00000020 # can't have 2 byte char consts
# ASCII directives:
AS_1TEXT = 0x00000040 # 1 text per line, no bytes
AS_NHIAS = 0x00000080 # no characters with high bit
AS_NCMAS = 0x00000100 # no commas in ascii directives
AS_HEXFM = 0x00000E00 # format of hex numbers:
ASH_HEXF0 = 0x00000000 # 34h
ASH_HEXF1 = 0x00000200 # h'34
ASH_HEXF2 = 0x00000400 # 34
ASH_HEXF3 = 0x00000600 # 0x34
ASH_HEXF4 = 0x00000800 # $34
ASH_HEXF5 = 0x00000A00 # <^R > (radix)
AS_DECFM = 0x00003000 # format of dec numbers:
ASD_DECF0 = 0x00000000 # 34
ASD_DECF1 = 0x00001000 # #34
ASD_DECF2 = 0x00002000 # 34.
ASD_DECF3 = 0x00003000 # .34
AS_OCTFM = 0x0001C000 # format of octal numbers:
ASO_OCTF0 = 0x00000000 # 123o
ASO_OCTF1 = 0x00004000 # 0123
ASO_OCTF2 = 0x00008000 # 123
ASO_OCTF3 = 0x0000C000 # @123
ASO_OCTF4 = 0x00010000 # o'123
ASO_OCTF5 = 0x00014000 # 123q
ASO_OCTF6 = 0x00018000 # ~123
AS_BINFM = 0x000E0000 # format of binary numbers:
ASB_BINF0 = 0x00000000 # 010101b
ASB_BINF1 = 0x00020000 # ^B010101
ASB_BINF2 = 0x00040000 # %010101
ASB_BINF3 = 0x00060000 # 0b1010101
ASB_BINF4 = 0x00080000 # b'1010101
ASB_BINF5 = 0x000A0000 # b'1010101'
AS_UNEQU = 0x00100000 # replace undefined data items
# with EQU (for ANTA's A80)
AS_ONEDUP = 0x00200000 # One array definition per line
AS_NOXRF = 0x00400000 # Disable xrefs during the output file generation
AS_XTRNTYPE = 0x00800000 # Assembler understands type of extrn
# symbols as ":type" suffix
AS_RELSUP = 0x01000000 # Checkarg: 'and','or','xor' operations
# with addresses are possible
AS_LALIGN = 0x02000000 # Labels at "align" keyword
# are supported.
AS_NOCODECLN = 0x04000000 # don't create colons after code names
AS_NOTAB = 0x08000000 # Disable tabulation symbols during the output file generation
AS_NOSPACE = 0x10000000 # No spaces in expressions
AS_ALIGN2 = 0x20000000 # .align directive expects an exponent rather than a power of 2
# (.align 5 means to align at 32byte boundary)
AS_ASCIIC = 0x40000000 # ascii directive accepts C-like
# escape sequences (\n,\x01 and similar)
AS_ASCIIZ = 0x80000000 # ascii directive inserts implicit
# zero byte at the end
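# Illustrative example (not part of the original file): an asm_t description
# combines the bits above into a single 'flag' value; for instance an
# assembler that accepts '?' in data directives, writes hex numbers as 0x34
# and understands C-like escapes in ascii directives would use
#
#     AS_UDATA | ASH_HEXF3 | AS_ASCIIC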
# ----------------------------------------------------------------------
# processor_t related constants
IDP_INTERFACE_VERSION = 76
CUSTOM_CMD_ITYPE = 0x8000
REG_SPOIL = 0x80000000
REAL_ERROR_FORMAT = -1 # format not supported by the current .idp
REAL_ERROR_RANGE = -2 # number too big (or too small) to store (mem NOT modified)
REAL_ERROR_BADDATA = -3 # illegal real data for load (IEEE data not filled)
#
# Check whether the operand is relative to stack pointer or frame pointer.
# This function is used to determine how to output a stack variable
# This function may be absent. If it is absent, then all operands
# are sp based by default.
# Define this function only if some stack references use frame pointer
# instead of stack pointer.
# returns flags:
OP_FP_BASED = 0x00000000 # operand is FP based
OP_SP_BASED = 0x00000001 # operand is SP based
OP_SP_ADD = 0x00000000 # operand value is added to the pointer
OP_SP_SUB = 0x00000002 # operand value is subtracted from the pointer
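# Illustrative example (not part of the original file): the callback described
# above simply ORs these bits together, e.g. an operand addressed relative to
# the stack pointer whose value is added to it would be reported as
#
#     OP_SP_BASED | OP_SP_ADD
#
# while a frame-pointer based operand would return OP_FP_BASED.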
#
# processor_t.flag
#
PR_SEGS = 0x000001 # has segment registers?
PR_USE32 = 0x000002 # supports 32-bit addressing?
PR_DEFSEG32 = 0x000004 # segments are 32-bit by default
PR_RNAMESOK = 0x000008 # allow the user to use register names for location names
PR_ADJSEGS = 0x000020 # IDA may adjust segments moving their starting/ending addresses.
PR_DEFNUM = 0x0000C0 # default number representation:
PRN_HEX = 0x000000 # hex
PRN_OCT = 0x000040 # octal
PRN_DEC = 0x000080 # decimal
PRN_BIN = 0x0000C0 # binary
PR_WORD_INS = 0x000100 # instruction codes are grouped as 2 bytes in the binary line prefix
PR_NOCHANGE = 0x000200 # The user can't change segments and code/data attributes (display only)
PR_ASSEMBLE = 0x000400 # Module has a built-in assembler and understands IDP_ASSEMBLE
PR_ALIGN = 0x000800 # All data items should be aligned properly
PR_TYPEINFO = 0x001000 # the processor module supports
# type information callbacks
# ALL OF THEM SHOULD BE IMPLEMENTED!
# (the ones >= decorate_name)
PR_USE64 = 0x002000 # supports 64-bit addressing?
PR_SGROTHER = 0x004000 # the segment registers don't contain
# the segment selectors, something else
PR_STACK_UP = 0x008000 # the stack grows up
PR_BINMEM = 0x010000 # the processor module provides correct
# segmentation for binary files
# (i.e. it creates additional segments)
# The kernel will not ask the user
# to specify the RAM/ROM sizes
PR_SEGTRANS = 0x020000 # the processor module supports
# the segment translation feature
# (it means it calculates the code
# addresses using the codeSeg() function)
PR_CHK_XREF = 0x040000 # don't allow near xrefs between segments
# with different bases
PR_NO_SEGMOVE = 0x080000 # the processor module doesn't support move_segm()
# (i.e. the user can't move segments)
PR_FULL_HIFXP = 0x100000 # REF_VHIGH operand value contains full operand
# not only the high bits. Meaningful if ph.high_fixup_bits
PR_USE_ARG_TYPES = 0x200000 # use ph.use_arg_types callback
PR_SCALE_STKVARS = 0x400000 # use ph.get_stkvar_scale callback
PR_DELAYED = 0x800000 # has delayed jumps and calls
PR_ALIGN_INSN = 0x1000000 # allow ida to create alignment instructions
# arbitrarily. Since these instructions
# might lead to other wrong instructions
# and spoil the listing, IDA does not create
# them by default anymore
PR_PURGING = 0x2000000 # there are calling conventions which may
# purge bytes from the stack
PR_CNDINSNS = 0x4000000 # has conditional instructions
PR_USE_TBYTE = 0x8000000 # BTMT_SPECFLT means _TBYTE type
PR_DEFSEG64 = 0x10000000 # segments are 64-bit by default
# ----------------------------------------------------------------------
OOF_SIGNMASK = 0x0003 # sign symbol (+/-) output:
OOFS_IFSIGN = 0x0000 # output sign if needed
OOFS_NOSIGN = 0x0001 # don't output sign, forbid the user to change the sign
OOFS_NEEDSIGN = 0x0002 # always output the sign (+/-)
OOF_SIGNED = 0x0004 # output as signed if < 0
OOF_NUMBER = 0x0008 # always as a number
OOF_WIDTHMASK = 0x0070 # width of value in bits:
OOFW_IMM = 0x0000 # take from x.dtyp
OOFW_8 = 0x0010 # 8 bit width
OOFW_16 = 0x0020 # 16 bit width
OOFW_24 = 0x0030 # 24 bit width
OOFW_32 = 0x0040 # 32 bit width
OOFW_64 = 0x0050 # 64 bit width
OOF_ADDR = 0x0080 # output x.addr, otherwise x.value
OOF_OUTER = 0x0100 # output outer operand
OOF_ZSTROFF = 0x0200 # meaningful only if isStroff(uFlag)
# append a struct field name if
# the field offset is zero?
# if AFL_ZSTROFF is set, then this flag
# is ignored.
OOF_NOBNOT = 0x0400 # prohibit use of binary not
OOF_SPACES = 0x0800 # do not suppress leading spaces
# currently works only for floating point numbers
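# Illustrative example (not part of the original file): these OutValue flags
# are also OR'ed together; printing a signed 16-bit immediate whose sign is
# shown only when needed would use
#
#     OOF_SIGNED | OOFS_IFSIGN | OOFW_16
#
# and printing the address field (x.addr) instead of x.value adds OOF_ADDR.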
# ----------------------------------------------------------------------
class insn_t(object):
def __init__(self, noperands = UA_MAXOP):
self.auxpref = 0
self.cs = 0
self.ea = 0
self.flags = 0
self.insnpref = 0
self.ip = 0
self.itype = 0
self.n = 0
self.segpref = 0
self.size = 0
self.Operands = []
# store the number of operands
self.n = noperands
# create operands
for i in xrange(0, noperands):
op = op_t()
op.n = i
self.Operands.append(op)
setattr(self, 'Op%d' % (i+1), op)
def __getitem__(self, i):
return self.Operands[i]
# ----------------------------------------------------------------------
class op_t(object):
def __init__(self):
self.addr = 0
self.dtyp = 0
self.flags = 0
self.n = 0
self.offb = 0
self.offo = 0
self.reg = 0
self.specval = 0
self.specflag1 = 0
self.specflag2 = 0
self.specflag3 = 0
self.specflag4 = 0
self.type = 0
self.value = 0
# make sure reg and phrase have the same value
def __setattr__(self, name, value):
if name == 'reg' or name == 'phrase':
object.__setattr__(self, 'reg', value)
object.__setattr__(self, 'phrase', value)
else:
object.__setattr__(self, name, value)
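# Illustrative sketch (not part of the original file): analysis code in a
# processor module would typically fill these helper classes roughly like
#
#     insn = insn_t()
#     insn.itype = 1              # index into the module's instruction table
#     insn.Op1.type = o_reg       # first operand: register 0
#     insn.Op1.reg = 0
#     insn.Op2.type = o_imm       # second operand: 16-bit immediate
#     insn.Op2.value = 0x1234
#     insn.Op2.dtyp = dt_word
#     insn.size = 4               # total encoded length in bytes
#
# insn.Op1 and insn[0] refer to the same op_t instance.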
| bsd-3-clause |
tryton/webdav | protocol.py | 1 | 21510 | # This file is part of Tryton. The COPYRIGHT file at the top level of
# this repository contains the full copyright notices and license terms.
import SocketServer
import socket
import BaseHTTPServer
import urlparse
import time
import urllib
import logging
from threading import local, Thread
import xml.dom.minidom
import base64
from pywebdav.lib import WebDAVServer, iface
from pywebdav.lib.errors import DAV_Error, DAV_NotFound, DAV_Secret, \
DAV_Forbidden, DAV_Requested_Range_Not_Satisfiable
from pywebdav.lib.constants import COLLECTION, DAV_VERSION_1, DAV_VERSION_2
from pywebdav.lib.utils import get_urifilename, quote_uri
from pywebdav.lib.davcmd import copyone, copytree, moveone, movetree, \
delone, deltree
from trytond.security import login
from trytond import __version__
from trytond.pool import Pool
from trytond.transaction import Transaction
from trytond.cache import Cache
from trytond.config import config
from trytond.exceptions import UserError, UserWarning, ConcurrencyException
domimpl = xml.dom.minidom.getDOMImplementation()
DAV_VERSION_1['version'] += ',access-control'
DAV_VERSION_2['version'] += ',access-control'
logger = logging.getLogger(__name__)
def SSLSocket(socket):
# Let the import error raise only when used
import ssl
return ssl.wrap_socket(socket,
server_side=True,
certfile=config.get('ssl', 'certificate'),
keyfile=config.get('ssl', 'privatekey'),
ssl_version=ssl.PROTOCOL_SSLv23)
class Local(local):
def __init__(self):
super(Local, self).__init__()
self.cache = {}
LOCAL = Local()
def setupConfig():
class ConfigDAV:
lockemulation = False
verbose = False
baseurl = ''
def getboolean(self, name):
return bool(self.get(name))
def get(self, name, default=None):
return getattr(self, name, default)
class Config:
DAV = ConfigDAV()
return Config()
class BaseThreadedHTTPServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
timeout = 1
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET,
socket.SO_REUSEADDR, 1)
self.socket.setsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE, 1)
BaseHTTPServer.HTTPServer.server_bind(self)
class SecureThreadedHTTPServer(BaseThreadedHTTPServer):
def __init__(self, server_address, HandlerClass):
BaseThreadedHTTPServer.__init__(self, server_address, HandlerClass)
self.socket = socket.socket(self.address_family, self.socket_type)
self.server_bind()
self.server_activate()
class WebDAVServerThread(Thread):
def __init__(self, interface, port, secure=False):
Thread.__init__(self, name='WebDAVServerThread')
self.secure = secure
self.ipv6 = False
for family, _, _, _, _ in socket.getaddrinfo(interface or None, port,
socket.AF_UNSPEC, socket.SOCK_STREAM):
if family == socket.AF_INET6:
self.ipv6 = True
break
if self.secure:
handler_class = SecureWebDAVAuthRequestHandler
server_class = SecureThreadedHTTPServer
if self.ipv6:
server_class = SecureThreadedHTTPServer6
else:
handler_class = WebDAVAuthRequestHandler
server_class = BaseThreadedHTTPServer
if self.ipv6:
server_class = BaseThreadedHTTPServer6
handler_class._config = setupConfig()
handler_class.IFACE_CLASS = TrytonDAVInterface(interface, port, secure)
handler_class.IFACE_CLASS.baseurl = handler_class._config.DAV.baseurl
self.server = server_class((interface, port), handler_class)
def stop(self):
self.server.shutdown()
self.server.socket.shutdown(socket.SHUT_RDWR)
self.server.server_close()
return
def run(self):
self.server.serve_forever()
return True
class BaseThreadedHTTPServer6(BaseThreadedHTTPServer):
address_family = socket.AF_INET6
class SecureThreadedHTTPServer6(SecureThreadedHTTPServer):
address_family = socket.AF_INET6
class TrytonDAVInterface(iface.dav_interface):
def __init__(self, interface, port, secure=False):
if secure:
protocol = 'https'
else:
protocol = 'http'
self.baseuri = '%s://%s:%s/' % (protocol, interface or
socket.gethostname(), port)
self.verbose = False
def _log_exception(self, exception):
if isinstance(exception, (ConcurrencyException, UserError,
UserWarning, DAV_Error, DAV_NotFound, DAV_Secret,
DAV_Forbidden)):
logger.debug('Exception %s', exception, exc_info=True)
else:
logger.error('Exception %s', exception, exc_info=True)
@staticmethod
def get_dburi(uri):
uri = urlparse.urlsplit(uri)[2]
if uri and uri[0] == '/':
uri = uri[1:]
dbname, uri = (uri.split('/', 1) + [None])[0:2]
if dbname:
dbname = urllib.unquote_plus(dbname)
if uri:
uri = urllib.unquote_plus(uri)
return dbname, uri
def _get_dburi(self, uri):
return TrytonDAVInterface.get_dburi(uri)
def get_childs(self, uri, filter=None):
res = []
dbname, dburi = self._get_dburi(uri)
if not dbname:
with Transaction().start(None, 0, close=True) as transaction:
list_ = transaction.database.list()
for dbname in list_:
res.append(urlparse.urljoin(uri, dbname))
return res
pool = Pool(Transaction().database.name)
try:
Collection = pool.get('webdav.collection')
scheme, netloc, path, params, query, fragment = \
urlparse.urlparse(uri)
if path[-1:] != '/':
path += '/'
for child in Collection.get_childs(dburi, filter=filter,
cache=LOCAL.cache):
res.append(urlparse.urlunparse((scheme, netloc,
path + child.encode('utf-8'), params, query,
fragment)))
except KeyError:
return res
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
return res
def get_data(self, uri, range=None):
dbname, dburi = self._get_dburi(uri)
if not dbname or (self.exists(uri) and self.is_collection(uri)):
res = ('<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 '
'Transitional//EN">')
res += '<html>'
res += '<head>'
res += ('<meta http-equiv="Content-Type" content="text/html; '
'charset=utf-8">')
res += '<title>Tryton - WebDAV - %s</title>' % (dbname or 'root')
res += '</head>'
res += '<body>'
res += '<h2>Collection: %s</h2>' % (get_urifilename(uri) or '/')
res += '<ul>'
if dbname:
scheme, netloc, path, params, query, fragment = \
urlparse.urlparse(uri)
if path[-1:] != '/':
path += '/'
res += ('<li><a href="%s">..</a></li>'
% urlparse.urlunparse((scheme, netloc, path + '..',
params, query, fragment)))
childs = self.get_childs(uri)
childs.sort()
for child in childs:
res += ('<li><a href="%s">%s</a></li>'
% (quote_uri(child), get_urifilename(child)))
res += '</ul>'
res += '<hr noshade>'
res += ('<em>Powered by <a href="http://www.tryton.org/">'
'Tryton</a> version %s</em>' % __version__)
res += '</body>'
res += '</html>'
return res
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.get_data(dburi, cache=LOCAL.cache)
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
if range is None:
return res
size = len(res)
if range[1] == '':
range[1] = size
else:
range[1] = int(range[1])
if range[1] > size:
range[1] = size
if range[0] == '':
range[0] = size - range[1]
else:
range[0] = int(range[0])
if range[0] > size:
raise DAV_Requested_Range_Not_Satisfiable
return res[range[0]:range[1]]
def put(self, uri, data, content_type=''):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
raise DAV_Forbidden
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.put(dburi, data, content_type, cache=LOCAL.cache)
Transaction().commit()
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
Transaction().rollback()
raise
except Exception, exception:
self._log_exception(exception)
Transaction().rollback()
raise DAV_Error(500)
if res:
uparts = list(urlparse.urlsplit(uri))
uparts[2] = res
res = urlparse.urlunsplit(uparts)
return res
def mkcol(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
raise DAV_Forbidden
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.mkcol(dburi, cache=LOCAL.cache)
Transaction().commit()
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
Transaction().rollback()
raise
except Exception, exception:
self._log_exception(exception)
Transaction().rollback()
raise DAV_Error(500)
return res
def _get_dav_resourcetype(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
return COLLECTION
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.get_resourcetype(dburi, cache=LOCAL.cache)
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
return res
def _get_dav_displayname(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
return uri.split('/')[-1]
pool = Pool(Transaction().database.name)
try:
Collection = pool.get('webdav.collection')
res = Collection.get_displayname(dburi, cache=LOCAL.cache)
except KeyError:
raise DAV_NotFound
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
return res
def _get_dav_getcontentlength(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
return '0'
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.get_contentlength(dburi, cache=LOCAL.cache)
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
return res
def _get_dav_getcontenttype(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or self.is_collection(uri):
return "text/html"
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.get_contenttype(dburi, cache=LOCAL.cache)
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
return res
def _get_dav_getetag(self, uri):
return '"' + str(self.get_lastmodified(uri)) + '"'
def get_creationdate(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
return time.time()
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.get_creationdate(dburi, cache=LOCAL.cache)
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
return res
def get_lastmodified(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
return time.time()
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.get_lastmodified(dburi, cache=LOCAL.cache)
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
return res
def rmcol(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
return 403
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.rmcol(dburi, cache=LOCAL.cache)
Transaction().commit()
except Exception, exception:
self._log_exception(exception)
Transaction().rollback()
return 500
return res
def rm(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
return 403
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.rm(dburi, cache=LOCAL.cache)
Transaction().commit()
except Exception, exception:
self._log_exception(exception)
Transaction().rollback()
return 500
return res
def exists(self, uri):
dbname, dburi = self._get_dburi(uri)
if not dbname or not dburi:
return 1
pool = Pool(Transaction().database.name)
Collection = pool.get('webdav.collection')
try:
res = Collection.exists(dburi, cache=LOCAL.cache)
except (DAV_Error, DAV_NotFound, DAV_Secret, DAV_Forbidden), exception:
self._log_exception(exception)
raise
except Exception, exception:
self._log_exception(exception)
raise DAV_Error(500)
return res
def is_collection(self, uri):
if self._get_dav_resourcetype(uri) == COLLECTION:
return 1
return 0
def copyone(self, src, dst, overwrite):
return copyone(self, src, dst, overwrite)
def copytree(self, src, dst, overwrite):
return copytree(self, src, dst, overwrite)
def moveone(self, src, dst, overwrite):
return moveone(self, src, dst, overwrite)
def movetree(self, src, dst, overwrite):
return movetree(self, src, dst, overwrite)
def delone(self, uri):
return delone(self, uri)
def deltree(self, uri):
return deltree(self, uri)
def copy(self, src, dst):
content = self._get_dav_getcontenttype(src)
data = self.get_data(src)
self.put(dst, data, content)
return 201
def copycol(self, src, dst):
return self.mkcol(dst)
def _get_dav_current_user_privilege_set(self, uri):
dbname, dburi = self._get_dburi(uri)
privileges = []
if not dbname or not dburi:
privileges = ['create', 'read', 'write', 'delete']
else:
pool = Pool(Transaction().database.name)
try:
Collection = pool.get('webdav.collection')
privileges = Collection.current_user_privilege_set(dburi,
cache=LOCAL.cache)
except KeyError:
pass
except Exception, exception:
self._log_exception(exception)
pass
doc = domimpl.createDocument(None, 'privilege', None)
privilege = doc.documentElement
privilege.tagName = 'D:privilege'
if 'create' in privileges:
bind = doc.createElement('D:bind')
privilege.appendChild(bind)
if 'read' in privileges:
read = doc.createElement('D:read')
privilege.appendChild(read)
read_acl = doc.createElement('D:read-acl')
privilege.appendChild(read_acl)
if 'write' in privileges:
write = doc.createElement('D:write')
privilege.appendChild(write)
write_content = doc.createElement('D:write-content')
privilege.appendChild(write_content)
write_properties = doc.createElement('D:write-properties')
privilege.appendChild(write_properties)
if 'delete' in privileges:
unbind = doc.createElement('D:unbind')
privilege.appendChild(unbind)
return privilege
TrytonDAVInterface.PROPS['DAV:'] = tuple(list(TrytonDAVInterface.PROPS['DAV:']
) + ['current-user-privilege-set'])
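# Illustrative note (not part of the original module): URIs handled by
# TrytonDAVInterface are of the form "/<database>/<path inside the
# webdav.collection tree>", so get_dburi() splits, for example,
#
#     TrytonDAVInterface.get_dburi('/mydb/Documents/invoice.odt')
#     # -> ('mydb', 'Documents/invoice.odt')
#     TrytonDAVInterface.get_dburi('/mydb')
#     # -> ('mydb', None)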
class WebDAVAuthRequestHandler(WebDAVServer.DAVRequestHandler):
def finish(self):
WebDAVServer.DAVRequestHandler.finish(self)
if not Transaction().connection:
return
dbname = Transaction().database.name
Transaction().__exit__(None, None, None)
if dbname:
with Transaction().start(dbname, 0):
Cache.resets(dbname)
def parse_request(self):
if not BaseHTTPServer.BaseHTTPRequestHandler.parse_request(self):
return False
authorization = self.headers.get('Authorization', '')
if authorization:
scheme, credentials = authorization.split()
if scheme != 'Basic':
self.send_error(501)
return False
credentials = base64.decodestring(credentials)
# split only on the first colon so that passwords may contain ':'
user, password = credentials.split(':', 1)
if not self.get_userinfo(user, password, self.command):
self.send_autherror(401, "Authorization Required")
return False
else:
if not self.get_userinfo(None, None, self.command):
self.send_autherror(401, "Authorization Required")
return False
return True
def get_userinfo(self, user, password, command=''):
path = urlparse.urlparse(self.path).path
dbname = urllib.unquote_plus(path.split('/', 2)[1])
with Transaction().start(None, 0, close=True) as transaction:
databases = transaction.database.list()
if not dbname or dbname not in databases:
return True
if user:
parameters = {'password': password}
user = login(dbname, user, parameters, cache=False)
if not user:
return None
user = int(user)
else:
url = urlparse.urlparse(self.path)
query = urlparse.parse_qs(url.query)
path = url.path[len(dbname) + 2:]
if 'key' in query:
key, = query['key']
with Transaction().start(dbname, 0) as transaction:
database_list = Pool.database_list()
pool = Pool(dbname)
if dbname not in database_list:
pool.init()
Share = pool.get('webdav.share')
user = Share.get_login(key, command, path)
transaction.commit()
if not user:
return None
Transaction().start(dbname, user, context={
'_check_access': True,
}, autocommit=True)
Cache.clean(dbname)
return user
class SecureWebDAVAuthRequestHandler(WebDAVAuthRequestHandler):
def setup(self):
self.request = SSLSocket(self.request)
WebDAVAuthRequestHandler.setup(self)
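# Illustrative usage sketch (not part of the original module): a server
# embedding this module could start the WebDAV service roughly like
#
#     thread = WebDAVServerThread('localhost', 8080, secure=False)
#     thread.start()
#     ...
#     thread.stop()
#
# With secure=True the handler wraps the request socket with SSLSocket(),
# using the certificate and private key configured in the [ssl] section.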
| gpl-3.0 |
chrxr/wagtail | wagtail/contrib/wagtailapi/tests/test_pages.py | 7 | 28976 | from __future__ import absolute_import, unicode_literals
import collections
import json
import mock
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.utils import override_settings
from wagtail.contrib.wagtailapi import signal_handlers
from wagtail.tests.demosite import models
from wagtail.tests.testapp.models import StreamPage
from wagtail.wagtailcore.models import Page
def get_total_page_count():
# Need to take away 1 as the root page is invisible over the API
return Page.objects.live().public().count() - 1
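# Note (illustrative, not part of the original tests): the get_response()
# helpers below pass their keyword arguments straight to the test client as
# query-string parameters, so e.g. type='demosite.BlogEntryPage',
# fields='title,date' requests
# /api/v1/pages/?type=demosite.BlogEntryPage&fields=title,date
# against the wagtailapi_v1 URL namespace.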
class TestPageListing(TestCase):
fixtures = ['demosite.json']
def get_response(self, **params):
return self.client.get(reverse('wagtailapi_v1:pages:listing'), params)
def get_page_id_list(self, content):
return [page['id'] for page in content['pages']]
# BASIC TESTS
def test_basic(self):
response = self.get_response()
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check that the total count is there and correct
self.assertIn('total_count', content['meta'])
self.assertIsInstance(content['meta']['total_count'], int)
self.assertEqual(content['meta']['total_count'], get_total_page_count())
# Check that the pages section is there
self.assertIn('pages', content)
self.assertIsInstance(content['pages'], list)
# Check that each page has a meta section with type and detail_url attributes
for page in content['pages']:
self.assertIn('meta', page)
self.assertIsInstance(page['meta'], dict)
self.assertEqual(set(page['meta'].keys()), {'type', 'detail_url'})
def test_unpublished_pages_dont_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogEntryPage.objects.get(id=16)
page.unpublish()
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], total_count - 1)
def test_private_pages_dont_appear_in_list(self):
total_count = get_total_page_count()
page = models.BlogIndexPage.objects.get(id=5)
page.view_restrictions.create(password='test')
new_total_count = get_total_page_count()
self.assertNotEqual(total_count, new_total_count)
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(content['meta']['total_count'], new_total_count)
# TYPE FILTER
def test_type_filter_results_are_all_blog_entries(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['pages']:
self.assertEqual(page['meta']['type'], 'demosite.BlogEntryPage')
def test_type_filter_total_count(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
# Total count must be reduced as this filters the results
self.assertEqual(content['meta']['total_count'], 3)
def test_non_existant_type_gives_error(self):
response = self.get_response(type='demosite.IDontExist')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "type doesn't exist"})
def test_non_page_type_gives_error(self):
response = self.get_response(type='auth.User')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "type doesn't exist"})
# EXTRA FIELDS
def test_extra_fields_default(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
for page in content['pages']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title'})
def test_extra_fields(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['pages']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'date', 'feed_image'})
def test_extra_fields_child_relation(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
for page in content['pages']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'title', 'related_links'})
self.assertIsInstance(page['related_links'], list)
def test_extra_fields_foreign_key(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='title,date,feed_image')
content = json.loads(response.content.decode('UTF-8'))
for page in content['pages']:
feed_image = page['feed_image']
if feed_image is not None:
self.assertIsInstance(feed_image, dict)
self.assertEqual(set(feed_image.keys()), {'id', 'meta'})
self.assertIsInstance(feed_image['id'], int)
self.assertIsInstance(feed_image['meta'], dict)
self.assertEqual(set(feed_image['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(feed_image['meta']['type'], 'wagtailimages.Image')
self.assertEqual(
feed_image['meta']['detail_url'], 'http://localhost/api/v1/images/%d/' % feed_image['id']
)
def test_extra_fields_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='tags')
content = json.loads(response.content.decode('UTF-8'))
for page in content['pages']:
self.assertEqual(set(page.keys()), {'id', 'meta', 'tags'})
self.assertIsInstance(page['tags'], list)
def test_extra_field_ordering(self):
response = self.get_response(type='demosite.BlogEntryPage', fields='date,title,feed_image,related_links')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Test field order
content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'title',
'date',
'feed_image',
'related_links',
]
self.assertEqual(list(content['pages'][0].keys()), field_order)
def test_extra_fields_without_type_gives_error(self):
response = self.get_response(fields='title,related_links')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: related_links"})
def test_extra_fields_which_are_not_in_api_fields_gives_error(self):
response = self.get_response(fields='path')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: path"})
def test_extra_fields_unknown_field_gives_error(self):
response = self.get_response(fields='123,title,abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "unknown fields: 123, abc"})
# FILTERING
def test_filtering_exact_filter(self):
response = self.get_response(title='Home page')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2])
def test_filtering_exact_filter_on_specific_field(self):
response = self.get_response(type='demosite.BlogEntryPage', date='2013-12-02')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_on_id(self):
response = self.get_response(id=16)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_doesnt_work_on_specific_fields_without_type(self):
response = self.get_response(date='2013-12-02')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: date"})
def test_filtering_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18])
def test_filtering_multiple_tags(self):
response = self.get_response(type='demosite.BlogEntryPage', tags='wagtail,bird')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16])
def test_filtering_unknown_field_gives_error(self):
response = self.get_response(not_a_field='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "query parameter is not an operation or a recognised field: not_a_field"})
# CHILD OF FILTER
def test_child_of_filter(self):
response = self.get_response(child_of=5)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_child_of_with_type(self):
response = self.get_response(type='demosite.EventPage', child_of=5)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [])
def test_child_of_unknown_page_gives_error(self):
response = self.get_response(child_of=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "parent page doesn't exist"})
def test_child_of_not_integer_gives_error(self):
response = self.get_response(child_of='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "child_of must be a positive integer"})
def test_child_of_page_thats_not_in_same_site_gives_error(self):
# Root page is not in any site, so pretend it doesn't exist
response = self.get_response(child_of=1)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "parent page doesn't exist"})
# DESCENDANT OF FILTER
def test_descendant_of_filter(self):
response = self.get_response(descendant_of=6)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [10, 15, 17, 21, 22, 23])
def test_descendant_of_with_type(self):
response = self.get_response(type='tests.EventPage', descendant_of=6)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [])
def test_descendant_of_unknown_page_gives_error(self):
response = self.get_response(descendant_of=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ancestor page doesn't exist"})
def test_descendant_of_not_integer_gives_error(self):
response = self.get_response(descendant_of='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "descendant_of must be a positive integer"})
def test_descendant_of_page_thats_not_in_same_site_gives_error(self):
# Root page is not in any site, so pretend it doesn't exist
response = self.get_response(descendant_of=1)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ancestor page doesn't exist"})
def test_descendant_of_when_filtering_by_child_of_gives_error(self):
response = self.get_response(descendant_of=6, child_of=5)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "filtering by descendant_of with child_of is not supported"})
# ORDERING
def test_ordering_default(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [2, 4, 8, 9, 5, 16, 18, 19, 6, 10, 15, 17, 21, 22, 23, 20, 13, 14, 12])
def test_ordering_by_title(self):
response = self.get_response(order='title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [21, 22, 19, 23, 5, 16, 18, 12, 14, 8, 9, 4, 2, 13, 20, 17, 6, 10, 15])
def test_ordering_by_title_backwards(self):
response = self.get_response(order='-title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [15, 10, 6, 17, 20, 13, 2, 4, 9, 8, 14, 12, 18, 16, 5, 23, 19, 22, 21])
def test_ordering_by_random(self):
response_1 = self.get_response(order='random')
content_1 = json.loads(response_1.content.decode('UTF-8'))
page_id_list_1 = self.get_page_id_list(content_1)
response_2 = self.get_response(order='random')
content_2 = json.loads(response_2.content.decode('UTF-8'))
page_id_list_2 = self.get_page_id_list(content_2)
self.assertNotEqual(page_id_list_1, page_id_list_2)
def test_ordering_by_random_backwards_gives_error(self):
response = self.get_response(order='-random')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'random' (unknown field)"})
def test_ordering_by_random_with_offset_gives_error(self):
response = self.get_response(order='random', offset=10)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "random ordering with offset is not supported"})
def test_ordering_default_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_ordering_by_title_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', order='title')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [19, 16, 18])
def test_ordering_by_specific_field_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', order='date')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list, [16, 18, 19])
def test_ordering_by_unknown_field_gives_error(self):
response = self.get_response(order='not_a_field')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "cannot order by 'not_a_field' (unknown field)"})
# LIMIT
def test_limit_only_two_results_returned(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['pages']), 2)
def test_limit_total_count(self):
response = self.get_response(limit=2)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "limit"
self.assertEqual(content['meta']['total_count'], get_total_page_count())
def test_limit_not_integer_gives_error(self):
response = self.get_response(limit='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit must be a positive integer"})
def test_limit_too_high_gives_error(self):
response = self.get_response(limit=1000)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 20"})
@override_settings(WAGTAILAPI_LIMIT_MAX=10)
def test_limit_maximum_can_be_changed(self):
response = self.get_response(limit=20)
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "limit cannot be higher than 10"})
@override_settings(WAGTAILAPI_LIMIT_MAX=2)
def test_limit_default_changes_with_max(self):
# The default limit is 20. If WAGTAILAPI_LIMIT_MAX is less than that,
# the default should change accordingly.
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(len(content['pages']), 2)
# OFFSET
def test_offset_5_usually_appears_5th_in_list(self):
response = self.get_response()
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list.index(5), 4)
def test_offset_5_moves_after_offset(self):
response = self.get_response(offset=4)
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(page_id_list.index(5), 0)
def test_offset_total_count(self):
response = self.get_response(offset=10)
content = json.loads(response.content.decode('UTF-8'))
# The total count must not be affected by "offset"
self.assertEqual(content['meta']['total_count'], get_total_page_count())
def test_offset_not_integer_gives_error(self):
response = self.get_response(offset='abc')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "offset must be a positive integer"})
# SEARCH
def test_search_for_blog(self):
response = self.get_response(search='blog')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
# Check that the results are the blog index and three blog pages
self.assertEqual(set(page_id_list), set([5, 16, 18, 19]))
def test_search_with_type(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog')
content = json.loads(response.content.decode('UTF-8'))
page_id_list = self.get_page_id_list(content)
self.assertEqual(set(page_id_list), set([16, 18, 19]))
def test_search_when_ordering_gives_error(self):
response = self.get_response(search='blog', order='title')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "ordering with a search query is not supported"})
@override_settings(WAGTAILAPI_SEARCH_ENABLED=False)
def test_search_when_disabled_gives_error(self):
response = self.get_response(search='blog')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "search is disabled"})
def test_search_when_filtering_by_tag_gives_error(self):
response = self.get_response(type='demosite.BlogEntryPage', search='blog', tags='wagtail')
content = json.loads(response.content.decode('UTF-8'))
self.assertEqual(response.status_code, 400)
self.assertEqual(content, {'message': "filtering by tag with a search query is not supported"})
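# Tests for the page detail endpoint (/api/v1/pages/<page_id>/)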
class TestPageDetail(TestCase):
fixtures = ['demosite.json']
def get_response(self, page_id, **params):
return self.client.get(reverse('wagtailapi_v1:pages:detail', args=(page_id, )), params)
def test_basic(self):
response = self.get_response(16)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['Content-type'], 'application/json')
# Will crash if the JSON is invalid
content = json.loads(response.content.decode('UTF-8'))
# Check the id field
self.assertIn('id', content)
self.assertEqual(content['id'], 16)
# Check that the meta section is there
self.assertIn('meta', content)
self.assertIsInstance(content['meta'], dict)
# Check the meta type
self.assertIn('type', content['meta'])
self.assertEqual(content['meta']['type'], 'demosite.BlogEntryPage')
# Check the meta detail_url
self.assertIn('detail_url', content['meta'])
self.assertEqual(content['meta']['detail_url'], 'http://localhost/api/v1/pages/16/')
# Check the parent field
self.assertIn('parent', content)
self.assertIsInstance(content['parent'], dict)
self.assertEqual(set(content['parent'].keys()), {'id', 'meta'})
self.assertEqual(content['parent']['id'], 5)
self.assertIsInstance(content['parent']['meta'], dict)
self.assertEqual(set(content['parent']['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(content['parent']['meta']['type'], 'demosite.BlogIndexPage')
self.assertEqual(content['parent']['meta']['detail_url'], 'http://localhost/api/v1/pages/5/')
# Check that the custom fields are included
self.assertIn('date', content)
self.assertIn('body', content)
self.assertIn('tags', content)
self.assertIn('feed_image', content)
self.assertIn('related_links', content)
self.assertIn('carousel_items', content)
# Check that the date was serialised properly
self.assertEqual(content['date'], '2013-12-02')
# Check that the tags were serialised properly
self.assertEqual(content['tags'], ['bird', 'wagtail'])
# Check that the feed image was serialised properly
self.assertIsInstance(content['feed_image'], dict)
self.assertEqual(set(content['feed_image'].keys()), {'id', 'meta'})
self.assertEqual(content['feed_image']['id'], 7)
self.assertIsInstance(content['feed_image']['meta'], dict)
self.assertEqual(set(content['feed_image']['meta'].keys()), {'type', 'detail_url'})
self.assertEqual(content['feed_image']['meta']['type'], 'wagtailimages.Image')
self.assertEqual(content['feed_image']['meta']['detail_url'], 'http://localhost/api/v1/images/7/')
# Check that the child relations were serialised properly
self.assertEqual(content['related_links'], [])
for carousel_item in content['carousel_items']:
self.assertEqual(set(carousel_item.keys()), {'embed_url', 'link', 'caption', 'image'})
def test_meta_parent_id_doesnt_show_root_page(self):
        # The root page isn't part of the site, so it shouldn't be reported as the parent of the home page
response = self.get_response(2)
content = json.loads(response.content.decode('UTF-8'))
self.assertNotIn('parent', content['meta'])
def test_field_ordering(self):
response = self.get_response(16)
        # Parse with an OrderedDict hook so the key order of the response is
        # preserved for the field-order assertion (this will also crash if the
        # JSON is invalid)
        content = json.JSONDecoder(object_pairs_hook=collections.OrderedDict).decode(response.content.decode('UTF-8'))
field_order = [
'id',
'meta',
'parent',
'title',
'body',
'tags',
'date',
'feed_image',
'carousel_items',
'related_links',
]
self.assertEqual(list(content.keys()), field_order)
def test_null_foreign_key(self):
models.BlogEntryPage.objects.filter(id=16).update(feed_image_id=None)
response = self.get_response(16)
content = json.loads(response.content.decode('UTF-8'))
self.assertIn('related_links', content)
self.assertEqual(content['feed_image'], None)
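# Tests that StreamField content is serialised correctly by the detail endpoint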
class TestPageDetailWithStreamField(TestCase):
fixtures = ['test.json']
def setUp(self):
self.homepage = Page.objects.get(url_path='/home/')
def make_stream_page(self, body):
stream_page = StreamPage(
title='stream page',
body=body
)
return self.homepage.add_child(instance=stream_page)
def test_can_fetch_streamfield_content(self):
stream_page = self.make_stream_page('[{"type": "text", "value": "foo"}]')
response_url = reverse('wagtailapi_v1:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
self.assertEqual(response.status_code, 200)
self.assertEqual(response['content-type'], 'application/json')
content = json.loads(response.content.decode('utf-8'))
self.assertIn('id', content)
self.assertEqual(content['id'], stream_page.id)
self.assertIn('body', content)
self.assertEqual(content['body'], [{'type': 'text', 'value': 'foo'}])
def test_image_block(self):
stream_page = self.make_stream_page('[{"type": "image", "value": 1}]')
response_url = reverse('wagtailapi_v1:pages:detail', args=(stream_page.id, ))
response = self.client.get(response_url)
content = json.loads(response.content.decode('utf-8'))
# ForeignKeys in a StreamField shouldn't be translated into dictionary representation
self.assertEqual(content['body'], [{'type': 'image', 'value': 1}])
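# Check that publishing, unpublishing and deleting a page purges its API detail
# URL from the configured frontend cache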
@override_settings(
WAGTAILFRONTENDCACHE={
'varnish': {
'BACKEND': 'wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend',
'LOCATION': 'http://localhost:8000',
},
},
WAGTAILAPI_BASE_URL='http://api.example.com',
)
@mock.patch('wagtail.contrib.wagtailfrontendcache.backends.HTTPBackend.purge')
class TestPageCacheInvalidation(TestCase):
fixtures = ['demosite.json']
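    # Register the purge signal handlers for the duration of this test case and
    # remove them again afterwards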
@classmethod
def setUpClass(cls):
super(TestPageCacheInvalidation, cls).setUpClass()
signal_handlers.register_signal_handlers()
@classmethod
def tearDownClass(cls):
super(TestPageCacheInvalidation, cls).tearDownClass()
signal_handlers.unregister_signal_handlers()
def test_republish_page_purges(self, purge):
Page.objects.get(id=2).save_revision().publish()
purge.assert_any_call('http://api.example.com/api/v1/pages/2/')
def test_unpublish_page_purges(self, purge):
Page.objects.get(id=2).unpublish()
purge.assert_any_call('http://api.example.com/api/v1/pages/2/')
def test_delete_page_purges(self, purge):
Page.objects.get(id=16).delete()
purge.assert_any_call('http://api.example.com/api/v1/pages/16/')
def test_save_draft_doesnt_purge(self, purge):
Page.objects.get(id=2).save_revision()
purge.assert_not_called()
| bsd-3-clause |