repo_name | path | copies | size | content | license
---|---|---|---|---|---|
sbtlaarzc/vispy | vispy/scene/canvas.py | 20 | 21169 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
from __future__ import division
import weakref
import numpy as np
from .. import gloo
from .. import app
from .visuals import VisualNode
from ..visuals.transforms import TransformSystem
from ..color import Color
from ..util import logger, Frozen
from ..util.profiler import Profiler
from .subscene import SubScene
from .events import SceneMouseEvent
from .widgets import Widget
class SceneCanvas(app.Canvas, Frozen):
"""A Canvas that automatically draws the contents of a scene
Parameters
----------
title : str
The widget title
size : (width, height)
The size of the window.
position : (x, y)
The position of the window in screen coordinates.
show : bool
Whether to show the widget immediately. Default False.
autoswap : bool
Whether to swap the buffers automatically after a draw event.
Default True. If True, the ``swap_buffers`` Canvas method will
be called last (by default) by the ``canvas.draw`` event handler.
app : Application | str
Give vispy Application instance to use as a backend.
(vispy.app is used by default.) If str, then an application
using the chosen backend (e.g., 'pyglet') will be created.
Note the canvas application can be accessed at ``canvas.app``.
create_native : bool
Whether to create the widget immediately. Default True.
vsync : bool
Enable vertical synchronization.
resizable : bool
Allow the window to be resized.
decorate : bool
Decorate the window. Default True.
fullscreen : bool | int
If False, windowed mode is used (default). If True, the default
monitor is used. If int, the given monitor number is used.
config : dict
A dict with OpenGL configuration options, which is combined
with the default configuration options and used to initialize
the context. See ``canvas.context.config`` for possible
options.
shared : Canvas | GLContext | None
An existing canvas or context to share OpenGL objects with.
keys : str | dict | None
Default key mapping to use. If 'interactive', escape and F11 will
close the canvas and toggle full-screen mode, respectively.
If dict, maps keys to functions. If dict values are strings,
they are assumed to be ``Canvas`` methods, otherwise they should
be callable.
parent : widget-object
The parent widget if this makes sense for the used backend.
dpi : float | None
Resolution in dots-per-inch to use for the canvas. If dpi is None,
then the value will be determined by querying the global config first,
and then the operating system.
always_on_top : bool
If True, try to create the window in always-on-top mode.
px_scale : int > 0
A scale factor to apply between logical and physical pixels in addition
to the actual scale factor determined by the backend. This option
allows the scale factor to be adjusted for testing.
bgcolor : Color
The background color to use.
See also
--------
vispy.app.Canvas
Notes
-----
Receives the following events:
* initialize
* resize
* draw
* mouse_press
* mouse_release
* mouse_double_click
* mouse_move
* mouse_wheel
* key_press
* key_release
* stylus
* touch
* close
    The ordering of the mouse_double_click, mouse_press, and mouse_release
    events is not guaranteed to be consistent between backends. Only certain
backends natively support double-clicking (currently Qt and WX); on other
backends, they are detected manually with a fixed time delay.
This can cause problems with accessibility, as increasing the OS detection
time or using a dedicated double-click button will not be respected.
"""
def __init__(self, title='VisPy canvas', size=(800, 600), position=None,
show=False, autoswap=True, app=None, create_native=True,
vsync=False, resizable=True, decorate=True, fullscreen=False,
config=None, shared=None, keys=None, parent=None, dpi=None,
always_on_top=False, px_scale=1, bgcolor='black'):
self._scene = None
# A default widget that follows the shape of the canvas
self._central_widget = None
self._draw_order = weakref.WeakKeyDictionary()
self._drawing = False
self._fb_stack = []
self._vp_stack = []
self._mouse_handler = None
self.transforms = TransformSystem(canvas=self)
self._bgcolor = Color(bgcolor).rgba
# Set to True to enable sending mouse events even when no button is
# pressed. Disabled by default because it is very expensive. Also
# private for now because this behavior / API needs more thought.
self._send_hover_events = False
super(SceneCanvas, self).__init__(
title, size, position, show, autoswap, app, create_native, vsync,
resizable, decorate, fullscreen, config, shared, keys, parent, dpi,
always_on_top, px_scale)
self.events.mouse_press.connect(self._process_mouse_event)
self.events.mouse_move.connect(self._process_mouse_event)
self.events.mouse_release.connect(self._process_mouse_event)
self.events.mouse_wheel.connect(self._process_mouse_event)
self.scene = SubScene()
self.freeze()
@property
def scene(self):
""" The SubScene object that represents the root node of the
scene graph to be displayed.
"""
return self._scene
@scene.setter
def scene(self, node):
oldscene = self._scene
self._scene = node
if oldscene is not None:
oldscene._set_canvas(None)
oldscene.events.children_change.disconnect(self._update_scenegraph)
if node is not None:
node._set_canvas(self)
node.events.children_change.connect(self._update_scenegraph)
@property
def central_widget(self):
""" Returns the default widget that occupies the entire area of the
canvas.
"""
if self._central_widget is None:
self._central_widget = Widget(size=self.size, parent=self.scene)
return self._central_widget
@property
def bgcolor(self):
return Color(self._bgcolor)
@bgcolor.setter
def bgcolor(self, color):
self._bgcolor = Color(color).rgba
if hasattr(self, '_backend'):
self.update()
def update(self, node=None):
"""Update the scene
Parameters
----------
node : instance of Node
Not used.
"""
# TODO: use node bounds to keep track of minimum drawable area
if self._drawing:
return
app.Canvas.update(self)
def on_draw(self, event):
"""Draw handler
Parameters
----------
event : instance of Event
The draw event.
"""
if self._scene is None:
return # Can happen on initialization
logger.debug('Canvas draw')
self._draw_scene()
def render(self, region=None, size=None, bgcolor=None):
"""Render the scene to an offscreen buffer and return the image array.
Parameters
----------
region : tuple | None
Specifies the region of the canvas to render. Format is
(x, y, w, h). By default, the entire canvas is rendered.
size : tuple | None
Specifies the size of the image array to return. If no size is
given, then the size of the *region* is used, multiplied by the
pixel scaling factor of the canvas (see `pixel_scale`). This
argument allows the scene to be rendered at resolutions different
from the native canvas resolution.
bgcolor : instance of Color | None
The background color to use.
Returns
-------
image : array
Numpy array of type ubyte and shape (h, w, 4). Index [0, 0] is the
upper-left corner of the rendered region.
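        Examples
        --------
        A minimal sketch (illustrative only; assumes the default
        ``pixel_scale`` of 1):

        >>> canvas = SceneCanvas(size=(400, 300))  # doctest: +SKIP
        >>> img = canvas.render()  # doctest: +SKIP
        >>> img.shape  # (h, w, 4) ubyte array  # doctest: +SKIP
        (300, 400, 4)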
"""
self.set_current()
# Set up a framebuffer to render to
offset = (0, 0) if region is None else region[:2]
csize = self.size if region is None else region[2:]
s = self.pixel_scale
size = tuple([x * s for x in csize]) if size is None else size
fbo = gloo.FrameBuffer(color=gloo.RenderBuffer(size[::-1]),
depth=gloo.RenderBuffer(size[::-1]))
self.push_fbo(fbo, offset, csize)
try:
self._draw_scene(bgcolor=bgcolor)
return fbo.read()
finally:
self.pop_fbo()
def _draw_scene(self, bgcolor=None):
if bgcolor is None:
bgcolor = self._bgcolor
self.context.clear(color=bgcolor, depth=True)
self.draw_visual(self.scene)
def draw_visual(self, visual, event=None):
""" Draw a visual and its children to the canvas or currently active
framebuffer.
Parameters
----------
visual : Visual
The visual to draw
event : None or DrawEvent
Optionally specifies the original canvas draw event that initiated
this draw.
"""
prof = Profiler()
# make sure this canvas's context is active
self.set_current()
try:
self._drawing = True
# get order to draw visuals
if visual not in self._draw_order:
self._draw_order[visual] = self._generate_draw_order()
order = self._draw_order[visual]
# draw (while avoiding branches with visible=False)
stack = []
invisible_node = None
for node, start in order:
if start:
stack.append(node)
if invisible_node is None:
if not node.visible:
# disable drawing until we exit this node's subtree
invisible_node = node
else:
if hasattr(node, 'draw'):
node.draw()
prof.mark(str(node))
else:
if node is invisible_node:
invisible_node = None
stack.pop()
finally:
self._drawing = False
def _generate_draw_order(self, node=None):
"""Return a list giving the order to draw visuals.
Each node appears twice in the list--(node, True) appears before the
node's children are drawn, and (node, False) appears after.
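        For example, for a scene ``root -> child`` (hypothetical nodes),
        the result is ``[(root, True), (child, True), (child, False),
        (root, False)]``.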
"""
if node is None:
node = self._scene
order = [(node, True)]
children = node.children
children.sort(key=lambda ch: ch.order)
for ch in children:
order.extend(self._generate_draw_order(ch))
order.append((node, False))
return order
def _update_scenegraph(self, event):
"""Called when topology of scenegraph has changed.
"""
self._draw_order.clear()
self.update()
def _process_mouse_event(self, event):
prof = Profiler() # noqa
deliver_types = ['mouse_press', 'mouse_wheel']
if self._send_hover_events:
deliver_types += ['mouse_move']
picked = self._mouse_handler
if picked is None:
if event.type in deliver_types:
picked = self.visual_at(event.pos)
# No visual to handle this event; bail out now
if picked is None:
return
# Create an event to pass to the picked visual
scene_event = SceneMouseEvent(event=event, visual=picked)
# Deliver the event
if picked == self._mouse_handler:
# If we already have a mouse handler, then no other node may
# receive the event
if event.type == 'mouse_release':
self._mouse_handler = None
getattr(picked.events, event.type)(scene_event)
else:
# If we don't have a mouse handler, then pass the event through
# the chain of parents until a node accepts the event.
while picked is not None:
getattr(picked.events, event.type)(scene_event)
if scene_event.handled:
if event.type == 'mouse_press':
self._mouse_handler = picked
break
if event.type in deliver_types:
# events that are not handled get passed to parent
picked = picked.parent
scene_event.visual = picked
else:
picked = None
# If something in the scene handled the scene_event, then we mark
# the original event accordingly.
event.handled = scene_event.handled
def visual_at(self, pos):
"""Return the visual at a given position
Parameters
----------
pos : tuple
The position in logical coordinates to query.
Returns
-------
visual : instance of Visual | None
The visual at the position, if it exists.
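        Examples
        --------
        A minimal sketch (assuming a populated, shown canvas):

        >>> visual = canvas.visual_at((100, 100))  # doctest: +SKIP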
"""
tr = self.transforms.get_transform('canvas', 'framebuffer')
fbpos = tr.map(pos)[:2]
try:
id_ = self._render_picking(region=(fbpos[0], fbpos[1],
1, 1))
vis = VisualNode._visual_ids.get(id_[0, 0], None)
except RuntimeError:
# Don't have read_pixels() support for IPython. Fall back to
# bounds checking.
return self._visual_bounds_at(pos)
return vis
def _visual_bounds_at(self, pos, node=None):
"""Find a visual whose bounding rect encompasses *pos*.
"""
if node is None:
node = self.scene
for ch in node.children:
hit = self._visual_bounds_at(pos, ch)
if hit is not None:
return hit
if (not isinstance(node, VisualNode) or not node.visible or
not node.interactive):
return None
bounds = [node.bounds(axis=i) for i in range(2)]
if None in bounds:
return None
tr = self.scene.node_transform(node).inverse
corners = np.array([
[bounds[0][0], bounds[1][0]],
[bounds[0][0], bounds[1][1]],
[bounds[0][1], bounds[1][0]],
[bounds[0][1], bounds[1][1]]])
bounds = tr.map(corners)
xhit = bounds[:, 0].min() < pos[0] < bounds[:, 0].max()
yhit = bounds[:, 1].min() < pos[1] < bounds[:, 1].max()
if xhit and yhit:
return node
def visuals_at(self, pos, radius=10):
"""Return a list of visuals within *radius* pixels of *pos*.
Visuals are sorted by their proximity to *pos*.
Parameters
----------
pos : tuple
(x, y) position at which to find visuals.
radius : int
Distance away from *pos* to search for visuals.
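        Examples
        --------
        A minimal sketch (assuming a populated, shown canvas):

        >>> visuals = canvas.visuals_at((100, 100), radius=5)  # doctest: +SKIP
        >>> visuals[0]  # the visual nearest to (100, 100)  # doctest: +SKIP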
"""
tr = self.transforms.get_transform('canvas', 'framebuffer')
pos = tr.map(pos)[:2]
id = self._render_picking(region=(pos[0]-radius, pos[1]-radius,
radius * 2 + 1, radius * 2 + 1))
ids = []
seen = set()
for i in range(radius):
subr = id[radius-i:radius+i+1, radius-i:radius+i+1]
subr_ids = set(list(np.unique(subr)))
ids.extend(list(subr_ids - seen))
seen |= subr_ids
visuals = [VisualNode._visual_ids.get(x, None) for x in ids]
return [v for v in visuals if v is not None]
def _render_picking(self, **kwargs):
"""Render the scene in picking mode, returning a 2D array of visual
IDs.
"""
try:
self._scene.picking = True
img = self.render(bgcolor=(0, 0, 0, 0), **kwargs)
finally:
self._scene.picking = False
img = img.astype('int32') * [2**0, 2**8, 2**16, 2**24]
id_ = img.sum(axis=2).astype('int32')
return id_
def on_resize(self, event):
"""Resize handler
Parameters
----------
event : instance of Event
The resize event.
"""
self._update_transforms()
if self._central_widget is not None:
self._central_widget.size = self.size
if len(self._vp_stack) == 0:
self.context.set_viewport(0, 0, *self.physical_size)
def on_close(self, event):
"""Close event handler
Parameters
----------
event : instance of Event
The event.
"""
self.events.mouse_press.disconnect(self._process_mouse_event)
self.events.mouse_move.disconnect(self._process_mouse_event)
self.events.mouse_release.disconnect(self._process_mouse_event)
self.events.mouse_wheel.disconnect(self._process_mouse_event)
# -------------------------------------------------- transform handling ---
def push_viewport(self, viewport):
""" Push a viewport (x, y, w, h) on the stack. Values must be integers
relative to the active framebuffer.
Parameters
----------
viewport : tuple
The viewport as (x, y, w, h).
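        Examples
        --------
        Negative sizes are normalized before the viewport is activated
        (illustrative values):

        >>> canvas.push_viewport((100, 100, -50, 50))  # doctest: +SKIP
        >>> canvas._vp_stack[-1]  # doctest: +SKIP
        [50, 100, 50, 50]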
"""
vp = list(viewport)
        # Normalize the viewport before setting it
if vp[2] < 0:
vp[0] += vp[2]
vp[2] *= -1
if vp[3] < 0:
vp[1] += vp[3]
vp[3] *= -1
self._vp_stack.append(vp)
try:
self.context.set_viewport(*vp)
        except Exception:
self._vp_stack.pop()
raise
self._update_transforms()
def pop_viewport(self):
""" Pop a viewport from the stack.
"""
vp = self._vp_stack.pop()
# Activate latest
if len(self._vp_stack) > 0:
self.context.set_viewport(*self._vp_stack[-1])
else:
self.context.set_viewport(0, 0, *self.physical_size)
self._update_transforms()
return vp
def push_fbo(self, fbo, offset, csize):
""" Push an FBO on the stack.
This activates the framebuffer and causes subsequent rendering to be
written to the framebuffer rather than the canvas's back buffer. This
will also set the canvas viewport to cover the boundaries of the
framebuffer.
Parameters
----------
fbo : instance of FrameBuffer
            The framebuffer object.
offset : tuple
The location of the fbo origin relative to the canvas's framebuffer
origin.
csize : tuple
The size of the region in the canvas's framebuffer that should be
covered by this framebuffer object.
"""
self._fb_stack.append((fbo, offset, csize))
try:
fbo.activate()
h, w = fbo.color_buffer.shape[:2]
self.push_viewport((0, 0, w, h))
except Exception:
self._fb_stack.pop()
raise
self._update_transforms()
def pop_fbo(self):
""" Pop an FBO from the stack.
"""
fbo = self._fb_stack.pop()
fbo[0].deactivate()
self.pop_viewport()
if len(self._fb_stack) > 0:
old_fbo = self._fb_stack[-1]
old_fbo[0].activate()
self._update_transforms()
return fbo
def _current_framebuffer(self):
""" Return (fbo, origin, canvas_size) for the current
FBO on the stack, or for the canvas if there is no FBO.
"""
if len(self._fb_stack) == 0:
return None, (0, 0), self.size
else:
return self._fb_stack[-1]
def _update_transforms(self):
"""Update the canvas's TransformSystem to correct for the current
canvas size, framebuffer, and viewport.
"""
if len(self._fb_stack) == 0:
fb_size = fb_rect = None
else:
fb, origin, fb_size = self._fb_stack[-1]
fb_rect = origin + fb_size
if len(self._vp_stack) == 0:
viewport = None
else:
viewport = self._vp_stack[-1]
self.transforms.configure(viewport=viewport, fbo_size=fb_size,
fbo_rect=fb_rect)
| bsd-3-clause |
onitake/ansible | lib/ansible/modules/network/illumos/dladm_vnic.py | 43 | 6588 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: dladm_vnic
short_description: Manage VNICs on Solaris/illumos systems.
description:
- Create or delete VNICs on Solaris/illumos systems.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
name:
description:
- VNIC name.
required: true
link:
description:
- VNIC underlying link name.
required: true
temporary:
description:
- Specifies that the VNIC is temporary. Temporary VNICs
do not persist across reboots.
required: false
default: false
type: bool
mac:
description:
            - Sets the VNIC's MAC address. Must be a valid unicast MAC address.
required: false
        default: null
aliases: [ "macaddr" ]
vlan:
description:
- Enable VLAN tagging for this VNIC. The VLAN tag will have id
I(vlan).
required: false
        default: null
aliases: [ "vlan_id" ]
state:
description:
- Create or delete Solaris/illumos VNIC.
required: false
default: "present"
choices: [ "present", "absent" ]
'''
EXAMPLES = '''
# Create 'vnic0' VNIC over 'bnx0' link
- dladm_vnic:
name: vnic0
link: bnx0
state: present
# Create VNIC with specified MAC and VLAN tag over 'aggr0'
- dladm_vnic:
name: vnic1
link: aggr0
mac: '00:00:5E:00:53:23'
vlan: 4
# Remove 'vnic0' VNIC
- dladm_vnic:
name: vnic0
link: bnx0
state: absent
'''
RETURN = '''
name:
description: VNIC name
returned: always
type: string
sample: "vnic0"
link:
description: VNIC underlying link name
returned: always
type: string
sample: "igb0"
state:
description: state of the target
returned: always
type: string
sample: "present"
temporary:
description: VNIC's persistence
returned: always
type: boolean
sample: "True"
mac:
description: MAC address to use for VNIC
returned: if mac is specified
type: string
sample: "00:00:5E:00:53:42"
vlan:
description: VLAN to use for VNIC
returned: success
type: int
sample: 42
'''
import re
from ansible.module_utils.basic import AnsibleModule
class VNIC(object):
    # Unicast MAC addresses have an even second hex digit (the least
    # significant bit of the first octet is 0).
    UNICAST_MAC_REGEX = r'^[a-f0-9][02468ace]:([a-f0-9]{2}:){4}[a-f0-9]{2}$'
def __init__(self, module):
self.module = module
self.name = module.params['name']
self.link = module.params['link']
self.mac = module.params['mac']
self.vlan = module.params['vlan']
self.temporary = module.params['temporary']
self.state = module.params['state']
def vnic_exists(self):
cmd = [self.module.get_bin_path('dladm', True)]
cmd.append('show-vnic')
cmd.append(self.name)
(rc, _, _) = self.module.run_command(cmd)
if rc == 0:
return True
else:
return False
def create_vnic(self):
cmd = [self.module.get_bin_path('dladm', True)]
cmd.append('create-vnic')
if self.temporary:
cmd.append('-t')
if self.mac:
cmd.append('-m')
cmd.append(self.mac)
if self.vlan:
cmd.append('-v')
            cmd.append(str(self.vlan))
cmd.append('-l')
cmd.append(self.link)
cmd.append(self.name)
return self.module.run_command(cmd)
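    # For example (illustrative values), create_vnic() with name='vnic1',
    # link='aggr0', mac='00:00:5e:00:53:23' and vlan=4 runs roughly:
    #   dladm create-vnic -m 00:00:5e:00:53:23 -v 4 -l aggr0 vnic1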
def delete_vnic(self):
cmd = [self.module.get_bin_path('dladm', True)]
cmd.append('delete-vnic')
if self.temporary:
cmd.append('-t')
cmd.append(self.name)
return self.module.run_command(cmd)
    def is_valid_unicast_mac(self):
        # Case-insensitive match; True only for a well-formed unicast MAC.
        mac_re = re.match(self.UNICAST_MAC_REGEX, self.mac, re.IGNORECASE)
        return mac_re is not None
def is_valid_vlan_id(self):
return 0 <= self.vlan <= 4095
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(required=True),
link=dict(required=True),
mac=dict(default=None, aliases=['macaddr']),
            vlan=dict(default=None, type='int', aliases=['vlan_id']),
temporary=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
),
supports_check_mode=True
)
vnic = VNIC(module)
rc = None
out = ''
err = ''
result = {}
result['name'] = vnic.name
result['link'] = vnic.link
result['state'] = vnic.state
result['temporary'] = vnic.temporary
if vnic.mac is not None:
        if not vnic.is_valid_unicast_mac():
module.fail_json(msg='Invalid unicast MAC address',
mac=vnic.mac,
name=vnic.name,
state=vnic.state,
link=vnic.link,
vlan=vnic.vlan)
result['mac'] = vnic.mac
if vnic.vlan is not None:
        if not vnic.is_valid_vlan_id():
module.fail_json(msg='Invalid VLAN tag',
mac=vnic.mac,
name=vnic.name,
state=vnic.state,
link=vnic.link,
vlan=vnic.vlan)
result['vlan'] = vnic.vlan
if vnic.state == 'absent':
if vnic.vnic_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = vnic.delete_vnic()
if rc != 0:
module.fail_json(name=vnic.name, msg=err, rc=rc)
elif vnic.state == 'present':
if not vnic.vnic_exists():
if module.check_mode:
module.exit_json(changed=True)
(rc, out, err) = vnic.create_vnic()
if rc is not None and rc != 0:
module.fail_json(name=vnic.name, msg=err, rc=rc)
if rc is None:
result['changed'] = False
else:
result['changed'] = True
if out:
result['stdout'] = out
if err:
result['stderr'] = err
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jzylks/ncclient | docs/source/conf.py | 11 | 6704 | # -*- coding: utf-8 -*-
#
# ncclient documentation build configuration file, created by
# sphinx-quickstart on Fri Sep 18 17:32:15 2009.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'ncclient'
copyright = u'2009, Shikhar Bhushan; 2011, Leonidas Poulopoulos'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.3.2'
# The full version, including alpha/beta/rc tags.
release = '0.3.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = 'obj'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ["ncclient."]
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/logo.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'ncclientdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'a4'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'ncclient.tex', u'ncclient Documentation',
   u'Shikhar Bhushan \\and Leonidas Poulopoulos', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/logo.png"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
autoclass_content = 'both'
| apache-2.0 |
pnedunuri/scipy | scipy/optimize/_root.py | 109 | 26007 | """
Unified interfaces to root finding algorithms.
Functions
---------
- root : find a root of a vector function.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['root']
import numpy as np
from scipy._lib.six import callable
from warnings import warn
from .optimize import MemoizeJac, OptimizeResult, _check_unknown_options
from .minpack import _root_hybr, leastsq
from ._spectral import _root_df_sane
from . import nonlin
def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None,
options=None):
"""
Find a root of a vector function.
Parameters
----------
fun : callable
A vector function to find a root of.
x0 : ndarray
Initial guess.
args : tuple, optional
Extra arguments passed to the objective function and its Jacobian.
method : str, optional
Type of solver. Should be one of
- 'hybr' :ref:`(see here) <optimize.root-hybr>`
- 'lm' :ref:`(see here) <optimize.root-lm>`
- 'broyden1' :ref:`(see here) <optimize.root-broyden1>`
- 'broyden2' :ref:`(see here) <optimize.root-broyden2>`
- 'anderson' :ref:`(see here) <optimize.root-anderson>`
- 'linearmixing' :ref:`(see here) <optimize.root-linearmixing>`
- 'diagbroyden' :ref:`(see here) <optimize.root-diagbroyden>`
- 'excitingmixing' :ref:`(see here) <optimize.root-excitingmixing>`
- 'krylov' :ref:`(see here) <optimize.root-krylov>`
- 'df-sane' :ref:`(see here) <optimize.root-dfsane>`
jac : bool or callable, optional
If `jac` is a Boolean and is True, `fun` is assumed to return the
value of Jacobian along with the objective function. If False, the
Jacobian will be estimated numerically.
`jac` can also be a callable returning the Jacobian of `fun`. In
this case, it must accept the same arguments as `fun`.
tol : float, optional
Tolerance for termination. For detailed control, use solver-specific
options.
callback : function, optional
Optional callback function. It is called on every iteration as
``callback(x, f)`` where `x` is the current solution and `f`
the corresponding residual. For all methods but 'hybr' and 'lm'.
options : dict, optional
A dictionary of solver options. E.g. `xtol` or `maxiter`, see
:obj:`show_options()` for details.
Returns
-------
sol : OptimizeResult
The solution represented as a ``OptimizeResult`` object.
Important attributes are: ``x`` the solution array, ``success`` a
Boolean flag indicating if the algorithm exited successfully and
``message`` which describes the cause of the termination. See
`OptimizeResult` for a description of other attributes.
See also
--------
show_options : Additional options accepted by the solvers
Notes
-----
This section describes the available solvers that can be selected by the
'method' parameter. The default method is *hybr*.
Method *hybr* uses a modification of the Powell hybrid method as
implemented in MINPACK [1]_.
Method *lm* solves the system of nonlinear equations in a least squares
sense using a modification of the Levenberg-Marquardt algorithm as
implemented in MINPACK [1]_.
Method *df-sane* is a derivative-free spectral method. [3]_
Methods *broyden1*, *broyden2*, *anderson*, *linearmixing*,
*diagbroyden*, *excitingmixing*, *krylov* are inexact Newton methods,
    with backtracking or full line searches [2]_. Each method corresponds
    to a particular Jacobian approximation. See `nonlin` for details.
- Method *broyden1* uses Broyden's first Jacobian approximation, it is
known as Broyden's good method.
- Method *broyden2* uses Broyden's second Jacobian approximation, it
is known as Broyden's bad method.
- Method *anderson* uses (extended) Anderson mixing.
    - Method *krylov* uses Krylov approximation for the inverse Jacobian. It
      is suitable for large-scale problems.
- Method *diagbroyden* uses diagonal Broyden Jacobian approximation.
- Method *linearmixing* uses a scalar Jacobian approximation.
- Method *excitingmixing* uses a tuned diagonal Jacobian
approximation.
.. warning::
The algorithms implemented for methods *diagbroyden*,
*linearmixing* and *excitingmixing* may be useful for specific
problems, but whether they will work may depend strongly on the
problem.
.. versionadded:: 0.11.0
References
----------
.. [1] More, Jorge J., Burton S. Garbow, and Kenneth E. Hillstrom.
1980. User Guide for MINPACK-1.
.. [2] C. T. Kelley. 1995. Iterative Methods for Linear and Nonlinear
Equations. Society for Industrial and Applied Mathematics.
<http://www.siam.org/books/kelley/>
.. [3] W. La Cruz, J.M. Martinez, M. Raydan. Math. Comp. 75, 1429 (2006).
Examples
--------
    The following functions define a system of nonlinear equations and its
    Jacobian.
>>> def fun(x):
... return [x[0] + 0.5 * (x[0] - x[1])**3 - 1.0,
... 0.5 * (x[1] - x[0])**3 + x[1]]
>>> def jac(x):
... return np.array([[1 + 1.5 * (x[0] - x[1])**2,
... -1.5 * (x[0] - x[1])**2],
... [-1.5 * (x[1] - x[0])**2,
... 1 + 1.5 * (x[1] - x[0])**2]])
A solution can be obtained as follows.
>>> from scipy import optimize
>>> sol = optimize.root(fun, [0, 0], jac=jac, method='hybr')
>>> sol.x
array([ 0.8411639, 0.1588361])
"""
if not isinstance(args, tuple):
args = (args,)
meth = method.lower()
if options is None:
options = {}
if callback is not None and meth in ('hybr', 'lm'):
warn('Method %s does not accept callback.' % method,
RuntimeWarning)
# fun also returns the jacobian
if not callable(jac) and meth in ('hybr', 'lm'):
if bool(jac):
fun = MemoizeJac(fun)
jac = fun.derivative
else:
jac = None
# set default tolerances
if tol is not None:
options = dict(options)
if meth in ('hybr', 'lm'):
options.setdefault('xtol', tol)
elif meth in ('df-sane',):
options.setdefault('ftol', tol)
elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'krylov'):
options.setdefault('xtol', tol)
options.setdefault('xatol', np.inf)
options.setdefault('ftol', np.inf)
options.setdefault('fatol', np.inf)
if meth == 'hybr':
sol = _root_hybr(fun, x0, args=args, jac=jac, **options)
elif meth == 'lm':
sol = _root_leastsq(fun, x0, args=args, jac=jac, **options)
elif meth == 'df-sane':
_warn_jac_unused(jac, method)
sol = _root_df_sane(fun, x0, args=args, callback=callback,
**options)
elif meth in ('broyden1', 'broyden2', 'anderson', 'linearmixing',
'diagbroyden', 'excitingmixing', 'krylov'):
_warn_jac_unused(jac, method)
sol = _root_nonlin_solve(fun, x0, args=args, jac=jac,
_method=meth, _callback=callback,
**options)
else:
raise ValueError('Unknown solver %s' % method)
return sol
def _warn_jac_unused(jac, method):
if jac is not None:
warn('Method %s does not use the jacobian (jac).' % (method,),
RuntimeWarning)
def _root_leastsq(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, ftol=1.49012e-08,
gtol=0.0, maxiter=0, eps=0.0, factor=100, diag=None,
**unknown_options):
"""
Solve for least squares with Levenberg-Marquardt
Options
-------
col_deriv : bool
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float
Relative error desired in the sum of squares.
xtol : float
Relative error desired in the approximate solution.
gtol : float
Orthogonality desired between the function vector and the columns
of the Jacobian.
maxiter : int
The maximum number of calls to the function. If zero, then
100*(N+1) is the maximum where N is the number of elements in x0.
    eps : float
A suitable step length for the forward-difference approximation of
the Jacobian (for Dfun=None). If epsfcn is less than the machine
precision, it is assumed that the relative errors in the functions
are of the order of the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence
        N positive entries that serve as scale factors for the variables.
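    Examples
    --------
    A hypothetical call passing these options through `root` (the function
    and values are illustrative):

    >>> from scipy.optimize import root
    >>> sol = root(lambda x: x**2 - 1, [2.0], method='lm',
    ...            options={'xtol': 1e-10, 'maxiter': 200})  # doctest: +SKIP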
"""
_check_unknown_options(unknown_options)
x, cov_x, info, msg, ier = leastsq(func, x0, args=args, Dfun=jac,
full_output=True,
col_deriv=col_deriv, xtol=xtol,
ftol=ftol, gtol=gtol,
maxfev=maxiter, epsfcn=eps,
factor=factor, diag=diag)
sol = OptimizeResult(x=x, message=msg, status=ier,
success=ier in (1, 2, 3, 4), cov_x=cov_x,
fun=info.pop('fvec'))
sol.update(info)
return sol
def _root_nonlin_solve(func, x0, args=(), jac=None,
_callback=None, _method=None,
nit=None, disp=False, maxiter=None,
ftol=None, fatol=None, xtol=None, xatol=None,
tol_norm=None, line_search='armijo', jac_options=None,
**unknown_options):
_check_unknown_options(unknown_options)
f_tol = fatol
f_rtol = ftol
x_tol = xatol
x_rtol = xtol
verbose = disp
if jac_options is None:
jac_options = dict()
jacobian = {'broyden1': nonlin.BroydenFirst,
'broyden2': nonlin.BroydenSecond,
'anderson': nonlin.Anderson,
'linearmixing': nonlin.LinearMixing,
'diagbroyden': nonlin.DiagBroyden,
'excitingmixing': nonlin.ExcitingMixing,
'krylov': nonlin.KrylovJacobian
}[_method]
if args:
if jac:
def f(x):
return func(x, *args)[0]
else:
def f(x):
return func(x, *args)
else:
f = func
x, info = nonlin.nonlin_solve(f, x0, jacobian=jacobian(**jac_options),
iter=nit, verbose=verbose,
maxiter=maxiter, f_tol=f_tol,
f_rtol=f_rtol, x_tol=x_tol,
x_rtol=x_rtol, tol_norm=tol_norm,
line_search=line_search,
callback=_callback, full_output=True,
raise_exception=False)
sol = OptimizeResult(x=x)
sol.update(info)
return sol
def _root_broyden1_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no
extra parameters.
- ``simple``: drop oldest matrix column. Has no
extra parameters.
- ``svd``: keep only the most significant SVD
components.
Extra parameters:
- ``to_retain``: number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
            Default is infinity (i.e., no rank reduction).
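    Examples
    --------
    A hypothetical ``jac_options`` dict using SVD rank reduction (the
    function and values are illustrative):

    >>> from scipy.optimize import root
    >>> sol = root(lambda x: x**3 - 1, [0.5], method='broyden1',
    ...            options={'jac_options': {'reduction_method': ('svd', 2),
    ...                                     'max_rank': 4}})  # doctest: +SKIP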
"""
pass
def _root_broyden2_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
reduction_method : str or tuple, optional
Method used in ensuring that the rank of the Broyden
matrix stays low. Can either be a string giving the
name of the method, or a tuple of the form ``(method,
param1, param2, ...)`` that gives the name of the
method and values for additional parameters.
Methods available:
- ``restart``: drop all matrix columns. Has no
extra parameters.
- ``simple``: drop oldest matrix column. Has no
extra parameters.
- ``svd``: keep only the most significant SVD
components.
Extra parameters:
- ``to_retain``: number of SVD components to
retain when rank reduction is done.
Default is ``max_rank - 2``.
max_rank : int, optional
Maximum rank for the Broyden matrix.
            Default is infinity (i.e., no rank reduction).
"""
pass
def _root_anderson_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial guess for the Jacobian is (-1/alpha).
M : float, optional
Number of previous vectors to retain. Defaults to 5.
w0 : float, optional
Regularization parameter for numerical stability.
Compared to unity, good values of the order of 0.01.
"""
pass
def _root_linearmixing_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, ``NoConvergence`` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
"""
pass
def _root_diagbroyden_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
            Initial guess for the Jacobian is (-1/alpha).
"""
pass
def _root_excitingmixing_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
alpha : float, optional
Initial Jacobian approximation is (-1/alpha).
alphamax : float, optional
The entries of the diagonal Jacobian are kept in the range
``[alpha, alphamax]``.
"""
pass
def _root_krylov_doc():
"""
Options
-------
nit : int, optional
Number of iterations to make. If omitted (default), make as many
as required to meet tolerances.
disp : bool, optional
Print status to stdout on every iteration.
maxiter : int, optional
Maximum number of iterations to make. If more are needed to
meet convergence, `NoConvergence` is raised.
ftol : float, optional
Relative tolerance for the residual. If omitted, not used.
fatol : float, optional
Absolute tolerance (in max-norm) for the residual.
If omitted, default is 6e-6.
xtol : float, optional
Relative minimum step size. If omitted, not used.
xatol : float, optional
Absolute minimum step size, as determined from the Jacobian
approximation. If the step size is smaller than this, optimization
is terminated as successful. If omitted, not used.
tol_norm : function(vector) -> scalar, optional
Norm to use in convergence check. Default is the maximum norm.
line_search : {None, 'armijo' (default), 'wolfe'}, optional
Which type of a line search to use to determine the step size in
the direction given by the Jacobian approximation. Defaults to
'armijo'.
jac_options : dict, optional
Options for the respective Jacobian approximation.
rdiff : float, optional
Relative step size to use in numerical differentiation.
method : {'lgmres', 'gmres', 'bicgstab', 'cgs', 'minres'} or function
Krylov method to use to approximate the Jacobian.
Can be a string, or a function implementing the same
interface as the iterative solvers in
`scipy.sparse.linalg`.
The default is `scipy.sparse.linalg.lgmres`.
inner_M : LinearOperator or InverseJacobian
Preconditioner for the inner Krylov iteration.
Note that you can use also inverse Jacobians as (adaptive)
preconditioners. For example,
>>> jac = BroydenFirst()
            >>> kjac = KrylovJacobian(inner_M=jac.inverse)
If the preconditioner has a method named 'update', it will
be called as ``update(x, f)`` after each nonlinear step,
with ``x`` giving the current point, and ``f`` the current
function value.
inner_tol, inner_maxiter, ...
Parameters to pass on to the "inner" Krylov solver.
See `scipy.sparse.linalg.gmres` for details.
outer_k : int, optional
Size of the subspace kept across LGMRES nonlinear
iterations.
See `scipy.sparse.linalg.lgmres` for details.
"""
pass
| bsd-3-clause |
cedub/tower-cli | tests/test_models_fields.py | 2 | 3064 | # Copyright 2014, Ansible, Inc.
# Luke Sneeringer <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tower_cli import models
from tests.compat import unittest, mock
class FieldTests(unittest.TestCase):
"""A set of tests to establish that the base Field class works in the
way we expect.
"""
def test_dunder_lt(self):
"""Establish that the `__lt__` comparison method on fields works
as expected.
"""
f1 = models.Field()
f2 = models.Field()
self.assertTrue(f1 < f2)
def test_dunder_gt(self):
"""Establish that the `__gt__` comparison method on fields works
in the way we expect.
"""
f1 = models.Field()
f2 = models.Field()
self.assertTrue(f2 > f1)
def test_help_property_explicit(self):
"""Establish that an explicitly provided help text is preserved
as the field's help.
"""
f1 = models.Field(help_text='foo bar baz')
self.assertEqual(f1.help, 'foo bar baz')
def test_help_property_implicit(self):
"""Establish that a sane implicit help text is provided if none is
specified.
"""
f1 = models.Field()
f1.name = 'f1'
self.assertEqual(f1.help, 'The f1 field.')
def test_flags_standard(self):
"""Establish that the `flags` property returns what I expect for a
run-of-the-mill field.
"""
f1 = models.Field()
self.assertEqual(f1.flags, ['str'])
def test_flags_unique_unfilterable(self):
"""Establish that the `flags` property successfully flags unfilterable
and unique flags.
"""
f1 = models.Field(unique=True, filterable=False)
self.assertIn('unique', f1.flags)
self.assertIn('not filterable', f1.flags)
def test_flags_read_only(self):
"""Establish that the `flags` property successfully flags read-only
flags.
"""
f = models.Field(read_only=True)
self.assertEqual(f.flags, ['str', 'read-only'])
def test_flags_not_required(self):
"""Establish that the `flags` property successfully flags a
not-required field.
"""
f = models.Field(type=int, required=False)
self.assertEqual(f.flags, ['int', 'not required'])
def test_flags_type(self):
"""Establish that the flags property successfully shows the correct
type name.
"""
f = models.Field(type=bool)
self.assertEqual(f.flags, ['bool'])
| apache-2.0 |
safwanrahman/kuma | kuma/wiki/tests/test_helpers.py | 4 | 6145 | # -*- coding: utf-8 -*-
import mock
from django.contrib.sites.models import Site
from kuma.core.cache import memcache
from kuma.core.tests import eq_
from kuma.users.tests import UserTestCase
from . import WikiTestCase, document, revision
from ..models import DocumentZone
from ..templatetags.jinja_helpers import (absolutify,
document_zone_management_links,
revisions_unified_diff,
selector_content_find, tojson)
class HelpTests(WikiTestCase):
def test_tojson(self):
        eq_(tojson({'title': '<script>alert("Hi!")</script>'}),
            '{"title": "&lt;script&gt;alert(&quot;Hi!&quot;)&lt;/script&gt;"}')
@mock.patch.object(Site.objects, 'get_current')
def test_absolutify(self, get_current):
get_current.return_value.domain = 'testserver'
eq_(absolutify(''), 'https://testserver/')
eq_(absolutify('/'), 'https://testserver/')
eq_(absolutify('//'), 'https://testserver/')
eq_(absolutify('/foo/bar'), 'https://testserver/foo/bar')
eq_(absolutify('http://domain.com'), 'http://domain.com')
site = Site(domain='otherserver')
eq_(absolutify('/woo', site), 'https://otherserver/woo')
eq_(absolutify('/woo?var=value'), 'https://testserver/woo?var=value')
eq_(absolutify('/woo?var=value#fragment'),
'https://testserver/woo?var=value#fragment')
class RevisionsUnifiedDiffTests(UserTestCase, WikiTestCase):
def test_from_revision_none(self):
rev = revision()
try:
diff = revisions_unified_diff(None, rev)
except AttributeError:
self.fail("Should not throw AttributeError")
eq_("Diff is unavailable.", diff)
def test_from_revision_non_ascii(self):
doc1 = document(title=u'Gänsefüßchen', save=True)
rev1 = revision(document=doc1, content=u'spam', save=True)
doc2 = document(title=u'Außendienstüberwachlösung', save=True)
rev2 = revision(document=doc2, content=u'eggs', save=True)
try:
revisions_unified_diff(rev1, rev2)
except UnicodeEncodeError:
self.fail("Should not throw UnicodeEncodeError")
class DocumentZoneTests(UserTestCase, WikiTestCase):
"""Tests for DocumentZone helpers"""
def setUp(self):
super(DocumentZoneTests, self).setUp()
self.root_links_content = """
<p>Links content</p>
"""
self.root_content = """
<h4 id="links">Links</h4>
%s
""" % (self.root_links_content)
root_rev = revision(title='ZoneRoot',
slug='ZoneRoot',
content=self.root_content,
is_approved=True,
save=True)
self.root_doc = root_rev.document
self.root_doc.rendered_html = self.root_content
self.root_doc.save()
self.root_zone = DocumentZone(document=self.root_doc)
self.root_zone.save()
sub_rev = revision(title='SubPage',
slug='SubPage',
content='This is a subpage',
is_approved=True,
save=True)
self.sub_doc = sub_rev.document
self.sub_doc.parent_topic = self.root_doc
self.sub_doc.rendered_html = sub_rev.content
self.sub_doc.save()
self.sub_sub_links_content = """
<p>Sub-page links content</p>
"""
self.sub_sub_content = """
<h4 id="links">Links</h4>
%s
""" % (self.sub_sub_links_content)
sub_sub_rev = revision(title='SubSubPage',
slug='SubSubPage',
content='This is a subpage',
is_approved=True,
save=True)
self.sub_sub_doc = sub_sub_rev.document
self.sub_sub_doc.parent_topic = self.sub_doc
self.sub_sub_doc.rendered_html = self.sub_sub_content
self.sub_sub_doc.save()
other_rev = revision(title='otherPage',
slug='otherPage',
content='This is an other page',
is_approved=True,
save=True)
self.other_doc = other_rev.document
self.other_doc.save()
memcache.clear()
def test_document_zone_links(self):
admin = self.user_model.objects.filter(is_superuser=True)[0]
random = self.user_model.objects.filter(is_superuser=False)[0]
cases = [
(admin, self.root_doc, False, True),
(random, self.root_doc, False, False),
(admin, self.sub_doc, True, True),
(random, self.sub_doc, False, False),
(admin, self.other_doc, True, False),
(random, self.other_doc, False, False),
]
for (user, doc, add, change) in cases:
result_links = document_zone_management_links(user, doc)
eq_(add, result_links['add'] is not None, (user, doc))
eq_(change, result_links['change'] is not None)
class SelectorContentFindTests(UserTestCase, WikiTestCase):
def test_selector_not_found_returns_empty_string(self):
doc_content = u'<div id="not-summary">Not the summary</div>'
doc1 = document(title=u'Test Missing Selector', save=True)
doc1.rendered_html = doc_content
doc1.save()
revision(document=doc1, content=doc_content, save=True)
content = selector_content_find(doc1, 'summary')
assert content == ''
def test_pyquery_bad_selector_syntax_returns_empty_string(self):
doc_content = u'<div id="not-suNot the summary</span'
doc1 = document(title=u'Test Missing Selector', save=True)
doc1.rendered_html = doc_content
doc1.save()
revision(document=doc1, content=doc_content, save=True)
content = selector_content_find(doc1, '.')
assert content == ''
| mpl-2.0 |
crazy-canux/xplugin_nagios | plugin/plugins/jit/src/jit/session.py | 1 | 6030 | # -*- coding: utf-8 -*-
# Copyright (C) Canux CHENG <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
============================
:mod:`jit.session` module
============================
Contains classes that handle session connection to fetch XML interface
content remotely.
"""
import logging
import urlparse
import socket
import ftplib
from StringIO import StringIO
import exceptions
import requests
logger = logging.getLogger('plugin.jit.session')
class RemoteInterfaceSession(object):
"""
Remote session handling to fetch XML interface content.
"""
def __init__(self, url, timeout=10):
"""
Initialize a new remote session on ``url``. Raise an error if
        ``timeout`` in seconds is reached.
>>> session = RemoteInterfaceSession(\
"http://insalert.app.corp:80/insequence/Alert_USMSMSQL0001.xml")
>>> session.protocol
'http'
>>> session.hostname
'insalert.app.corp'
>>> session.port
80
>>> session.path
'/insequence/Alert_USMSMSQL0001.xml'
:param url: Valid URL scheme.
:type url: basestring
"""
self._url = urlparse.urlparse(url)
self.protocol = self._url.scheme
self.timeout = timeout
self.hostname = self._url.hostname
self.port = self._url.port
self.path = self._url.path
self.query = self._url.query
self.username = getattr(self._url, "username", None)
self.password = getattr(self._url, "password", None)
self._remote = None
def connect(self):
"""
Establish the remote connection.
# FTP session
>>> session = RemoteInterfaceSession(\
"ftp://Nagios:[email protected]:4000/ALERT_BEGNESEQ0001.xml")
>>> session.connect()
>>> isinstance(session._remote, ftplib.FTP)
True
# HTTP session
>>> session = RemoteInterfaceSession(\
"http://insalert.app.corp:80/insequence/Alert_USMSMSQL0001.xml")
>>> session.connect()
>>> session._remote
<Response [200]>
# Error
>>> session = RemoteInterfaceSession(\
"gopher://insalert.app.corp:80/insequence/Alert_USMSMSQL0001.xml")
>>> session.connect()
Traceback (most recent call last):
...
NotImplementedError: Only HTTP or FTP are supported !
"""
logger.debug("Remote session uses %s." % self.protocol.upper())
if self.protocol == "ftp":
self._remote = ftplib.FTP()
try:
self._remote.connect(self.hostname,
self.port,
self.timeout)
if self.username and self.password:
self._remote.login(self.username, self.password)
else:
self._remote.login()
except socket.timeout:
raise exceptions.FTPTimedOut("Timeout on FTP server %s !" %
self.hostname)
except:
raise exceptions.FTPError(self.hostname,
self.port,
self.username)
logger.debug('Successfully authenticated on FTP server.')
elif self.protocol == "http":
if self.username and self.password:
credentials = (self.username, self.password)
else:
credentials = None
url = "{0.protocol}://{0.hostname}{0.path}?{0.query}"
self._remote = requests.get(url.format(self), auth=credentials)
self._remote.raise_for_status()
logger.debug('Successfully authenticated on HTTP server.')
else:
raise NotImplementedError("Only HTTP or FTP are supported !")
def read_data(self):
"""
Returns the XML data as a file-like object (StringIO).
# FTP session
>>> session = RemoteInterfaceSession(\
"ftp://Nagios:[email protected]:4000/ALERT_BEGNESEQ0001.xml")
>>> session.connect()
>>> data = session.read_data()
>>> "<?xml" in data
True
# HTTP session
>>> session = RemoteInterfaceSession(\
"http://insalert.app.corp:80/insequence/Alert_USMSMSQL0001.xml")
>>> session.connect()
>>> data = session.read_data()
>>> "USMSMSQL0001" in data
True
"""
data = StringIO()
if isinstance(self._remote, ftplib.FTP):
try:
self._remote.retrlines('RETR %s' % self.path, data.write)
except ftplib.all_errors as e:
raise exceptions.FTPRetrError(
"Cannot read the XML data over FTP: %s" % e)
elif isinstance(self._remote, requests.Response):
data.write(self._remote.text)
return data.getvalue()
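# A minimal usage sketch, not part of the original module: the URL is an
# assumption modeled on the doctests above. connect() dispatches on the URL
# scheme (FTP or HTTP) and read_data() returns the fetched XML as a string.
if __name__ == '__main__':
    session = RemoteInterfaceSession(
        "http://insalert.app.corp:80/insequence/Alert_USMSMSQL0001.xml")
    session.connect()
    print session.read_data()[:200]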
| gpl-2.0 |
WoLpH/EventGhost | lib27/site-packages/tornado/test/httpclient_test.py | 12 | 26340 | #!/usr/bin/env python
from __future__ import absolute_import, division, print_function, with_statement
import base64
import binascii
from contextlib import closing
import copy
import functools
import sys
import threading
import datetime
from io import BytesIO
from tornado.escape import utf8
from tornado import gen
from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.iostream import IOStream
from tornado.log import gen_log
from tornado import netutil
from tornado.stack_context import ExceptionStackContext, NullContext
from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
from tornado.test.util import unittest, skipOnTravis
from tornado.util import u
from tornado.web import Application, RequestHandler, url
from tornado.httputil import format_timestamp, HTTPHeaders
class HelloWorldHandler(RequestHandler):
def get(self):
name = self.get_argument("name", "world")
self.set_header("Content-Type", "text/plain")
self.finish("Hello %s!" % name)
class PostHandler(RequestHandler):
def post(self):
self.finish("Post arg1: %s, arg2: %s" % (
self.get_argument("arg1"), self.get_argument("arg2")))
class PutHandler(RequestHandler):
def put(self):
self.write("Put body: ")
self.write(self.request.body)
class RedirectHandler(RequestHandler):
def prepare(self):
self.write('redirects can have bodies too')
self.redirect(self.get_argument("url"),
status=int(self.get_argument("status", "302")))
class ChunkHandler(RequestHandler):
@gen.coroutine
def get(self):
self.write("asdf")
self.flush()
# Wait a bit to ensure the chunks are sent and received separately.
yield gen.sleep(0.01)
self.write("qwer")
class AuthHandler(RequestHandler):
def get(self):
self.finish(self.request.headers["Authorization"])
class CountdownHandler(RequestHandler):
def get(self, count):
count = int(count)
if count > 0:
self.redirect(self.reverse_url("countdown", count - 1))
else:
self.write("Zero")
class EchoPostHandler(RequestHandler):
def post(self):
self.write(self.request.body)
class UserAgentHandler(RequestHandler):
def get(self):
self.write(self.request.headers.get('User-Agent', 'User agent not set'))
class ContentLength304Handler(RequestHandler):
def get(self):
self.set_status(304)
self.set_header('Content-Length', 42)
def _clear_headers_for_304(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
class PatchHandler(RequestHandler):
def patch(self):
"Return the request payload - so we can check it is being kept"
self.write(self.request.body)
class AllMethodsHandler(RequestHandler):
SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
def method(self):
self.write(self.request.method)
get = post = put = delete = options = patch = other = method
# These tests end up getting run redundantly: once here with the default
# HTTPClient implementation, and then again in each implementation's own
# test suite.
class HTTPClientCommonTestCase(AsyncHTTPTestCase):
def get_app(self):
return Application([
url("/hello", HelloWorldHandler),
url("/post", PostHandler),
url("/put", PutHandler),
url("/redirect", RedirectHandler),
url("/chunk", ChunkHandler),
url("/auth", AuthHandler),
url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
url("/echopost", EchoPostHandler),
url("/user_agent", UserAgentHandler),
url("/304_with_content_length", ContentLength304Handler),
url("/all_methods", AllMethodsHandler),
url('/patch', PatchHandler),
], gzip=True)
def test_patch_receives_payload(self):
body = b"some patch data"
response = self.fetch("/patch", method='PATCH', body=body)
self.assertEqual(response.code, 200)
self.assertEqual(response.body, body)
@skipOnTravis
def test_hello_world(self):
response = self.fetch("/hello")
self.assertEqual(response.code, 200)
self.assertEqual(response.headers["Content-Type"], "text/plain")
self.assertEqual(response.body, b"Hello world!")
self.assertEqual(int(response.request_time), 0)
response = self.fetch("/hello?name=Ben")
self.assertEqual(response.body, b"Hello Ben!")
def test_streaming_callback(self):
# streaming_callback is also tested in test_chunked
chunks = []
response = self.fetch("/hello",
streaming_callback=chunks.append)
# with streaming_callback, data goes to the callback and not response.body
self.assertEqual(chunks, [b"Hello world!"])
self.assertFalse(response.body)
def test_post(self):
response = self.fetch("/post", method="POST",
body="arg1=foo&arg2=bar")
self.assertEqual(response.code, 200)
self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_chunked(self):
response = self.fetch("/chunk")
self.assertEqual(response.body, b"asdfqwer")
chunks = []
response = self.fetch("/chunk",
streaming_callback=chunks.append)
self.assertEqual(chunks, [b"asdf", b"qwer"])
self.assertFalse(response.body)
def test_chunked_close(self):
# test case in which chunks spread read-callback processing
# over several ioloop iterations, but the connection is already closed.
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
Transfer-Encoding: chunked

1
1
1
2
0

""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
# fake an HTTP server using chunked encoding where the final chunks
# and connection close all happen at once
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.body, b"12")
self.io_loop.remove_handler(sock.fileno())
def test_streaming_stack_context(self):
chunks = []
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def streaming_cb(chunk):
chunks.append(chunk)
if chunk == b'qwer':
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', streaming_callback=streaming_cb)
self.assertEqual(chunks, [b'asdf', b'qwer'])
self.assertEqual(1, len(exc_info))
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_basic_auth(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_basic_auth_explicit_mode(self):
self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="basic").body,
b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
def test_unsupported_auth_mode(self):
# curl and simple clients handle errors a bit differently; the
# important thing is that they don't fall back to basic auth
# on an unknown mode.
with ExpectLog(gen_log, "uncaught exception", required=False):
with self.assertRaises((ValueError, HTTPError)):
response = self.fetch("/auth", auth_username="Aladdin",
auth_password="open sesame",
auth_mode="asdf")
response.rethrow()
def test_follow_redirect(self):
response = self.fetch("/countdown/2", follow_redirects=False)
self.assertEqual(302, response.code)
self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
response = self.fetch("/countdown/2")
self.assertEqual(200, response.code)
self.assertTrue(response.effective_url.endswith("/countdown/0"))
self.assertEqual(b"Zero", response.body)
def test_credentials_in_url(self):
url = self.get_url("/auth").replace("http://", "http://me:secret@")
self.http_client.fetch(url, self.stop)
response = self.wait()
self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
response.body)
def test_body_encoding(self):
unicode_body = u("\xe9")
byte_body = binascii.a2b_hex(b"e9")
# unicode string in body gets converted to utf8
response = self.fetch("/echopost", method="POST", body=unicode_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "2")
self.assertEqual(response.body, utf8(unicode_body))
# byte strings pass through directly
response = self.fetch("/echopost", method="POST",
body=byte_body,
headers={"Content-Type": "application/blah"})
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
# Mixing unicode in headers and byte string bodies shouldn't
# break anything
response = self.fetch("/echopost", method="POST", body=byte_body,
headers={"Content-Type": "application/blah"},
user_agent=u("foo"))
self.assertEqual(response.headers["Content-Length"], "1")
self.assertEqual(response.body, byte_body)
def test_types(self):
response = self.fetch("/hello")
self.assertEqual(type(response.body), bytes)
self.assertEqual(type(response.headers["Content-Type"]), str)
self.assertEqual(type(response.code), int)
self.assertEqual(type(response.effective_url), str)
def test_header_callback(self):
first_line = []
headers = {}
chunks = []
def header_callback(header_line):
if header_line.startswith('HTTP/1.1 101'):
# Upgrading to HTTP/2
pass
elif header_line.startswith('HTTP/'):
first_line.append(header_line)
elif header_line != '\r\n':
k, v = header_line.split(':', 1)
headers[k.lower()] = v.strip()
def streaming_callback(chunk):
# All header callbacks are run before any streaming callbacks,
# so the header data is available to process the data as it
# comes in.
self.assertEqual(headers['content-type'], 'text/html; charset=UTF-8')
chunks.append(chunk)
self.fetch('/chunk', header_callback=header_callback,
streaming_callback=streaming_callback)
self.assertEqual(len(first_line), 1, first_line)
self.assertRegexpMatches(first_line[0], 'HTTP/[0-9]\\.[0-9] 200.*\r\n')
self.assertEqual(chunks, [b'asdf', b'qwer'])
def test_header_callback_stack_context(self):
exc_info = []
def error_handler(typ, value, tb):
exc_info.append((typ, value, tb))
return True
def header_callback(header_line):
if header_line.lower().startswith('content-type:'):
1 / 0
with ExceptionStackContext(error_handler):
self.fetch('/chunk', header_callback=header_callback)
self.assertEqual(len(exc_info), 1)
self.assertIs(exc_info[0][0], ZeroDivisionError)
def test_configure_defaults(self):
defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
# Construct a new instance of the configured client class
client = self.http_client.__class__(self.io_loop, force_instance=True,
defaults=defaults)
try:
client.fetch(self.get_url('/user_agent'), callback=self.stop)
response = self.wait()
self.assertEqual(response.body, b'TestDefaultUserAgent')
finally:
client.close()
def test_header_types(self):
# Header values may be passed as character or utf8 byte strings,
# in a plain dictionary or an HTTPHeaders object.
# Keys must always be the native str type.
# All combinations should have the same results on the wire.
for value in [u("MyUserAgent"), b"MyUserAgent"]:
for container in [dict, HTTPHeaders]:
headers = container()
headers['User-Agent'] = value
resp = self.fetch('/user_agent', headers=headers)
self.assertEqual(
resp.body, b"MyUserAgent",
"response=%r, value=%r, container=%r" %
(resp.body, value, container))
def test_multi_line_headers(self):
# Multi-line http headers are rare but rfc-allowed
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
sock, port = bind_unused_port()
with closing(sock):
def write_response(stream, request_data):
if b"HTTP/1." not in request_data:
self.skipTest("requires HTTP/1.x")
                stream.write(b"""\
HTTP/1.1 200 OK
X-XSS-Protection: 1;
\tmode=block

""".replace(b"\n", b"\r\n"), callback=stream.close)
def accept_callback(conn, address):
stream = IOStream(conn, io_loop=self.io_loop)
stream.read_until(b"\r\n\r\n",
functools.partial(write_response, stream))
netutil.add_accept_handler(sock, accept_callback, self.io_loop)
self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
resp = self.wait()
resp.rethrow()
self.assertEqual(resp.headers['X-XSS-Protection'], "1; mode=block")
self.io_loop.remove_handler(sock.fileno())
def test_304_with_content_length(self):
# According to the spec 304 responses SHOULD NOT include
# Content-Length or other entity headers, but some servers do it
# anyway.
# http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
response = self.fetch('/304_with_content_length')
self.assertEqual(response.code, 304)
self.assertEqual(response.headers['Content-Length'], '42')
def test_final_callback_stack_context(self):
# The final callback should be run outside of the httpclient's
# stack_context. We want to ensure that there is not stack_context
# between the user's callback and the IOLoop, so monkey-patch
# IOLoop.handle_callback_exception and disable the test harness's
# context with a NullContext.
# Note that this does not apply to secondary callbacks (header
# and streaming_callback), as errors there must be seen as errors
# by the http client so it can clean up the connection.
exc_info = []
def handle_callback_exception(callback):
exc_info.append(sys.exc_info())
self.stop()
self.io_loop.handle_callback_exception = handle_callback_exception
with NullContext():
self.http_client.fetch(self.get_url('/hello'),
lambda response: 1 / 0)
self.wait()
self.assertEqual(exc_info[0][0], ZeroDivisionError)
@gen_test
def test_future_interface(self):
response = yield self.http_client.fetch(self.get_url('/hello'))
self.assertEqual(response.body, b'Hello world!')
@gen_test
def test_future_http_error(self):
with self.assertRaises(HTTPError) as context:
yield self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(context.exception.code, 404)
self.assertEqual(context.exception.response.code, 404)
@gen_test
def test_future_http_error_no_raise(self):
response = yield self.http_client.fetch(self.get_url('/notfound'), raise_error=False)
self.assertEqual(response.code, 404)
@gen_test
def test_reuse_request_from_response(self):
# The response.request attribute should be an HTTPRequest, not
# a _RequestProxy.
# This test uses self.http_client.fetch because self.fetch calls
# self.get_url on the input unconditionally.
url = self.get_url('/hello')
response = yield self.http_client.fetch(url)
self.assertEqual(response.request.url, url)
self.assertTrue(isinstance(response.request, HTTPRequest))
response2 = yield self.http_client.fetch(response.request)
self.assertEqual(response2.body, b'Hello world!')
def test_all_methods(self):
for method in ['GET', 'DELETE', 'OPTIONS']:
response = self.fetch('/all_methods', method=method)
self.assertEqual(response.body, utf8(method))
for method in ['POST', 'PUT', 'PATCH']:
response = self.fetch('/all_methods', method=method, body=b'')
self.assertEqual(response.body, utf8(method))
response = self.fetch('/all_methods', method='HEAD')
self.assertEqual(response.body, b'')
response = self.fetch('/all_methods', method='OTHER',
allow_nonstandard_methods=True)
self.assertEqual(response.body, b'OTHER')
def test_body_sanity_checks(self):
# These methods require a body.
for method in ('POST', 'PUT', 'PATCH'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method)
resp.rethrow()
self.assertIn('must not be None', str(context.exception))
resp = self.fetch('/all_methods', method=method,
allow_nonstandard_methods=True)
self.assertEqual(resp.code, 200)
# These methods don't allow a body.
for method in ('GET', 'DELETE', 'OPTIONS'):
with self.assertRaises(ValueError) as context:
resp = self.fetch('/all_methods', method=method, body=b'asdf')
resp.rethrow()
self.assertIn('must be None', str(context.exception))
# In most cases this can be overridden, but curl_httpclient
# does not allow body with a GET at all.
if method != 'GET':
resp = self.fetch('/all_methods', method=method, body=b'asdf',
allow_nonstandard_methods=True)
resp.rethrow()
self.assertEqual(resp.code, 200)
# This test causes odd failures with the combination of
# curl_httpclient (at least with the version of libcurl available
# on ubuntu 12.04), TwistedIOLoop, and epoll. For POST (but not PUT),
# curl decides the response came back too soon and closes the connection
# to start again. It does this *before* telling the socket callback to
# unregister the FD. Some IOLoop implementations have special kernel
# integration to discover this immediately. Tornado's IOLoops
# ignore errors on remove_handler to accommodate this behavior, but
# Twisted's reactor does not. The removeReader call fails and so
# do all future removeAll calls (which our tests do at cleanup).
#
# def test_post_307(self):
# response = self.fetch("/redirect?status=307&url=/post",
# method="POST", body=b"arg1=foo&arg2=bar")
# self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
def test_put_307(self):
response = self.fetch("/redirect?status=307&url=/put",
method="PUT", body=b"hello")
response.rethrow()
self.assertEqual(response.body, b"Put body: hello")
class RequestProxyTest(unittest.TestCase):
def test_request_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
user_agent='foo'),
dict())
self.assertEqual(proxy.user_agent, 'foo')
def test_default_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict(network_interface='foo'))
self.assertEqual(proxy.network_interface, 'foo')
def test_both_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/',
proxy_host='foo'),
dict(proxy_host='bar'))
self.assertEqual(proxy.proxy_host, 'foo')
def test_neither_set(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
self.assertIs(proxy.auth_username, None)
def test_bad_attribute(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'),
dict())
with self.assertRaises(AttributeError):
proxy.foo
def test_defaults_none(self):
proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
self.assertIs(proxy.auth_username, None)
class HTTPResponseTestCase(unittest.TestCase):
def test_str(self):
response = HTTPResponse(HTTPRequest('http://example.com'),
200, headers={}, buffer=BytesIO())
s = str(response)
self.assertTrue(s.startswith('HTTPResponse('))
self.assertIn('code=200', s)
class SyncHTTPClientTest(unittest.TestCase):
def setUp(self):
if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
'AsyncIOMainLoop'):
# TwistedIOLoop only supports the global reactor, so we can't have
# separate IOLoops for client and server threads.
# AsyncIOMainLoop doesn't work with the default policy
# (although it could with some tweaks to this test and a
# policy that created loops for non-main threads).
raise unittest.SkipTest(
'Sync HTTPClient not compatible with TwistedIOLoop or '
'AsyncIOMainLoop')
self.server_ioloop = IOLoop()
sock, self.port = bind_unused_port()
app = Application([('/', HelloWorldHandler)])
self.server = HTTPServer(app, io_loop=self.server_ioloop)
self.server.add_socket(sock)
self.server_thread = threading.Thread(target=self.server_ioloop.start)
self.server_thread.start()
self.http_client = HTTPClient()
def tearDown(self):
def stop_server():
self.server.stop()
# Delay the shutdown of the IOLoop by one iteration because
# the server may still have some cleanup work left when
            # the client finishes with the response (this is noticeable
# with http/2, which leaves a Future with an unexamined
# StreamClosedError on the loop).
self.server_ioloop.add_callback(self.server_ioloop.stop)
self.server_ioloop.add_callback(stop_server)
self.server_thread.join()
self.http_client.close()
self.server_ioloop.close(all_fds=True)
def get_url(self, path):
return 'http://127.0.0.1:%d%s' % (self.port, path)
def test_sync_client(self):
response = self.http_client.fetch(self.get_url('/'))
self.assertEqual(b'Hello world!', response.body)
def test_sync_client_error(self):
# Synchronous HTTPClient raises errors directly; no need for
# response.rethrow()
with self.assertRaises(HTTPError) as assertion:
self.http_client.fetch(self.get_url('/notfound'))
self.assertEqual(assertion.exception.code, 404)
class HTTPRequestTestCase(unittest.TestCase):
def test_headers(self):
request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
self.assertEqual(request.headers, {'foo': 'bar'})
def test_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = {'bar': 'baz'}
self.assertEqual(request.headers, {'bar': 'baz'})
def test_null_headers_setter(self):
request = HTTPRequest('http://example.com')
request.headers = None
self.assertEqual(request.headers, {})
def test_body(self):
request = HTTPRequest('http://example.com', body='foo')
self.assertEqual(request.body, utf8('foo'))
def test_body_setter(self):
request = HTTPRequest('http://example.com')
request.body = 'foo'
self.assertEqual(request.body, utf8('foo'))
def test_if_modified_since(self):
http_date = datetime.datetime.utcnow()
request = HTTPRequest('http://example.com', if_modified_since=http_date)
self.assertEqual(request.headers,
{'If-Modified-Since': format_timestamp(http_date)})
class HTTPErrorTestCase(unittest.TestCase):
def test_copy(self):
e = HTTPError(403)
e2 = copy.copy(e)
self.assertIsNot(e, e2)
self.assertEqual(e.code, e2.code)
def test_str(self):
e = HTTPError(403)
self.assertEqual(str(e), "HTTP 403: Forbidden")
| gpl-2.0 |
bxlab/HiFive_Paper | Scripts/HiCLib/bx-python-0.7.1/scripts/maf_print_scores.py | 2 | 1582 | #!/usr/bin/env python
"""
Read a MAF from standard input and print the score of each block. It can
optionally recalculate each score using the hox70 matrix, and normalize the
score by the number of columns in the alignment.
TODO: Should be able to read an arbitrary scoring matrix.
usage: %prog [options]
-r, --recalculate: don't use the score from the maf, recalculate (using hox70 matrix)
-l, --lnorm: divide (normalize) score by alignment text length
"""
from __future__ import division
import sys
from bx.cookbook import doc_optparse
from bx.align import maf
from bx.align import score
from optparse import OptionParser
def main():
# Parse command line arguments
options, args = doc_optparse.parse( __doc__ )
try:
lnorm = bool( options.lnorm )
recalculate = bool( options.recalculate )
except:
doc_optparse.exit()
hox70 = score.build_scoring_scheme( """ A C G T
91 -114 -31 -123
-114 100 -125 -31
-31 -125 100 -114
-123 -31 -114 91 """, 400, 30, default=0 )
maf_reader = maf.Reader( sys.stdin )
for m in maf_reader:
if m.text_size == 0:
print "NA"
continue
s = m.score
# Recalculate?
if recalculate:
s = hox70.score_alignment( m )
# Normalize?
if lnorm:
s = s / m.text_size
# Print
print s
if __name__ == "__main__":
main()
| bsd-3-clause |
rdio/translate-toolkit | lang/test_ne.py | 33 | 2955 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from translate.lang import factory
def test_punctranslate():
"""Tests that we can translate punctuation."""
language = factory.getlanguage('ne')
assert language.punctranslate(u"") == u""
assert language.punctranslate(u"abc efg") == u"abc efg"
assert language.punctranslate(u"abc efg.") == u"abc efg ।"
assert language.punctranslate(u"(abc efg).") == u"(abc efg) ।"
assert language.punctranslate(u"abc efg...") == u"abc efg..."
assert language.punctranslate(u"abc efg?") == u"abc efg ?"
def test_sentences():
"""Tests basic functionality of sentence segmentation."""
language = factory.getlanguage('ne')
sentences = language.sentences(u"")
assert sentences == []
# Without spaces before the punctuation
sentences = language.sentences(u"यसको भौगोलिक अक्षांश २६ डिग्री २२ मिनेट देखि ३० डिग्री २७ मिनेट उत्तर र ८० डिग्री ४ मिनेट देखि ८८ डिग्री १२ मिनेट पूर्वी देशान्तर सम्म फैलिएको छ। यसको कूल क्षेत्रफल १,४७,१८१ वर्ग कि.मि छ।\n")
assert sentences == [u"यसको भौगोलिक अक्षांश २६ डिग्री २२ मिनेट देखि ३० डिग्री २७ मिनेट उत्तर र ८० डिग्री ४ मिनेट देखि ८८ डिग्री १२ मिनेट पूर्वी देशान्तर सम्म फैलिएको छ।", u"यसको कूल क्षेत्रफल १,४७,१८१ वर्ग कि.मि छ।"]
# With spaces before the punctuation
sentences = language.sentences(u"यसको भौगोलिक अक्षांश २६ डिग्री २२ मिनेट देखि ३० डिग्री २७ मिनेट उत्तर र ८० डिग्री ४ मिनेट देखि ८८ डिग्री १२ मिनेट पूर्वी देशान्तर सम्म फैलिएको छ । यसको कूल क्षेत्रफल १,४७,१८१ वर्ग कि.मि छ ।\n")
assert sentences == [u"यसको भौगोलिक अक्षांश २६ डिग्री २२ मिनेट देखि ३० डिग्री २७ मिनेट उत्तर र ८० डिग्री ४ मिनेट देखि ८८ डिग्री १२ मिनेट पूर्वी देशान्तर सम्म फैलिएको छ ।", u"यसको कूल क्षेत्रफल १,४७,१८१ वर्ग कि.मि छ ।"]
| gpl-2.0 |
adrianholovaty/django | django/contrib/localflavor/it/util.py | 436 | 1807 | from django.utils.encoding import smart_str, smart_unicode
def ssn_check_digit(value):
"Calculate Italian social security number check digit."
ssn_even_chars = {
'0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
'9': 9, 'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6, 'H': 7,
'I': 8, 'J': 9, 'K': 10, 'L': 11, 'M': 12, 'N': 13, 'O': 14, 'P': 15,
'Q': 16, 'R': 17, 'S': 18, 'T': 19, 'U': 20, 'V': 21, 'W': 22, 'X': 23,
'Y': 24, 'Z': 25
}
ssn_odd_chars = {
'0': 1, '1': 0, '2': 5, '3': 7, '4': 9, '5': 13, '6': 15, '7': 17, '8':
19, '9': 21, 'A': 1, 'B': 0, 'C': 5, 'D': 7, 'E': 9, 'F': 13, 'G': 15,
'H': 17, 'I': 19, 'J': 21, 'K': 2, 'L': 4, 'M': 18, 'N': 20, 'O': 11,
'P': 3, 'Q': 6, 'R': 8, 'S': 12, 'T': 14, 'U': 16, 'V': 10, 'W': 22,
'X': 25, 'Y': 24, 'Z': 23
}
# Chars from 'A' to 'Z'
ssn_check_digits = [chr(x) for x in range(65, 91)]
ssn = value.upper()
total = 0
for i in range(0, 15):
try:
if i % 2 == 0:
total += ssn_odd_chars[ssn[i]]
else:
total += ssn_even_chars[ssn[i]]
except KeyError:
msg = "Character '%(char)s' is not allowed." % {'char': ssn[i]}
raise ValueError(msg)
return ssn_check_digits[total % 26]
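# An illustrative check (the 15-character input is a hypothetical fiscal-code
# prefix; the expected digit was hand-traced through the tables above):
#
#     >>> ssn_check_digit('MRORSS00A00A000')
#     'U'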
def vat_number_check_digit(vat_number):
"Calculate Italian VAT number check digit."
normalized_vat_number = smart_str(vat_number).zfill(10)
total = 0
for i in range(0, 10, 2):
total += int(normalized_vat_number[i])
for i in range(1, 11, 2):
        quotient, remainder = divmod(int(normalized_vat_number[i]) * 2, 10)
total += quotient + remainder
return smart_unicode((10 - total % 10) % 10)
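# An illustrative check (hypothetical VAT number, hand-traced through the
# code above): '12345678' is zero-filled to '0012345678', which yields check
# digit u'2', so the full eleven-digit number would read '00123456782'.
#
#     >>> vat_number_check_digit('12345678')
#     u'2'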
| bsd-3-clause |
hack-r/supeRstition | python/pycalcal/wrappers.py | 2 | 1715 | from pycalcal import chinese_from_fixed, fixed_from_chinese, \
gregorian_from_fixed, fixed_from_gregorian, \
chinese_new_year
from datetime import date
from collections import namedtuple
chinese_date = namedtuple('chinese_date', 'year, month, day, is_leap_month')
def _tuple_from_pcc_chinese(pcc_cdate):
cycle, offset, month, leap, day = pcc_cdate
year = cycle*60 + offset - 2697
return chinese_date(year, month, day, leap)
def _pcc_chinese_from_tuple(cdate):
year, month, day, leap = cdate
cycle, offset = divmod(year + 2697, 60)
return [cycle, offset, month, leap, day]
def _date_from_pcc_gregorian(pcc_gdate):
year, month, day = pcc_gdate
return date(year, month, day)
def _pcc_gregorian_from_date(gdate):
year = gdate.year
month = gdate.month
day = gdate.day
return [year, month, day]
def gregorian_from_chinese(cdate):
pcc_cdate = _pcc_chinese_from_tuple(cdate)
pcc_gdate = gregorian_from_fixed(fixed_from_chinese(pcc_cdate))
return _date_from_pcc_gregorian(pcc_gdate)
def chinese_from_gregorian(gdate):
pcc_gdate = _pcc_gregorian_from_date(gdate)
pcc_cdate = chinese_from_fixed(fixed_from_gregorian(pcc_gdate))
return _tuple_from_pcc_chinese(pcc_cdate)
def get_branch(cdate):
return (cdate.year - 4) % 12
def get_stem(cdate):
return (cdate.year - 4) % 10
def is_valid_chinese_date(cdate):
pcc_cdate = _pcc_chinese_from_tuple(cdate)
cdate2 = chinese_from_fixed(fixed_from_chinese(pcc_cdate))
if cdate2[0] == pcc_cdate[0] - 1 and \
cdate2[1] == 60 and \
pcc_cdate[1] == 0:
return cdate2[2:] == pcc_cdate[2:]
return cdate2 == pcc_cdate
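# A round-trip sketch (the date is arbitrary; 2011-02-03 was Chinese New
# Year). get_stem() and get_branch() return 0-based indexes into the
# sexagenary stems and branches for the Chinese year.
if __name__ == '__main__':
    cdate = chinese_from_gregorian(date(2011, 2, 3))
    assert gregorian_from_chinese(cdate) == date(2011, 2, 3)
    print cdate, get_stem(cdate), get_branch(cdate)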
| gpl-2.0 |
embray/d2to1 | d2to1/core.py | 2 | 3325 | import os
import sys
import warnings
from distutils.errors import DistutilsFileError, DistutilsSetupError
from .extern import six
from .util import DefaultGetDict, IgnoreDict, cfg_to_args
def d2to1(dist, attr, value):
"""Implements the actual d2to1 setup() keyword. When used, this should be
the only keyword in your setup() aside from `setup_requires`.
If given as a string, the value of d2to1 is assumed to be the relative path
to the setup.cfg file to use. Otherwise, if it evaluates to true, it
simply assumes that d2to1 should be used, and the default 'setup.cfg' is
used.
This works by reading the setup.cfg file, parsing out the supported
metadata and command options, and using them to rebuild the
`DistributionMetadata` object and set the newly added command options.
The reason for doing things this way is that a custom `Distribution` class
will not play nicely with setup_requires; however, this implementation may
not work well with distributions that do use a `Distribution` subclass.
"""
from distutils.core import Distribution
from setuptools.dist import _get_unpatched
_Distribution = _get_unpatched(Distribution)
if not value:
return
if isinstance(value, six.string_types):
path = os.path.abspath(value)
else:
path = os.path.abspath('setup.cfg')
if not os.path.exists(path):
raise DistutilsFileError(
'The setup.cfg file %s does not exist.' % path)
# Converts the setup.cfg file to setup() arguments
try:
attrs = cfg_to_args(path)
except:
e = sys.exc_info()[1]
raise DistutilsSetupError(
'Error parsing %s: %s: %s' % (path, e.__class__.__name__,
e.args[0]))
# Repeat some of the Distribution initialization code with the newly
# provided attrs
if attrs:
# Skips 'options' and 'licence' support which are rarely used; may add
# back in later if demanded
for key, val in six.iteritems(attrs):
if hasattr(dist.metadata, 'set_' + key):
getattr(dist.metadata, 'set_' + key)(val)
elif hasattr(dist.metadata, key):
setattr(dist.metadata, key, val)
elif hasattr(dist, key):
setattr(dist, key, val)
else:
msg = 'Unknown distribution option: %s' % repr(key)
warnings.warn(msg)
# Re-finalize the underlying Distribution
_Distribution.finalize_options(dist)
# This bit comes out of distribute/setuptools
if isinstance(dist.metadata.version, six.integer_types + (float,)):
# Some people apparently take "version number" too literally :)
dist.metadata.version = str(dist.metadata.version)
# This bit of hackery is necessary so that the Distribution will ignore
    # normally unsupported command options (namely pre-hooks and post-hooks).
    # dist.command_options is normally a dict mapping command names to dicts of
    # their options. Now it will be a defaultdict that returns IgnoreDicts for
    # each command's options so we can pass through the unsupported options
ignore = ['pre_hook.*', 'post_hook.*']
dist.command_options = DefaultGetDict(lambda: IgnoreDict(ignore))
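# A minimal consumer sketch (names illustrative): with all metadata kept in
# setup.cfg, a project's setup.py reduces to wiring in this keyword.
#
#     from setuptools import setup
#     setup(
#         setup_requires=['d2to1'],
#         d2to1=True,  # or a relative path to an alternate setup.cfg
#     )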
| bsd-3-clause |
bgxavier/nova | nova/tests/unit/api/openstack/compute/contrib/test_console_output.py | 33 | 6686 | # Copyright 2011 Eldar Nugaev
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import string
import webob
from nova.api.openstack.compute.contrib import console_output \
as console_output_v2
from nova.api.openstack.compute.plugins.v3 import console_output \
as console_output_v21
from nova.compute import api as compute_api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
from nova.tests.unit import fake_instance
def fake_get_console_output(self, _context, _instance, tail_length):
fixture = [str(i) for i in range(5)]
if tail_length is None:
pass
elif tail_length == 0:
fixture = []
else:
fixture = fixture[-int(tail_length):]
return '\n'.join(fixture)
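# Behavior of the stub above, for reference: tail_length=None returns all
# five lines ('0\n1\n2\n3\n4'), 0 returns an empty string, and 3 returns the
# last three lines ('2\n3\n4').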
def fake_get_console_output_not_ready(self, _context, _instance, tail_length):
raise exception.InstanceNotReady(instance_id=_instance["uuid"])
def fake_get_console_output_all_characters(self, _ctx, _instance, _tail_len):
return string.printable
def fake_get(self, context, instance_uuid, want_objects=False,
expected_attrs=None):
return fake_instance.fake_instance_obj(context, **{'uuid': instance_uuid})
def fake_get_not_found(*args, **kwargs):
raise exception.InstanceNotFound(instance_id='fake')
class ConsoleOutputExtensionTestV21(test.NoDBTestCase):
controller_class = console_output_v21
validation_error = exception.ValidationError
def setUp(self):
super(ConsoleOutputExtensionTestV21, self).setUp()
self.stubs.Set(compute_api.API, 'get_console_output',
fake_get_console_output)
self.stubs.Set(compute_api.API, 'get', fake_get)
self.controller = self.controller_class.ConsoleOutputController()
self.req = fakes.HTTPRequest.blank('')
def _get_console_output(self, length_dict=None):
length_dict = length_dict or {}
body = {'os-getConsoleOutput': length_dict}
return self.controller.get_console_output(self.req, fakes.FAKE_UUID,
body=body)
def _check_console_output_failure(self, exception, body):
self.assertRaises(exception,
self.controller.get_console_output,
self.req, fakes.FAKE_UUID, body=body)
def test_get_text_console_instance_action(self):
output = self._get_console_output()
self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)
def test_get_console_output_with_tail(self):
output = self._get_console_output(length_dict={'length': 3})
self.assertEqual({'output': '2\n3\n4'}, output)
def test_get_console_output_with_none_length(self):
output = self._get_console_output(length_dict={'length': None})
self.assertEqual({'output': '0\n1\n2\n3\n4'}, output)
def test_get_console_output_with_length_as_str(self):
output = self._get_console_output(length_dict={'length': '3'})
self.assertEqual({'output': '2\n3\n4'}, output)
def test_get_console_output_filtered_characters(self):
self.stubs.Set(compute_api.API, 'get_console_output',
fake_get_console_output_all_characters)
output = self._get_console_output()
expect = string.digits + string.letters + string.punctuation + ' \t\n'
self.assertEqual({'output': expect}, output)
def test_get_text_console_no_instance(self):
self.stubs.Set(compute_api.API, 'get', fake_get_not_found)
body = {'os-getConsoleOutput': {}}
self._check_console_output_failure(webob.exc.HTTPNotFound, body)
def test_get_text_console_no_instance_on_get_output(self):
self.stubs.Set(compute_api.API,
'get_console_output',
fake_get_not_found)
body = {'os-getConsoleOutput': {}}
self._check_console_output_failure(webob.exc.HTTPNotFound, body)
def test_get_console_output_with_non_integer_length(self):
body = {'os-getConsoleOutput': {'length': 'NaN'}}
self._check_console_output_failure(self.validation_error, body)
def test_get_text_console_bad_body(self):
body = {}
self._check_console_output_failure(self.validation_error, body)
def test_get_console_output_with_length_as_float(self):
body = {'os-getConsoleOutput': {'length': 2.5}}
self._check_console_output_failure(self.validation_error, body)
def test_get_console_output_not_ready(self):
self.stubs.Set(compute_api.API, 'get_console_output',
fake_get_console_output_not_ready)
body = {'os-getConsoleOutput': {}}
self._check_console_output_failure(webob.exc.HTTPConflict, body)
def test_not_implemented(self):
self.stubs.Set(compute_api.API, 'get_console_output',
fakes.fake_not_implemented)
body = {'os-getConsoleOutput': {}}
self._check_console_output_failure(webob.exc.HTTPNotImplemented, body)
def test_get_console_output_with_boolean_length(self):
body = {'os-getConsoleOutput': {'length': True}}
self._check_console_output_failure(self.validation_error, body)
class ConsoleOutputExtensionTestV2(ConsoleOutputExtensionTestV21):
controller_class = console_output_v2
validation_error = webob.exc.HTTPBadRequest
class ConsoleOutpuPolicyEnforcementV21(test.NoDBTestCase):
def setUp(self):
super(ConsoleOutpuPolicyEnforcementV21, self).setUp()
self.controller = console_output_v21.ConsoleOutputController()
def test_get_console_output_policy_failed(self):
rule_name = "os_compute_api:os-console-output"
self.policy.set_rules({rule_name: "project:non_fake"})
req = fakes.HTTPRequest.blank('')
body = {'os-getConsoleOutput': {}}
exc = self.assertRaises(
exception.PolicyNotAuthorized,
self.controller.get_console_output, req, fakes.FAKE_UUID,
body=body)
self.assertEqual(
"Policy doesn't allow %s to be performed." % rule_name,
exc.format_message())
| apache-2.0 |
nikolaychernov/tree-view-list-android | Android-OTA/CreateOTA.py | 43 | 3230 | '''
Created on 21-03-2011
@author: maciek
'''
from IndexGenerator import IndexGenerator
from optparse import OptionParser
import os
import tempfile
import shutil
import logging
logging.basicConfig(level = logging.DEBUG)
parser = OptionParser()
parser.add_option('-n', '--app-name', action='store', dest='appName', help='application name')
parser.add_option('-u', '--release-urls', action='store', dest='releaseUrls', help='URLs of download files - as comma separated list of entries')
parser.add_option('-d', '--destination-directory', action='store', dest='otaAppDir', help='Directory where OTA files are created')
parser.add_option('-v', '--version', action='store', dest='version', help='Version of the application')
parser.add_option('-r', '--releases', action='store', dest='releases', help='Release names of the application')
parser.add_option('-R', '--release-notes', action='store', dest='releaseNotes', help='Release notes of the application (in txt2tags format)')
parser.add_option('-D', '--description', action='store', dest='description', help='Description of the application (in txt2tags format)')
(options, args) = parser.parse_args()
if options.appName == None:
parser.error("Please specify the appName.")
elif options.releaseUrls == None:
parser.error("Please specify releaseUrls")
elif options.otaAppDir == None:
parser.error("Please specify destination directory")
elif options.version == None:
parser.error("Please specify version")
elif options.releases == None:
parser.error("Please specify releases")
elif options.releaseNotes == None:
parser.error("Please specify releaseNotes")
elif options.description == None:
parser.error("Please specify description")
appName = options.appName
releaseUrls = options.releaseUrls
otaAppDir = options.otaAppDir
version = options.version
releases = options.releases
releaseNotes = options.releaseNotes
description = options.description
def findIconFilename():
iconPath = "res/drawable-hdpi/icon.png"
if not os.path.exists(iconPath):
iconPath = "res/drawable-mdpi/icon.png"
if not os.path.exists(iconPath):
iconPath = "res/drawable-ldpi/icon.png"
if not os.path.exists(iconPath):
iconPath = "res/drawable/icon.png"
logging.debug("IconPath: "+iconPath)
return iconPath
def createOTApackage():
'''
    creates all needed files in tmp dir
'''
releaseNotesContent = open(releaseNotes).read()
descriptionContent = open(description).read()
indexGenerator = IndexGenerator(appName, releaseUrls, releaseNotesContent, descriptionContent, version, releases)
    index = indexGenerator.get()
tempIndexFile = tempfile.TemporaryFile()
tempIndexFile.write(index)
tempIndexFile.flush()
tempIndexFile.seek(0)
return tempIndexFile
tempIndexFile = createOTApackage()
if not os.path.isdir(otaAppDir):
logging.debug("creating dir: "+otaAppDir)
os.mkdir(otaAppDir)
else:
logging.warning("dir: "+otaAppDir+" exists")
indexFile = open(os.path.join(otaAppDir,"index.html"),'w')
shutil.copyfileobj(tempIndexFile, indexFile)
srcIconFileName = findIconFilename()
disIconFileName = os.path.join(otaAppDir,"Icon.png")
shutil.copy(srcIconFileName,disIconFileName)
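# An illustrative invocation (all paths and values are placeholders):
#
#     python CreateOTA.py -n MyApp -u http://example.com/MyApp.apk \
#         -d ota_out -v 1.0.0 -r stable -R release_notes.t2t -D description.t2t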
| bsd-2-clause |
creeptonik/videojs-live-card | node_modules/node-sass/node_modules/node-gyp/gyp/pylib/gyp/xcodeproj_file.py | 1366 | 120842 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode project file generator.
This module is both an Xcode project file generator and a documentation of the
Xcode project file format. Knowledge of the project file format was gained
based on extensive experience with Xcode, and by making changes to projects in
Xcode.app and observing the resultant changes in the associated project files.
XCODE PROJECT FILES
The generator targets the file format as written by Xcode 3.2 (specifically,
3.2.6), but past experience has taught that the format has not changed
significantly in the past several years, and future versions of Xcode are able
to read older project files.
Xcode project files are "bundled": the project "file" from an end-user's
perspective is actually a directory with an ".xcodeproj" extension. The
project file from this module's perspective is actually a file inside this
directory, always named "project.pbxproj". This file contains a complete
description of the project and is all that is needed to use the xcodeproj.
Other files contained in the xcodeproj directory are simply used to store
per-user settings, such as the state of various UI elements in the Xcode
application.
The project.pbxproj file is a property list, stored in a format almost
identical to the NeXTstep property list format. The file is able to carry
Unicode data, and is encoded in UTF-8. The root element in the property list
is a dictionary that contains several properties of minimal interest, and two
properties of immense interest. The most important property is a dictionary
named "objects". The entire structure of the project is represented by the
children of this property. The objects dictionary is keyed by unique 96-bit
values represented by 24 uppercase hexadecimal characters. Each value in the
objects dictionary is itself a dictionary, describing an individual object.
Each object in the dictionary is a member of a class, which is identified by
the "isa" property of each object. A variety of classes are represented in a
project file. Objects can refer to other objects by ID, using the 24-character
hexadecimal object key. A project's objects form a tree, with a root object
of class PBXProject at the root. As an example, the PBXProject object serves
as parent to an XCConfigurationList object defining the build configurations
used in the project, a PBXGroup object serving as a container for all files
referenced in the project, and a list of target objects, each of which defines
a target in the project. There are several different types of target object,
such as PBXNativeTarget and PBXAggregateTarget. In this module, this
relationship is expressed by having each target type derive from an abstract
base named XCTarget.
The project.pbxproj file's root dictionary also contains a property, sibling to
the "objects" dictionary, named "rootObject". The value of rootObject is a
24-character object key referring to the root PBXProject object in the
objects dictionary.
In Xcode, every file used as input to a target or produced as a final product
of a target must appear somewhere in the hierarchy rooted at the PBXGroup
object referenced by the PBXProject's mainGroup property. A PBXGroup is
generally represented as a folder in the Xcode application. PBXGroups can
contain other PBXGroups as well as PBXFileReferences, which are pointers to
actual files.
Each XCTarget contains a list of build phases, represented in this module by
the abstract base XCBuildPhase. Examples of concrete XCBuildPhase derivations
are PBXSourcesBuildPhase and PBXFrameworksBuildPhase, which correspond to the
"Compile Sources" and "Link Binary With Libraries" phases displayed in the
Xcode application. Files used as input to these phases (for example, source
files in the former case and libraries and frameworks in the latter) are
represented by PBXBuildFile objects, referenced by elements of "files" lists
in XCTarget objects. Each PBXBuildFile object refers to a PBXBuildFile
object as a "weak" reference: it does not "own" the PBXBuildFile, which is
owned by the root object's mainGroup or a descendant group. In most cases, the
layer of indirection between an XCBuildPhase and a PBXFileReference via a
PBXBuildFile appears extraneous, but there's actually one reason for this:
file-specific compiler flags are added to the PBXBuildFile object so as to
allow a single file to be a member of multiple targets while having distinct
compiler flags for each. These flags can be modified in the Xcode application
in the "Build" tab of a File Info window.
When a project is open in the Xcode application, Xcode will rewrite it. As
such, this module is careful to adhere to the formatting used by Xcode, to
avoid insignificant changes appearing in the file when it is used in the
Xcode application. This will keep version control repositories happy, and
makes it possible to compare a project file used in Xcode to one generated by
this module to determine if any significant changes were made in the
application.
Xcode has its own way of assigning 24-character identifiers to each object,
which is not duplicated here. Because the identifier is only generated
once, when an object is created, and is then left unchanged, there is no need
to attempt to duplicate Xcode's behavior in this area. The generator is free
to select any identifier, even at random, to refer to the objects it creates,
and Xcode will retain those identifiers and use them when subsequently
rewriting the project file. However, the generator would choose new random
identifiers each time the project files are generated, leading to difficulties
comparing "used" project files to "pristine" ones produced by this module,
and causing the appearance of changes as every object identifier is changed
when updated projects are checked in to a version control repository. To
mitigate this problem, this module chooses identifiers in a more deterministic
way, by hashing a description of each object as well as its parent and ancestor
objects. This strategy should result in minimal "shift" in IDs as successive
generations of project files are produced.
THIS MODULE
This module introduces several classes, all derived from the XCObject class.
Nearly all of the "brains" are built into the XCObject class, which understands
how to create and modify objects, maintain the proper tree structure, compute
identifiers, and print objects. For the most part, classes derived from
XCObject need only provide a _schema class object, a dictionary that
expresses what properties objects of the class may contain.
Given this structure, it's possible to build a minimal project file by creating
objects of the appropriate types and making the proper connections:
config_list = XCConfigurationList()
group = PBXGroup()
project = PBXProject({'buildConfigurationList': config_list,
'mainGroup': group})
With the project object set up, it can be added to an XCProjectFile object.
XCProjectFile is a pseudo-class in the sense that it is a concrete XCObject
subclass that does not actually correspond to a class type found in a project
file. Rather, it is used to represent the project file's root dictionary.
Printing an XCProjectFile will print the entire project file, including the
full "objects" dictionary.
project_file = XCProjectFile({'rootObject': project})
project_file.ComputeIDs()
project_file.Print()
Xcode project files are always encoded in UTF-8. This module will accept
strings of either the str class or the unicode class. Strings of class str
are assumed to already be encoded in UTF-8. Obviously, if you're just using
ASCII, you won't encounter difficulties because ASCII is a UTF-8 subset.
Strings of class unicode are handled properly and encoded in UTF-8 when
a project file is output.
"""
import gyp.common
import posixpath
import re
import struct
import sys
# hashlib is supplied as of Python 2.5 as the replacement interface for sha
# and other secure hashes. In 2.6, sha is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import sha otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_sha1 = hashlib.sha1
except ImportError:
import sha
_new_sha1 = sha.new
# See XCObject._EncodeString. This pattern is used to determine when a string
# can be printed unquoted. Strings that match this pattern may be printed
# unquoted. Strings that do not match must be quoted and may be further
# transformed to be properly encoded. Note that this expression matches the
# characters listed with "+", for 1 or more occurrences: if a string is empty,
# it must not match this pattern, because it needs to be encoded as "".
_unquoted = re.compile('^[A-Za-z0-9$./_]+$')
# Strings that match this pattern are quoted regardless of what _unquoted says.
# Oddly, Xcode will quote any string with a run of three or more underscores.
_quoted = re.compile('___')
# This pattern should match any character that needs to be escaped by
# XCObject._EncodeString. See that function.
_escaped = re.compile('[\\\\"]|[\x00-\x1f]')
# Used by SourceTreeAndPathFromPath
_path_leading_variable = re.compile(r'^\$\((.*?)\)(/(.*))?$')
def SourceTreeAndPathFromPath(input_path):
"""Given input_path, returns a tuple with sourceTree and path values.
Examples:
input_path (source_tree, output_path)
'$(VAR)/path' ('VAR', 'path')
'$(VAR)' ('VAR', None)
'path' (None, 'path')
"""
source_group_match = _path_leading_variable.match(input_path)
if source_group_match:
source_tree = source_group_match.group(1)
output_path = source_group_match.group(3) # This may be None.
else:
source_tree = None
output_path = input_path
return (source_tree, output_path)
def ConvertVariablesToShellSyntax(input_string):
return re.sub(r'\$\((.*?)\)', '${\\1}', input_string)
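# For example (illustrative): ConvertVariablesToShellSyntax('$(SRCROOT)/lib')
# returns '${SRCROOT}/lib', rewriting Xcode-style $(VAR) references into the
# ${VAR} form understood by the shell.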
class XCObject(object):
"""The abstract base of all class types used in Xcode project files.
Class variables:
_schema: A dictionary defining the properties of this class. The keys to
_schema are string property keys as used in project files. Values
are a list of four or five elements:
[ is_list, property_type, is_strong, is_required, default ]
is_list: True if the property described is a list, as opposed
to a single element.
property_type: The type to use as the value of the property,
or if is_list is True, the type to use for each
element of the value's list. property_type must
be an XCObject subclass, or one of the built-in
types str, int, or dict.
is_strong: If property_type is an XCObject subclass, is_strong
is True to assert that this class "owns," or serves
as parent, to the property value (or, if is_list is
True, values). is_strong must be False if
property_type is not an XCObject subclass.
is_required: True if the property is required for the class.
Note that is_required being True does not preclude
an empty string ("", in the case of property_type
str) or list ([], in the case of is_list True) from
being set for the property.
             default: Optional. If is_required is True, default may be set
to provide a default value for objects that do not supply
their own value. If is_required is True and default
is not provided, users of the class must supply their own
value for the property.
Note that although the values of the array are expressed in
boolean terms, subclasses provide values as integers to conserve
horizontal space.
_should_print_single_line: False in XCObject. Subclasses whose objects
should be written to the project file in the
alternate single-line format, such as
PBXFileReference and PBXBuildFile, should
set this to True.
_encode_transforms: Used by _EncodeString to encode unprintable characters.
The index into this list is the ordinal of the
character to transform; each value is a string
used to represent the character in the output. XCObject
provides an _encode_transforms list suitable for most
XCObject subclasses.
_alternate_encode_transforms: Provided for subclasses that wish to use
the alternate encoding rules. Xcode seems
to use these rules when printing objects in
single-line format. Subclasses that desire
this behavior should set _encode_transforms
to _alternate_encode_transforms.
_hashables: A list of XCObject subclasses that can be hashed by ComputeIDs
to construct this object's ID. Most classes that need custom
hashing behavior should do it by overriding Hashables,
but in some cases an object's parent may wish to push a
hashable value into its child, and it can do so by appending
to _hashables.
Attributes:
id: The object's identifier, a 24-character uppercase hexadecimal string.
Usually, objects being created should not set id until the entire
project file structure is built. At that point, UpdateIDs() should
be called on the root object to assign deterministic values for id to
each object in the tree.
parent: The object's parent. This is set by a parent XCObject when a child
object is added to it.
_properties: The object's property dictionary. An object's properties are
described by its class' _schema variable.
"""
_schema = {}
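# An illustrative subclass schema, mirroring the pattern used by the concrete
# classes later in this file (the five-element form carries a default value):
#   _schema = XCObject._schema.copy()
#   _schema.update({
#     'name':     [0, str,      0, 0],      # optional single str, weak
#     'children': [1, XCObject, 1, 1, []],  # required strong list, default []
#   })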
_should_print_single_line = False
# See _EncodeString.
_encode_transforms = []
i = 0
while i < ord(' '):
_encode_transforms.append('\\U%04x' % i)
i = i + 1
_encode_transforms[7] = '\\a'
_encode_transforms[8] = '\\b'
_encode_transforms[9] = '\\t'
_encode_transforms[10] = '\\n'
_encode_transforms[11] = '\\v'
_encode_transforms[12] = '\\f'
_encode_transforms[13] = '\\n'
_alternate_encode_transforms = list(_encode_transforms)
_alternate_encode_transforms[9] = chr(9)
_alternate_encode_transforms[10] = chr(10)
_alternate_encode_transforms[11] = chr(11)
def __init__(self, properties=None, id=None, parent=None):
self.id = id
self.parent = parent
self._properties = {}
self._hashables = []
self._SetDefaultsFromSchema()
self.UpdateProperties(properties)
def __repr__(self):
try:
name = self.Name()
except NotImplementedError:
return '<%s at 0x%x>' % (self.__class__.__name__, id(self))
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Copy(self):
"""Make a copy of this object.
The new object will have its own copy of lists and dicts. Any XCObject
objects owned by this object (marked "strong") will be copied in the
new object, even those found in lists. If this object has any weak
references to other XCObjects, the same references are added to the new
object without making a copy.
"""
that = self.__class__(id=self.id, parent=self.parent)
for key, value in self._properties.iteritems():
is_strong = self._schema[key][2]
if isinstance(value, XCObject):
if is_strong:
new_value = value.Copy()
new_value.parent = that
that._properties[key] = new_value
else:
that._properties[key] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
that._properties[key] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe to
# call Copy.
that._properties[key] = []
for item in value:
new_item = item.Copy()
new_item.parent = that
that._properties[key].append(new_item)
else:
that._properties[key] = value[:]
elif isinstance(value, dict):
# dicts are never strong.
if is_strong:
raise TypeError('Strong dict for key ' + key + ' in ' + \
self.__class__.__name__)
else:
that._properties[key] = value.copy()
else:
raise TypeError('Unexpected type ' + value.__class__.__name__ + \
' for key ' + key + ' in ' + self.__class__.__name__)
return that
def Name(self):
"""Return the name corresponding to an object.
Not all objects necessarily need to be nameable, and not all objects that
are nameable store their name in a "name" property. Override as needed.
"""
# If the schema indicates that "name" is required, try to access the
# property even if it doesn't exist. This will result in a KeyError
# being raised for the property that should be present, which seems more
# appropriate than NotImplementedError in this case.
if 'name' in self._properties or \
('name' in self._schema and self._schema['name'][3]):
return self._properties['name']
raise NotImplementedError(self.__class__.__name__ + ' must implement Name')
def Comment(self):
"""Return a comment string for the object.
Most objects just use their name as the comment, but PBXProject uses
different values.
The returned comment is not escaped and does not have any comment marker
strings applied to it.
"""
return self.Name()
def Hashables(self):
hashables = [self.__class__.__name__]
name = self.Name()
if name != None:
hashables.append(name)
hashables.extend(self._hashables)
return hashables
def HashablesForChild(self):
return None
def ComputeIDs(self, recursive=True, overwrite=True, seed_hash=None):
"""Set "id" properties deterministically.
An object's "id" property is set based on a hash of its class type and
name, as well as the class type and name of all ancestor objects. As
such, it is only advisable to call ComputeIDs once an entire project file
tree is built.
If recursive is True, recurse into all descendant objects and update their
hashes.
If overwrite is True, any existing value set in the "id" property will be
replaced.
"""
def _HashUpdate(hash, data):
"""Update hash with data's length and contents.
If the hash were updated only with the value of data, it would be
possible for clowns to induce collisions by manipulating the names of
their objects. By adding the length, it's far less likely that
ID collisions will be encountered, intentionally or not.
"""
hash.update(struct.pack('>i', len(data)))
hash.update(data)
if seed_hash is None:
seed_hash = _new_sha1()
hash = seed_hash.copy()
hashables = self.Hashables()
assert len(hashables) > 0
for hashable in hashables:
_HashUpdate(hash, hashable)
if recursive:
hashables_for_child = self.HashablesForChild()
if hashables_for_child is None:
child_hash = hash
else:
assert len(hashables_for_child) > 0
child_hash = seed_hash.copy()
for hashable in hashables_for_child:
_HashUpdate(child_hash, hashable)
for child in self.Children():
child.ComputeIDs(recursive, overwrite, child_hash)
if overwrite or self.id is None:
# Xcode IDs are only 96 bits (24 hex characters), but a SHA-1 digest is
# 160 bits. Instead of throwing out 64 bits of the digest, xor them
# into the portion that gets used.
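# Illustrative: for a SHA-1 digest of five 32-bit words d0..d4, the loop
# below yields id_ints = [d0 ^ d3, d1 ^ d4, d2].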
assert hash.digest_size % 4 == 0
digest_int_count = hash.digest_size / 4
digest_ints = struct.unpack('>' + 'I' * digest_int_count, hash.digest())
id_ints = [0, 0, 0]
for index in xrange(0, digest_int_count):
id_ints[index % 3] ^= digest_ints[index]
self.id = '%08X%08X%08X' % tuple(id_ints)
def EnsureNoIDCollisions(self):
"""Verifies that no two objects have the same ID. Checks all descendants.
"""
ids = {}
descendants = self.Descendants()
for descendant in descendants:
if descendant.id in ids:
other = ids[descendant.id]
raise KeyError(
'Duplicate ID %s, objects "%s" and "%s" in "%s"' % \
(descendant.id, str(descendant._properties),
str(other._properties), self._properties['rootObject'].Name()))
ids[descendant.id] = descendant
def Children(self):
"""Returns a list of all of this object's owned (strong) children."""
children = []
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong) = attributes[0:3]
if is_strong and property in self._properties:
if not is_list:
children.append(self._properties[property])
else:
children.extend(self._properties[property])
return children
def Descendants(self):
"""Returns a list of all of this object's descendants, including this
object.
"""
children = self.Children()
descendants = [self]
for child in children:
descendants.extend(child.Descendants())
return descendants
def PBXProjectAncestor(self):
# The base case for recursion is defined at PBXProject.PBXProjectAncestor.
if self.parent:
return self.parent.PBXProjectAncestor()
return None
def _EncodeComment(self, comment):
"""Encodes a comment to be placed in the project file output, mimicing
Xcode behavior.
"""
# This mimics Xcode behavior by wrapping the comment in "/*" and "*/". If
# the string already contains a "*/", it is turned into "(*)/". This keeps
# the file writer from outputting something that would be treated as the
# end of a comment in the middle of something intended to be entirely a
# comment.
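# Illustrative: _EncodeComment('Frameworks') returns '/* Frameworks */', and
# _EncodeComment('a*/b') returns '/* a(*)/b */'.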
return '/* ' + comment.replace('*/', '(*)/') + ' */'
def _EncodeTransform(self, match):
# This function works closely with _EncodeString. It will only be called
# by re.sub with match.group(0) containing a character matched by the
_escaped expression.
char = match.group(0)
# Backslashes (\) and quotation marks (") are always replaced with a
# backslash-escaped version of the same. Everything else gets its
# replacement from the class' _encode_transforms array.
if char == '\\':
return '\\\\'
if char == '"':
return '\\"'
return self._encode_transforms[ord(char)]
def _EncodeString(self, value):
"""Encodes a string to be placed in the project file output, mimicing
Xcode behavior.
"""
# Use quotation marks when any character outside of the range A-Z, a-z, 0-9,
# $ (dollar sign), . (period), and _ (underscore) is present. Also use
# quotation marks to represent empty strings.
#
# Escape " (double-quote) and \ (backslash) by preceding them with a
# backslash.
#
# Some characters below the printable ASCII range are encoded specially:
# 7 ^G BEL is encoded as "\a"
# 8 ^H BS is encoded as "\b"
# 11 ^K VT is encoded as "\v"
# 12 ^L NP is encoded as "\f"
# 127 ^? DEL is passed through as-is without escaping
# - In PBXFileReference and PBXBuildFile objects:
# 9 ^I HT is passed through as-is without escaping
# 10 ^J NL is passed through as-is without escaping
# 13 ^M CR is passed through as-is without escaping
# - In other objects:
# 9 ^I HT is encoded as "\t"
# 10 ^J NL is encoded as "\n"
# 13 ^M CR is encoded as "\n" rendering it indistinguishable from
# 10 ^J NL
# All other characters within the ASCII control character range (0 through
# 31 inclusive) are encoded as "\UXXXX", where XXXX is the code point in
# hexadecimal. For example, character 14 (^N SO) is encoded as "\U000e".
# Characters above the ASCII range are passed through to the output encoded
# as UTF-8 without any escaping. These mappings are contained in the
# class' _encode_transforms list.
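# Illustrative examples:
#   _EncodeString('Chromium')       returns 'Chromium'
#   _EncodeString('Chromium Files') returns '"Chromium Files"'
#   _EncodeString('')               returns '""'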
if _unquoted.search(value) and not _quoted.search(value):
return value
return '"' + _escaped.sub(self._EncodeTransform, value) + '"'
def _XCPrint(self, file, tabs, line):
file.write('\t' * tabs + line)
def _XCPrintableValue(self, tabs, value, flatten_list=False):
"""Returns a representation of value that may be printed in a project file,
mimicking Xcode's behavior.
_XCPrintableValue can handle str and int values, XCObjects (which are
made printable by returning their id property), and list and dict objects
composed of any of the above types. When printing a list or dict, and
_should_print_single_line is False, the tabs parameter is used to determine
how much to indent the lines corresponding to the items in the list or
dict.
If flatten_list is True, single-element lists will be transformed into
strings.
"""
printable = ''
comment = None
if self._should_print_single_line:
sep = ' '
element_tabs = ''
end_tabs = ''
else:
sep = '\n'
element_tabs = '\t' * (tabs + 1)
end_tabs = '\t' * tabs
if isinstance(value, XCObject):
printable += value.id
comment = value.Comment()
elif isinstance(value, str):
printable += self._EncodeString(value)
elif isinstance(value, unicode):
printable += self._EncodeString(value.encode('utf-8'))
elif isinstance(value, int):
printable += str(value)
elif isinstance(value, list):
if flatten_list and len(value) <= 1:
if len(value) == 0:
printable += self._EncodeString('')
else:
printable += self._EncodeString(value[0])
else:
printable = '(' + sep
for item in value:
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item, flatten_list) + \
',' + sep
printable += end_tabs + ')'
elif isinstance(value, dict):
printable = '{' + sep
for item_key, item_value in sorted(value.iteritems()):
printable += element_tabs + \
self._XCPrintableValue(tabs + 1, item_key, flatten_list) + ' = ' + \
self._XCPrintableValue(tabs + 1, item_value, flatten_list) + ';' + \
sep
printable += end_tabs + '}'
else:
raise TypeError("Can't make " + value.__class__.__name__ + ' printable')
if comment != None:
printable += ' ' + self._EncodeComment(comment)
return printable
def _XCKVPrint(self, file, tabs, key, value):
"""Prints a key and value, members of an XCObject's _properties dictionary,
to file.
tabs is an int identifying the indentation level. If the class'
_should_print_single_line variable is True, tabs is ignored and the
key-value pair will be followed by a space instead of a newline.
"""
if self._should_print_single_line:
printable = ''
after_kv = ' '
else:
printable = '\t' * tabs
after_kv = '\n'
# Xcode usually prints remoteGlobalIDString values in PBXContainerItemProxy
# objects without comments. Sometimes it prints them with comments, but
# the majority of the time, it doesn't. To avoid unnecessary changes to
# the project file after Xcode opens it, don't write comments for
# remoteGlobalIDString. This is a sucky hack and it would certainly be
# cleaner to extend the schema to indicate whether or not a comment should
# be printed, but since this is the only case where the problem occurs and
# Xcode itself can't seem to make up its mind, the hack will suffice.
#
# Also see PBXContainerItemProxy._schema['remoteGlobalIDString'].
if key == 'remoteGlobalIDString' and isinstance(self,
PBXContainerItemProxy):
value_to_print = value.id
else:
value_to_print = value
# PBXBuildFile's settings property is represented in the output as a dict,
# but a hack here has it represented as a string. Arrange to strip off the
# quotes so that it shows up in the output as expected.
if key == 'settings' and isinstance(self, PBXBuildFile):
strip_value_quotes = True
else:
strip_value_quotes = False
# In another one-off, let's set flatten_list on buildSettings properties
# of XCBuildConfiguration objects, because that's how Xcode treats them.
if key == 'buildSettings' and isinstance(self, XCBuildConfiguration):
flatten_list = True
else:
flatten_list = False
try:
printable_key = self._XCPrintableValue(tabs, key, flatten_list)
printable_value = self._XCPrintableValue(tabs, value_to_print,
flatten_list)
if strip_value_quotes and len(printable_value) > 1 and \
printable_value[0] == '"' and printable_value[-1] == '"':
printable_value = printable_value[1:-1]
printable += printable_key + ' = ' + printable_value + ';' + after_kv
except TypeError, e:
gyp.common.ExceptionAppend(e,
'while printing key "%s"' % key)
raise
self._XCPrint(file, 0, printable)
def Print(self, file=sys.stdout):
"""Prints a reprentation of this object to file, adhering to Xcode output
formatting.
"""
self.VerifyHasRequiredProperties()
if self._should_print_single_line:
# When printing an object in a single line, Xcode doesn't put any space
# between the beginning of a dictionary (or presumably a list) and the
# first contained item, so you wind up with snippets like
# ...CDEF = {isa = PBXFileReference; fileRef = 0123...
# If it were me, I would have put a space in there after the opening
# curly, but I guess this is just another one of those inconsistencies
# between how Xcode prints PBXFileReference and PBXBuildFile objects as
# compared to other objects. Mimic Xcode's behavior here by using an
# empty string for sep.
sep = ''
end_tabs = 0
else:
sep = '\n'
end_tabs = 2
# Start the object. For example, '\t\tPBXProject = {\n'.
self._XCPrint(file, 2, self._XCPrintableValue(2, self) + ' = {' + sep)
# "isa" isn't in the _properties dictionary, it's an intrinsic property
# of the class which the object belongs to. Xcode always outputs "isa"
# as the first element of an object dictionary.
self._XCKVPrint(file, 3, 'isa', self.__class__.__name__)
# The remaining elements of an object dictionary are sorted alphabetically.
for property, value in sorted(self._properties.iteritems()):
self._XCKVPrint(file, 3, property, value)
# End the object.
self._XCPrint(file, end_tabs, '};\n')
def UpdateProperties(self, properties, do_copy=False):
"""Merge the supplied properties into the _properties dictionary.
The input properties must adhere to the class schema or a KeyError or
TypeError exception will be raised. If adding an object of an XCObject
subclass and the schema indicates a strong relationship, the object's
parent will be set to this object.
If do_copy is True, then lists, dicts, strong-owned XCObjects, and
strong-owned XCObjects in lists will be copied instead of having their
references added.
"""
if properties is None:
return
for property, value in properties.iteritems():
# Make sure the property is in the schema.
if not property in self._schema:
raise KeyError(property + ' not in ' + self.__class__.__name__)
# Make sure the property conforms to the schema.
(is_list, property_type, is_strong) = self._schema[property][0:3]
if is_list:
if value.__class__ != list:
raise TypeError(
property + ' of ' + self.__class__.__name__ + \
' must be list, not ' + value.__class__.__name__)
for item in value:
if not isinstance(item, property_type) and \
not (item.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
'item of ' + property + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
item.__class__.__name__)
elif not isinstance(value, property_type) and \
not (value.__class__ == unicode and property_type == str):
# Accept unicode where str is specified. str is treated as
# UTF-8-encoded.
raise TypeError(
property + ' of ' + self.__class__.__name__ + ' must be ' + \
property_type.__name__ + ', not ' + value.__class__.__name__)
# Checks passed, perform the assignment.
if do_copy:
if isinstance(value, XCObject):
if is_strong:
self._properties[property] = value.Copy()
else:
self._properties[property] = value
elif isinstance(value, str) or isinstance(value, unicode) or \
isinstance(value, int):
self._properties[property] = value
elif isinstance(value, list):
if is_strong:
# If is_strong is True, each element is an XCObject, so it's safe
# to call Copy.
self._properties[property] = []
for item in value:
self._properties[property].append(item.Copy())
else:
self._properties[property] = value[:]
elif isinstance(value, dict):
self._properties[property] = value.copy()
else:
raise TypeError("Don't know how to copy a " + \
value.__class__.__name__ + ' object for ' + \
property + ' in ' + self.__class__.__name__)
else:
self._properties[property] = value
# Set up the child's back-reference to this object. Don't use |value|
# any more because it may not be right if do_copy is true.
if is_strong:
if not is_list:
self._properties[property].parent = self
else:
for item in self._properties[property]:
item.parent = self
def HasProperty(self, key):
return key in self._properties
def GetProperty(self, key):
return self._properties[key]
def SetProperty(self, key, value):
self.UpdateProperties({key: value})
def DelProperty(self, key):
if key in self._properties:
del self._properties[key]
def AppendProperty(self, key, value):
# TODO(mark): Support ExtendProperty too (and make this call that)?
# Schema validation.
if not key in self._schema:
raise KeyError(key + ' not in ' + self.__class__.__name__)
(is_list, property_type, is_strong) = self._schema[key][0:3]
if not is_list:
raise TypeError(key + ' of ' + self.__class__.__name__ + ' must be list')
if not isinstance(value, property_type):
raise TypeError('item of ' + key + ' of ' + self.__class__.__name__ + \
' must be ' + property_type.__name__ + ', not ' + \
value.__class__.__name__)
# If the property doesn't exist yet, create a new empty list to receive the
# item.
if not key in self._properties:
self._properties[key] = []
# Set up the ownership link.
if is_strong:
value.parent = self
# Store the item.
self._properties[key].append(value)
def VerifyHasRequiredProperties(self):
"""Ensure that all properties identified as required by the schema are
set.
"""
# TODO(mark): A stronger verification mechanism is needed. Some
# subclasses need to perform validation beyond what the schema can enforce.
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and not property in self._properties:
raise KeyError(self.__class__.__name__ + ' requires ' + property)
def _SetDefaultsFromSchema(self):
"""Assign object default values according to the schema. This will not
overwrite properties that have already been set."""
defaults = {}
for property, attributes in self._schema.iteritems():
(is_list, property_type, is_strong, is_required) = attributes[0:4]
if is_required and len(attributes) >= 5 and \
not property in self._properties:
default = attributes[4]
defaults[property] = default
if len(defaults) > 0:
# Use do_copy=True so that each new object gets its own copy of strong
# objects, lists, and dicts.
self.UpdateProperties(defaults, do_copy=True)
class XCHierarchicalElement(XCObject):
"""Abstract base for PBXGroup and PBXFileReference. Not represented in a
project file."""
# TODO(mark): Do name and path belong here? Probably so.
# If path is set and name is not, name may have a default value. Name will
# be set to the basename of path, if the basename of path is different from
# the full value of path. If path is already just a leaf name, name will
# not be set.
_schema = XCObject._schema.copy()
_schema.update({
'comments': [0, str, 0, 0],
'fileEncoding': [0, str, 0, 0],
'includeInIndex': [0, int, 0, 0],
'indentWidth': [0, int, 0, 0],
'lineEnding': [0, int, 0, 0],
'sourceTree': [0, str, 0, 1, '<group>'],
'tabWidth': [0, int, 0, 0],
'usesTabs': [0, int, 0, 0],
'wrapsLines': [0, int, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
if 'path' in self._properties and not 'name' in self._properties:
path = self._properties['path']
name = posixpath.basename(path)
if name != '' and path != name:
self.SetProperty('name', name)
if 'path' in self._properties and \
(not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>'):
# If the pathname begins with an Xcode variable like "$(SDKROOT)/", take
# the variable out and make the path be relative to that variable by
# assigning the variable name as the sourceTree.
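# Illustrative: a path of '$(SDKROOT)/usr/lib' becomes sourceTree 'SDKROOT'
# with path 'usr/lib'; a bare '$(SDKROOT)' becomes sourceTree 'SDKROOT' with
# no path and name 'SDKROOT'.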
(source_tree, path) = SourceTreeAndPathFromPath(self._properties['path'])
if source_tree != None:
self._properties['sourceTree'] = source_tree
if path != None:
self._properties['path'] = path
if source_tree != None and path is None and \
not 'name' in self._properties:
# The path was of the form "$(SDKROOT)" with no path following it.
# This object is now relative to that variable, so it has no path
# attribute of its own. It does, however, keep a name.
del self._properties['path']
self._properties['name'] = source_tree
def Name(self):
if 'name' in self._properties:
return self._properties['name']
elif 'path' in self._properties:
return self._properties['path']
else:
# This happens in the case of the root PBXGroup.
return None
def Hashables(self):
"""Custom hashables for XCHierarchicalElements.
XCHierarchicalElements are special. Generally, their hashes shouldn't
change if the paths don't change. The normal XCObject implementation of
Hashables adds a hashable for each object, which means that if
the hierarchical structure changes (possibly due to changes caused when
TakeOverOnlyChild runs and encounters slight changes in the hierarchy),
the hashes will change. For example, if a project file initially contains
a/b/f1 and the groups a and b are collapsed into a/b, f1 will have a single parent
a/b. If someone later adds a/f2 to the project file, a/b can no longer be
collapsed, and f1 winds up with parent b and grandparent a. That would
be sufficient to change f1's hash.
To counteract this problem, hashables for all XCHierarchicalElements except
for the main group (which has neither a name nor a path) are taken to be
just the set of path components. Because hashables are inherited from
parents, this provides assurance that a/b/f1 has the same set of hashables
whether its parent is b or a/b.
The main group is a special case. Because it is permitted to have no name or
path, it falls back to the standard XCObject hash mechanism. This
is not considered a problem because there can be only one main group.
"""
if self == self.PBXProjectAncestor()._properties['mainGroup']:
# super
return XCObject.Hashables(self)
hashables = []
# Put the name in first, ensuring that if TakeOverOnlyChild collapses
# children into a top-level group like "Source", the name always goes
# into the list of hashables without interfering with path components.
if 'name' in self._properties:
# Make it less likely for people to manipulate hashes by following the
# pattern of always pushing an object type value onto the list first.
hashables.append(self.__class__.__name__ + '.name')
hashables.append(self._properties['name'])
# NOTE: This still has the problem that if an absolute path is encountered,
# including paths with a sourceTree, they'll still inherit their parents'
# hashables, even though the paths aren't relative to their parents. This
# is not expected to be much of a problem in practice.
path = self.PathFromSourceTreeAndPath()
if path != None:
components = path.split(posixpath.sep)
for component in components:
hashables.append(self.__class__.__name__ + '.path')
hashables.append(component)
hashables.extend(self._hashables)
return hashables
def Compare(self, other):
# Allow comparison of these types. PBXGroup has the highest sort rank;
# PBXVariantGroup is treated as equal to PBXFileReference.
valid_class_types = {
PBXFileReference: 'file',
PBXGroup: 'group',
PBXVariantGroup: 'file',
}
self_type = valid_class_types[self.__class__]
other_type = valid_class_types[other.__class__]
if self_type == other_type:
# If the two objects are of the same sort rank, compare their names.
return cmp(self.Name(), other.Name())
# Otherwise, sort groups before everything else.
if self_type == 'group':
return -1
return 1
def CompareRootGroup(self, other):
# This function should be used only to compare direct children of the
# containing PBXProject's mainGroup. These groups should appear in the
# listed order.
# TODO(mark): "Build" is used by gyp.generator.xcode, perhaps the
# generator should have a way of influencing this list rather than having
# to hardcode for the generator here.
order = ['Source', 'Intermediates', 'Projects', 'Frameworks', 'Products',
'Build']
# If the groups aren't in the listed order, do a name comparison.
# Otherwise, groups in the listed order should come before those that
# aren't.
self_name = self.Name()
other_name = other.Name()
self_in = isinstance(self, PBXGroup) and self_name in order
other_in = isinstance(other, PBXGroup) and other_name in order
if not self_in and not other_in:
return self.Compare(other)
if self_name in order and not other_name in order:
return -1
if other_name in order and not self_name in order:
return 1
# If both groups are in the listed order, go by the defined order.
self_index = order.index(self_name)
other_index = order.index(other_name)
if self_index < other_index:
return -1
if self_index > other_index:
return 1
return 0
def PathFromSourceTreeAndPath(self):
# Turn the object's sourceTree and path properties into a single flat
# string of a form comparable to the path parameter. If there's a
# sourceTree property other than "<group>", wrap it in $(...) for the
# comparison.
components = []
if self._properties['sourceTree'] != '<group>':
components.append('$(' + self._properties['sourceTree'] + ')')
if 'path' in self._properties:
components.append(self._properties['path'])
if len(components) > 0:
return posixpath.join(*components)
return None
def FullPath(self):
# Returns a full path to self relative to the project file, or relative
# to some other source tree. Start with self, and walk up the chain of
# parents prepending their paths, if any, until no more parents are
# available (project-relative path) or until a path relative to some
# source tree is found.
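# Illustrative: a file with path 'b.cc' in a group with path 'a' under the
# main group has FullPath() 'a/b.cc'; if an ancestor's sourceTree is
# 'SDKROOT', the result begins with '$(SDKROOT)' and the walk stops there.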
xche = self
path = None
while isinstance(xche, XCHierarchicalElement) and \
(path is None or \
(not path.startswith('/') and not path.startswith('$'))):
this_path = xche.PathFromSourceTreeAndPath()
if this_path != None and path != None:
path = posixpath.join(this_path, path)
elif this_path != None:
path = this_path
xche = xche.parent
return path
class PBXGroup(XCHierarchicalElement):
"""
Attributes:
_children_by_path: Maps pathnames of children of this PBXGroup to the
actual child XCHierarchicalElement objects.
_variant_children_by_name_and_path: Maps (name, path) tuples of
PBXVariantGroup children to the actual child PBXVariantGroup objects.
"""
_schema = XCHierarchicalElement._schema.copy()
_schema.update({
'children': [1, XCHierarchicalElement, 1, 1, []],
'name': [0, str, 0, 0],
'path': [0, str, 0, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCHierarchicalElement.__init__(self, properties, id, parent)
self._children_by_path = {}
self._variant_children_by_name_and_path = {}
for child in self._properties.get('children', []):
self._AddChildToDicts(child)
def Hashables(self):
# super
hashables = XCHierarchicalElement.Hashables(self)
# It is not sufficient to just rely on name and parent to build a unique
# hashable: a node could have two child PBXGroups sharing a common name.
# To add entropy the hashable is enhanced with the names of all its
# children.
for child in self._properties.get('children', []):
child_name = child.Name()
if child_name != None:
hashables.append(child_name)
return hashables
def HashablesForChild(self):
# To avoid a circular reference the hashables used to compute a child id do
# not include the child names.
return XCHierarchicalElement.Hashables(self)
def _AddChildToDicts(self, child):
# Sets up this PBXGroup object's dicts to reference the child properly.
child_path = child.PathFromSourceTreeAndPath()
if child_path:
if child_path in self._children_by_path:
raise ValueError('Found multiple children with path ' + child_path)
self._children_by_path[child_path] = child
if isinstance(child, PBXVariantGroup):
child_name = child._properties.get('name', None)
key = (child_name, child_path)
if key in self._variant_children_by_name_and_path:
raise ValueError('Found multiple PBXVariantGroup children with ' + \
'name ' + str(child_name) + ' and path ' + \
str(child_path))
self._variant_children_by_name_and_path[key] = child
def AppendChild(self, child):
# Callers should use this instead of calling
# AppendProperty('children', child) directly because this function
# maintains the group's dicts.
self.AppendProperty('children', child)
self._AddChildToDicts(child)
def GetChildByName(self, name):
# This is not currently optimized with a dict as GetChildByPath is because
# it has few callers. Most callers probably want GetChildByPath. This
# function is only useful to get children that have names but no paths,
# which is rare. The children of the main group ("Source", "Products",
# etc.) are pretty much the only case where this is likely to come up.
#
# TODO(mark): Maybe this should raise an error if more than one child is
# present with the same name.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if child.Name() == name:
return child
return None
def GetChildByPath(self, path):
if not path:
return None
if path in self._children_by_path:
return self._children_by_path[path]
return None
def GetChildByRemoteObject(self, remote_object):
# This method is a little bit esoteric. Given a remote_object, which
# should be a PBXFileReference in another project file, this method will
# return this group's PBXReferenceProxy object serving as a local proxy
# for the remote PBXFileReference.
#
# This function might benefit from a dict optimization as GetChildByPath
# for some workloads, but profiling shows that it's not currently a
# problem.
if not 'children' in self._properties:
return None
for child in self._properties['children']:
if not isinstance(child, PBXReferenceProxy):
continue
container_proxy = child._properties['remoteRef']
if container_proxy._properties['remoteGlobalIDString'] == remote_object:
return child
return None
def AddOrGetFileByPath(self, path, hierarchical):
"""Returns an existing or new file reference corresponding to path.
If hierarchical is True, this method will create or use the necessary
hierarchical group structure corresponding to path. Otherwise, it will
look in and create an item in the current group only.
If an existing matching reference is found, it is returned, otherwise, a
new one will be created, added to the correct group, and returned.
If path identifies a directory by virtue of carrying a trailing slash,
this method returns a PBXFileReference of "folder" type. If path
identifies a variant, by virtue of it identifying a file inside a directory
with an ".lproj" extension, this method returns a PBXVariantGroup
containing the variant named by path, and possibly other variants. For
all other paths, a "normal" PBXFileReference will be returned.
"""
# Adding or getting a directory? Directories end with a trailing slash.
is_dir = False
if path.endswith('/'):
is_dir = True
path = posixpath.normpath(path)
if is_dir:
path = path + '/'
# Adding or getting a variant? Variants are files inside directories
# with an ".lproj" extension. Xcode uses variants for localization. For
# a variant path/to/Language.lproj/MainMenu.nib, put a variant group named
# MainMenu.nib inside path/to, and give it a variant named Language. In
# this example, grandparent would be set to path/to and parent_root would
# be set to Language.
variant_name = None
parent = posixpath.dirname(path)
grandparent = posixpath.dirname(parent)
parent_basename = posixpath.basename(parent)
(parent_root, parent_ext) = posixpath.splitext(parent_basename)
if parent_ext == '.lproj':
variant_name = parent_root
if grandparent == '':
grandparent = None
# Putting a directory inside a variant group is not currently supported.
assert not is_dir or variant_name is None
path_split = path.split(posixpath.sep)
if len(path_split) == 1 or \
((is_dir or variant_name != None) and len(path_split) == 2) or \
not hierarchical:
# The PBXFileReference or PBXVariantGroup will be added to or gotten from
# this PBXGroup, no recursion necessary.
if variant_name is None:
# Add or get a PBXFileReference.
file_ref = self.GetChildByPath(path)
if file_ref != None:
assert file_ref.__class__ == PBXFileReference
else:
file_ref = PBXFileReference({'path': path})
self.AppendChild(file_ref)
else:
# Add or get a PBXVariantGroup. The variant group name is the same
# as the basename (MainMenu.nib in the example above). grandparent
# specifies the path to the variant group itself, and path_split[-2:]
# is the path of the specific variant relative to its group.
variant_group_name = posixpath.basename(path)
variant_group_ref = self.AddOrGetVariantGroupByNameAndPath(
variant_group_name, grandparent)
variant_path = posixpath.sep.join(path_split[-2:])
variant_ref = variant_group_ref.GetChildByPath(variant_path)
if variant_ref != None:
assert variant_ref.__class__ == PBXFileReference
else:
variant_ref = PBXFileReference({'name': variant_name,
'path': variant_path})
variant_group_ref.AppendChild(variant_ref)
# The caller is interested in the variant group, not the specific
# variant file.
file_ref = variant_group_ref
return file_ref
else:
# Hierarchical recursion. Add or get a PBXGroup corresponding to the
# outermost path component, and then recurse into it, chopping off that
# path component.
next_dir = path_split[0]
group_ref = self.GetChildByPath(next_dir)
if group_ref != None:
assert group_ref.__class__ == PBXGroup
else:
group_ref = PBXGroup({'path': next_dir})
self.AppendChild(group_ref)
return group_ref.AddOrGetFileByPath(posixpath.sep.join(path_split[1:]),
hierarchical)
def AddOrGetVariantGroupByNameAndPath(self, name, path):
"""Returns an existing or new PBXVariantGroup for name and path.
If a PBXVariantGroup identified by the name and path arguments is already
present as a child of this object, it is returned. Otherwise, a new
PBXVariantGroup with the correct properties is created, added as a child,
and returned.
This method will generally be called by AddOrGetFileByPath, which knows
when to create a variant group based on the structure of the pathnames
passed to it.
"""
key = (name, path)
if key in self._variant_children_by_name_and_path:
variant_group_ref = self._variant_children_by_name_and_path[key]
assert variant_group_ref.__class__ == PBXVariantGroup
return variant_group_ref
variant_group_properties = {'name': name}
if path != None:
variant_group_properties['path'] = path
variant_group_ref = PBXVariantGroup(variant_group_properties)
self.AppendChild(variant_group_ref)
return variant_group_ref
def TakeOverOnlyChild(self, recurse=False):
"""If this PBXGroup has only one child and it's also a PBXGroup, take
it over by making all of its children this object's children.
This function will continue to take over only children when those children
are groups. If there are three PBXGroups representing a, b, and c, with
c inside b and b inside a, and a and b have no other children, this will
result in a taking over both b and c, forming a PBXGroup for a/b/c.
If recurse is True, this function will recurse into children and ask them
to collapse themselves by taking over only children as well. Assuming
an example hierarchy with files at a/b/c/d1, a/b/c/d2, and a/b/c/d3/e/f
(d1, d2, and f are files, the rest are groups), recursion will result in
a group for a/b/c containing a group for d3/e.
"""
# At this stage, check that child class types are PBXGroup exactly,
# instead of using isinstance. The only subclass of PBXGroup,
# PBXVariantGroup, should not participate in reparenting in the same way:
# reparenting by merging different object types would be wrong.
while len(self._properties['children']) == 1 and \
self._properties['children'][0].__class__ == PBXGroup:
# Loop to take over the innermost only-child group possible.
child = self._properties['children'][0]
# Assume the child's properties, including its children. Save a copy
# of this object's old properties, because they'll still be needed.
# This object retains its existing id and parent attributes.
old_properties = self._properties
self._properties = child._properties
self._children_by_path = child._children_by_path
if not 'sourceTree' in self._properties or \
self._properties['sourceTree'] == '<group>':
# The child was relative to its parent. Fix up the path. Note that
# children with a sourceTree other than "<group>" are not relative to
# their parents, so no path fix-up is needed in that case.
if 'path' in old_properties:
if 'path' in self._properties:
# Both the original parent and child have paths set.
self._properties['path'] = posixpath.join(old_properties['path'],
self._properties['path'])
else:
# Only the original parent has a path, use it.
self._properties['path'] = old_properties['path']
if 'sourceTree' in old_properties:
# The original parent had a sourceTree set, use it.
self._properties['sourceTree'] = old_properties['sourceTree']
# If the original parent had a name set, keep using it. If the original
# parent didn't have a name but the child did, let the child's name
# live on. If the name attribute seems unnecessary now, get rid of it.
if 'name' in old_properties and old_properties['name'] != None and \
old_properties['name'] != self.Name():
self._properties['name'] = old_properties['name']
if 'name' in self._properties and 'path' in self._properties and \
self._properties['name'] == self._properties['path']:
del self._properties['name']
# Notify all children of their new parent.
for child in self._properties['children']:
child.parent = self
# If asked to recurse, recurse.
if recurse:
for child in self._properties['children']:
if child.__class__ == PBXGroup:
child.TakeOverOnlyChild(recurse)
def SortGroup(self):
self._properties['children'] = \
    sorted(self._properties['children'], cmp=lambda x, y: x.Compare(y))
# Recurse.
for child in self._properties['children']:
if isinstance(child, PBXGroup):
child.SortGroup()
class XCFileLikeElement(XCHierarchicalElement):
# Abstract base for objects that can be used as the fileRef property of
# PBXBuildFile.
def PathHashables(self):
# A PBXBuildFile that refers to this object will call this method to
# obtain additional hashables specific to this XCFileLikeElement. Don't
# just use this object's hashables; they're not specific and unique enough
# on their own (without access to the parent hashables.) Instead, provide
# hashables that identify this object by path by getting its hashables as
# well as the hashables of ancestor XCHierarchicalElement objects.
hashables = []
xche = self
while xche != None and isinstance(xche, XCHierarchicalElement):
xche_hashables = xche.Hashables()
for index in xrange(0, len(xche_hashables)):
hashables.insert(index, xche_hashables[index])
xche = xche.parent
return hashables
class XCContainerPortal(XCObject):
# Abstract base for objects that can be used as the containerPortal property
# of PBXContainerItemProxy.
pass
class XCRemoteObject(XCObject):
# Abstract base for objects that can be used as the remoteGlobalIDString
# property of PBXContainerItemProxy.
pass
class PBXFileReference(XCFileLikeElement, XCContainerPortal, XCRemoteObject):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'explicitFileType': [0, str, 0, 0],
'lastKnownFileType': [0, str, 0, 0],
'name': [0, str, 0, 0],
'path': [0, str, 0, 1],
})
# Weird output rules for PBXFileReference.
_should_print_single_line = True
# super
_encode_transforms = XCFileLikeElement._alternate_encode_transforms
def __init__(self, properties=None, id=None, parent=None):
# super
XCFileLikeElement.__init__(self, properties, id, parent)
if 'path' in self._properties and self._properties['path'].endswith('/'):
self._properties['path'] = self._properties['path'][:-1]
is_dir = True
else:
is_dir = False
if 'path' in self._properties and \
not 'lastKnownFileType' in self._properties and \
not 'explicitFileType' in self._properties:
# TODO(mark): This is the replacement for a replacement for a quick hack.
# It is no longer incredibly sucky, but this list needs to be extended.
extension_map = {
'a': 'archive.ar',
'app': 'wrapper.application',
'bdic': 'file',
'bundle': 'wrapper.cfbundle',
'c': 'sourcecode.c.c',
'cc': 'sourcecode.cpp.cpp',
'cpp': 'sourcecode.cpp.cpp',
'css': 'text.css',
'cxx': 'sourcecode.cpp.cpp',
'dart': 'sourcecode',
'dylib': 'compiled.mach-o.dylib',
'framework': 'wrapper.framework',
'gyp': 'sourcecode',
'gypi': 'sourcecode',
'h': 'sourcecode.c.h',
'hxx': 'sourcecode.cpp.h',
'icns': 'image.icns',
'java': 'sourcecode.java',
'js': 'sourcecode.javascript',
'kext': 'wrapper.kext',
'm': 'sourcecode.c.objc',
'mm': 'sourcecode.cpp.objcpp',
'nib': 'wrapper.nib',
'o': 'compiled.mach-o.objfile',
'pdf': 'image.pdf',
'pl': 'text.script.perl',
'plist': 'text.plist.xml',
'pm': 'text.script.perl',
'png': 'image.png',
'py': 'text.script.python',
'r': 'sourcecode.rez',
'rez': 'sourcecode.rez',
's': 'sourcecode.asm',
'storyboard': 'file.storyboard',
'strings': 'text.plist.strings',
'swift': 'sourcecode.swift',
'ttf': 'file',
'xcassets': 'folder.assetcatalog',
'xcconfig': 'text.xcconfig',
'xcdatamodel': 'wrapper.xcdatamodel',
'xcdatamodeld': 'wrapper.xcdatamodeld',
'xib': 'file.xib',
'y': 'sourcecode.yacc',
}
prop_map = {
'dart': 'explicitFileType',
'gyp': 'explicitFileType',
'gypi': 'explicitFileType',
}
if is_dir:
file_type = 'folder'
prop_name = 'lastKnownFileType'
else:
basename = posixpath.basename(self._properties['path'])
(root, ext) = posixpath.splitext(basename)
# Check the map using a lowercase extension.
# TODO(mark): Maybe it should try with the original case first and fall
# back to lowercase, in case there are any instances where case
# matters. There currently aren't.
if ext != '':
ext = ext[1:].lower()
# TODO(mark): "text" is the default value, but "file" is appropriate
# for unrecognized files not containing text. Xcode seems to choose
# based on content.
file_type = extension_map.get(ext, 'text')
prop_name = prop_map.get(ext, 'lastKnownFileType')
self._properties[prop_name] = file_type
class PBXVariantGroup(PBXGroup, XCFileLikeElement):
"""PBXVariantGroup is used by Xcode to represent localizations."""
# No additions to the schema relative to PBXGroup.
pass
# PBXReferenceProxy is also an XCFileLikeElement subclass. It is defined below
# because it uses PBXContainerItemProxy, defined below.
class XCBuildConfiguration(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'baseConfigurationReference': [0, PBXFileReference, 0, 0],
'buildSettings': [0, dict, 0, 1, {}],
'name': [0, str, 0, 1],
})
def HasBuildSetting(self, key):
return key in self._properties['buildSettings']
def GetBuildSetting(self, key):
return self._properties['buildSettings'][key]
def SetBuildSetting(self, key, value):
# TODO(mark): If a list, copy?
self._properties['buildSettings'][key] = value
def AppendBuildSetting(self, key, value):
if not key in self._properties['buildSettings']:
self._properties['buildSettings'][key] = []
self._properties['buildSettings'][key].append(value)
def DelBuildSetting(self, key):
if key in self._properties['buildSettings']:
del self._properties['buildSettings'][key]
def SetBaseConfiguration(self, value):
self._properties['baseConfigurationReference'] = value
class XCConfigurationList(XCObject):
# _configs is the default list of configurations.
_configs = [ XCBuildConfiguration({'name': 'Debug'}),
XCBuildConfiguration({'name': 'Release'}) ]
_schema = XCObject._schema.copy()
_schema.update({
'buildConfigurations': [1, XCBuildConfiguration, 1, 1, _configs],
'defaultConfigurationIsVisible': [0, int, 0, 1, 1],
'defaultConfigurationName': [0, str, 0, 1, 'Release'],
})
def Name(self):
return 'Build configuration list for ' + \
self.parent.__class__.__name__ + ' "' + self.parent.Name() + '"'
def ConfigurationNamed(self, name):
"""Convenience accessor to obtain an XCBuildConfiguration by name."""
for configuration in self._properties['buildConfigurations']:
if configuration._properties['name'] == name:
return configuration
raise KeyError(name)
def DefaultConfiguration(self):
"""Convenience accessor to obtain the default XCBuildConfiguration."""
return self.ConfigurationNamed(self._properties['defaultConfigurationName'])
def HasBuildSetting(self, key):
"""Determines the state of a build setting in all XCBuildConfiguration
child objects.
If all child objects have key in their build settings, and the value is the
same in all child objects, returns 1.
If no child objects have the key in their build settings, returns 0.
If some, but not all, child objects have the key in their build settings,
or if any children have different values for the key, returns -1.
"""
has = None
value = None
for configuration in self._properties['buildConfigurations']:
configuration_has = configuration.HasBuildSetting(key)
if has is None:
has = configuration_has
elif has != configuration_has:
return -1
if configuration_has:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
elif value != configuration_value:
return -1
if not has:
return 0
return 1
def GetBuildSetting(self, key):
"""Gets the build setting for key.
All child XCConfiguration objects must have the same value set for the
setting, or a ValueError will be raised.
"""
# TODO(mark): This is wrong for build settings that are lists. The list
# contents should be compared (and a list copy returned?)
value = None
for configuration in self._properties['buildConfigurations']:
configuration_value = configuration.GetBuildSetting(key)
if value is None:
value = configuration_value
else:
if value != configuration_value:
raise ValueError('Variant values for ' + key)
return value
def SetBuildSetting(self, key, value):
"""Sets the build setting for key to value in all child
XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBuildSetting(key, value)
def AppendBuildSetting(self, key, value):
"""Appends value to the build setting for key, which is treated as a list,
in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.AppendBuildSetting(key, value)
def DelBuildSetting(self, key):
"""Deletes the build setting key from all child XCBuildConfiguration
objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.DelBuildSetting(key)
def SetBaseConfiguration(self, value):
"""Sets the build configuration in all child XCBuildConfiguration objects.
"""
for configuration in self._properties['buildConfigurations']:
configuration.SetBaseConfiguration(value)
class PBXBuildFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'fileRef': [0, XCFileLikeElement, 0, 1],
'settings': [0, str, 0, 0], # hack, it's a dict
})
# Weird output rules for PBXBuildFile.
_should_print_single_line = True
_encode_transforms = XCObject._alternate_encode_transforms
def Name(self):
# Example: "main.cc in Sources"
return self._properties['fileRef'].Name() + ' in ' + self.parent.Name()
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# It is not sufficient to just rely on Name() to get the
# XCFileLikeElement's name, because that is not a complete pathname.
# PathHashables returns hashables unique enough that no two
# PBXBuildFiles should wind up with the same set of hashables, unless
# someone adds the same file multiple times to the same target. That
# would be considered invalid anyway.
hashables.extend(self._properties['fileRef'].PathHashables())
return hashables
class XCBuildPhase(XCObject):
"""Abstract base for build phase classes. Not represented in a project
file.
Attributes:
_files_by_path: A dict mapping each path of a child in the files list by
path (keys) to the corresponding PBXBuildFile children (values).
_files_by_xcfilelikeelement: A dict mapping each XCFileLikeElement (keys)
to the corresponding PBXBuildFile children (values).
"""
# TODO(mark): Some build phase types, like PBXShellScriptBuildPhase, don't
# actually have a "files" list. XCBuildPhase should not have "files" but
# another abstract subclass of it should provide this, and concrete build
# phase types that do have "files" lists should be derived from that new
# abstract subclass. XCBuildPhase should only provide buildActionMask and
# runOnlyForDeploymentPostprocessing, and not files or the various
# file-related methods and attributes.
_schema = XCObject._schema.copy()
_schema.update({
'buildActionMask': [0, int, 0, 1, 0x7fffffff],
'files': [1, PBXBuildFile, 1, 1, []],
'runOnlyForDeploymentPostprocessing': [0, int, 0, 1, 0],
})
def __init__(self, properties=None, id=None, parent=None):
# super
XCObject.__init__(self, properties, id, parent)
self._files_by_path = {}
self._files_by_xcfilelikeelement = {}
for pbxbuildfile in self._properties.get('files', []):
self._AddBuildFileToDicts(pbxbuildfile)
def FileGroup(self, path):
# Subclasses must override this by returning a two-element tuple. The
# first item in the tuple should be the PBXGroup to which "path" should be
# added, either as a child or deeper descendant. The second item should
# be a boolean indicating whether files should be added into hierarchical
# groups or one single flat group.
raise NotImplementedError(
self.__class__.__name__ + ' must implement FileGroup')
def _AddPathToDict(self, pbxbuildfile, path):
"""Adds path to the dict tracking paths belonging to this build phase.
If the path is already a member of this build phase, raises an exception.
"""
if path in self._files_by_path:
raise ValueError('Found multiple build files with path ' + path)
self._files_by_path[path] = pbxbuildfile
def _AddBuildFileToDicts(self, pbxbuildfile, path=None):
"""Maintains the _files_by_path and _files_by_xcfilelikeelement dicts.
If path is specified, then it is the path that is being added to the
phase, and pbxbuildfile must contain either a PBXFileReference directly
referencing that path, or it must contain a PBXVariantGroup that itself
contains a PBXFileReference referencing the path.
If path is not specified, either the PBXFileReference's path or the paths
of all children of the PBXVariantGroup are taken as being added to the
phase.
If the path is already present in the phase, raises an exception.
If the PBXFileReference or PBXVariantGroup referenced by pbxbuildfile
are already present in the phase, referenced by a different PBXBuildFile
object, raises an exception. This does not raise an exception when
a PBXFileReference or PBXVariantGroup reappear and are referenced by the
same PBXBuildFile that has already introduced them, because in the case
of PBXVariantGroup objects, they may correspond to multiple paths that are
not all added simultaneously. When this situation occurs, the path needs
to be added to _files_by_path, but nothing needs to change in
_files_by_xcfilelikeelement, and the caller should have avoided adding
the PBXBuildFile if it is already present in the list of children.
"""
xcfilelikeelement = pbxbuildfile._properties['fileRef']
paths = []
if path != None:
# It's best when the caller provides the path.
if isinstance(xcfilelikeelement, PBXVariantGroup):
paths.append(path)
else:
# If the caller didn't provide a path, there can be either multiple
# paths (PBXVariantGroup) or one.
if isinstance(xcfilelikeelement, PBXVariantGroup):
for variant in xcfilelikeelement._properties['children']:
paths.append(variant.FullPath())
else:
paths.append(xcfilelikeelement.FullPath())
# Add the paths first, because if something's going to raise, the
# messages provided by _AddPathToDict are more useful owing to its
# having access to a real pathname and not just an object's Name().
for a_path in paths:
self._AddPathToDict(pbxbuildfile, a_path)
# If another PBXBuildFile references this XCFileLikeElement, there's a
# problem.
if xcfilelikeelement in self._files_by_xcfilelikeelement and \
self._files_by_xcfilelikeelement[xcfilelikeelement] != pbxbuildfile:
raise ValueError('Found multiple build files for ' + \
xcfilelikeelement.Name())
self._files_by_xcfilelikeelement[xcfilelikeelement] = pbxbuildfile
def AppendBuildFile(self, pbxbuildfile, path=None):
# Callers should use this instead of calling
# AppendProperty('files', pbxbuildfile) directly because this function
# maintains the object's dicts. Better yet, callers can just call AddFile
# with a pathname and not worry about building their own PBXBuildFile
# objects.
self.AppendProperty('files', pbxbuildfile)
self._AddBuildFileToDicts(pbxbuildfile, path)
def AddFile(self, path, settings=None):
(file_group, hierarchical) = self.FileGroup(path)
file_ref = file_group.AddOrGetFileByPath(path, hierarchical)
if file_ref in self._files_by_xcfilelikeelement and \
isinstance(file_ref, PBXVariantGroup):
# There's already a PBXBuildFile in this phase corresponding to the
# PBXVariantGroup. path just provides a new variant that belongs to
# the group. Add the path to the dict.
pbxbuildfile = self._files_by_xcfilelikeelement[file_ref]
self._AddBuildFileToDicts(pbxbuildfile, path)
else:
# Add a new PBXBuildFile to get file_ref into the phase.
if settings is None:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref})
else:
pbxbuildfile = PBXBuildFile({'fileRef': file_ref, 'settings': settings})
self.AppendBuildFile(pbxbuildfile, path)
class PBXHeadersBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Headers'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXResourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Resources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXSourcesBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Sources'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
class PBXFrameworksBuildPhase(XCBuildPhase):
# No additions to the schema relative to XCBuildPhase.
def Name(self):
return 'Frameworks'
def FileGroup(self, path):
(root, ext) = posixpath.splitext(path)
if ext != '':
ext = ext[1:].lower()
if ext == 'o':
# .o files are added to Xcode Frameworks phases, but conceptually aren't
# frameworks, they're more like sources or intermediates. Redirect them
# to show up in one of those other groups.
return self.PBXProjectAncestor().RootGroupForPath(path)
else:
return (self.PBXProjectAncestor().FrameworksGroup(), False)
class PBXShellScriptBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'inputPaths': [1, str, 0, 1, []],
'name': [0, str, 0, 0],
'outputPaths': [1, str, 0, 1, []],
'shellPath': [0, str, 0, 1, '/bin/sh'],
'shellScript': [0, str, 0, 1],
'showEnvVarsInLog': [0, int, 0, 0],
})
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'ShellScript'
class PBXCopyFilesBuildPhase(XCBuildPhase):
_schema = XCBuildPhase._schema.copy()
_schema.update({
'dstPath': [0, str, 0, 1],
'dstSubfolderSpec': [0, int, 0, 1],
'name': [0, str, 0, 0],
})
# path_tree_re matches "$(DIR)/path" or just "$(DIR)". Match group 1 is
# "DIR", match group 3 is "path" or None.
path_tree_re = re.compile('^\\$\\((.*)\\)(/(.*)|)$')
# path_tree_to_subfolder maps names of Xcode variables to the associated
# dstSubfolderSpec property value used in a PBXCopyFilesBuildPhase object.
path_tree_to_subfolder = {
'BUILT_FRAMEWORKS_DIR': 10, # Frameworks Directory
'BUILT_PRODUCTS_DIR': 16, # Products Directory
# Other types that can be chosen via the Xcode UI.
# TODO(mark): Map Xcode variable names to these.
# : 1, # Wrapper
# : 6, # Executables: 6
# : 7, # Resources
# : 15, # Java Resources
# : 11, # Shared Frameworks
# : 12, # Shared Support
# : 13, # PlugIns
}
def Name(self):
if 'name' in self._properties:
return self._properties['name']
return 'CopyFiles'
def FileGroup(self, path):
return self.PBXProjectAncestor().RootGroupForPath(path)
def SetDestination(self, path):
"""Set the dstSubfolderSpec and dstPath properties from path.
path may be specified in the same notation used for XCHierarchicalElements,
specifically, "$(DIR)/path".
"""
path_tree_match = self.path_tree_re.search(path)
if path_tree_match:
      # The path is relative to an Xcode variable.
path_tree = path_tree_match.group(1)
relative_path = path_tree_match.group(3)
if path_tree in self.path_tree_to_subfolder:
subfolder = self.path_tree_to_subfolder[path_tree]
if relative_path is None:
relative_path = ''
else:
# The path starts with an unrecognized Xcode variable
# name like $(SRCROOT). Xcode will still handle this
# as an "absolute path" that starts with the variable.
subfolder = 0
relative_path = path
elif path.startswith('/'):
# Special case. Absolute paths are in dstSubfolderSpec 0.
subfolder = 0
relative_path = path[1:]
else:
raise ValueError('Can\'t use path %s in a %s' % \
(path, self.__class__.__name__))
self._properties['dstPath'] = relative_path
self._properties['dstSubfolderSpec'] = subfolder
class PBXBuildRule(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'compilerSpec': [0, str, 0, 1],
'filePatterns': [0, str, 0, 0],
'fileType': [0, str, 0, 1],
'isEditable': [0, int, 0, 1, 1],
'outputFiles': [1, str, 0, 1, []],
'script': [0, str, 0, 0],
})
def Name(self):
# Not very inspired, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.append(self._properties['fileType'])
if 'filePatterns' in self._properties:
hashables.append(self._properties['filePatterns'])
return hashables
class PBXContainerItemProxy(XCObject):
# When referencing an item in this project file, containerPortal is the
# PBXProject root object of this project file. When referencing an item in
# another project file, containerPortal is a PBXFileReference identifying
# the other project file.
#
# When serving as a proxy to an XCTarget (in this project file or another),
# proxyType is 1. When serving as a proxy to a PBXFileReference (in another
# project file), proxyType is 2. Type 2 is used for references to the
  # products of the other project file's targets.
#
# Xcode is weird about remoteGlobalIDString. Usually, it's printed without
# a comment, indicating that it's tracked internally simply as a string, but
# sometimes it's printed with a comment (usually when the object is initially
# created), indicating that it's tracked as a project file object at least
# sometimes. This module always tracks it as an object, but contains a hack
# to prevent it from printing the comment in the project file output. See
# _XCKVPrint.
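  # A minimal illustrative construction (assumed names; see
  # XCTarget.AddDependency below for the real call sites):
  #   PBXContainerItemProxy({'containerPortal': pbxproject,
  #                          'proxyType': 1,
  #                          'remoteGlobalIDString': other_target,
  #                          'remoteInfo': other_target.Name()})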
_schema = XCObject._schema.copy()
_schema.update({
'containerPortal': [0, XCContainerPortal, 0, 1],
'proxyType': [0, int, 0, 1],
'remoteGlobalIDString': [0, XCRemoteObject, 0, 1],
'remoteInfo': [0, str, 0, 1],
})
def __repr__(self):
props = self._properties
name = '%s.gyp:%s' % (props['containerPortal'].Name(), props['remoteInfo'])
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['containerPortal'].Hashables())
hashables.extend(self._properties['remoteGlobalIDString'].Hashables())
return hashables
class PBXTargetDependency(XCObject):
# The "target" property accepts an XCTarget object, and obviously not
# NoneType. But XCTarget is defined below, so it can't be put into the
# schema yet. The definition of PBXTargetDependency can't be moved below
# XCTarget because XCTarget's own schema references PBXTargetDependency.
# Python doesn't deal well with this circular relationship, and doesn't have
# a real way to do forward declarations. To work around, the type of
# the "target" property is reset below, after XCTarget is defined.
#
# At least one of "name" and "target" is required.
_schema = XCObject._schema.copy()
_schema.update({
'name': [0, str, 0, 0],
'target': [0, None.__class__, 0, 0],
'targetProxy': [0, PBXContainerItemProxy, 1, 1],
})
def __repr__(self):
name = self._properties.get('name') or self._properties['target'].Name()
return '<%s %r at 0x%x>' % (self.__class__.__name__, name, id(self))
def Name(self):
# Admittedly not the best name, but it's what Xcode uses.
return self.__class__.__name__
def Hashables(self):
# super
hashables = XCObject.Hashables(self)
# Use the hashables of the weak objects that this object refers to.
hashables.extend(self._properties['targetProxy'].Hashables())
return hashables
class PBXReferenceProxy(XCFileLikeElement):
_schema = XCFileLikeElement._schema.copy()
_schema.update({
'fileType': [0, str, 0, 1],
'path': [0, str, 0, 1],
'remoteRef': [0, PBXContainerItemProxy, 1, 1],
})
class XCTarget(XCRemoteObject):
  # An XCTarget is really just an XCObject; the XCRemoteObject thing is just
# to allow PBXProject to be used in the remoteGlobalIDString property of
# PBXContainerItemProxy.
#
# Setting a "name" property at instantiation may also affect "productName",
# which may in turn affect the "PRODUCT_NAME" build setting in children of
# "buildConfigurationList". See __init__ below.
_schema = XCRemoteObject._schema.copy()
_schema.update({
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'buildPhases': [1, XCBuildPhase, 1, 1, []],
'dependencies': [1, PBXTargetDependency, 1, 1, []],
'name': [0, str, 0, 1],
'productName': [0, str, 0, 1],
})
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCRemoteObject.__init__(self, properties, id, parent)
# Set up additional defaults not expressed in the schema. If a "name"
# property was supplied, set "productName" if it is not present. Also set
# the "PRODUCT_NAME" build setting in each configuration, but only if
# the setting is not present in any build configuration.
if 'name' in self._properties:
if not 'productName' in self._properties:
self.SetProperty('productName', self._properties['name'])
if 'productName' in self._properties:
if 'buildConfigurationList' in self._properties:
configs = self._properties['buildConfigurationList']
if configs.HasBuildSetting('PRODUCT_NAME') == 0:
configs.SetBuildSetting('PRODUCT_NAME',
self._properties['productName'])
def AddDependency(self, other):
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject == other_pbxproject:
# Add a dependency to another target in the same project file.
container = PBXContainerItemProxy({'containerPortal': pbxproject,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name()})
dependency = PBXTargetDependency({'target': other,
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
else:
# Add a dependency to a target in a different project file.
other_project_ref = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[1]
container = PBXContainerItemProxy({
'containerPortal': other_project_ref,
'proxyType': 1,
'remoteGlobalIDString': other,
'remoteInfo': other.Name(),
})
dependency = PBXTargetDependency({'name': other.Name(),
'targetProxy': container})
self.AppendProperty('dependencies', dependency)
# Proxy all of these through to the build configuration list.
def ConfigurationNamed(self, name):
return self._properties['buildConfigurationList'].ConfigurationNamed(name)
def DefaultConfiguration(self):
return self._properties['buildConfigurationList'].DefaultConfiguration()
def HasBuildSetting(self, key):
return self._properties['buildConfigurationList'].HasBuildSetting(key)
def GetBuildSetting(self, key):
return self._properties['buildConfigurationList'].GetBuildSetting(key)
def SetBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].SetBuildSetting(key, \
value)
def AppendBuildSetting(self, key, value):
return self._properties['buildConfigurationList'].AppendBuildSetting(key, \
value)
def DelBuildSetting(self, key):
return self._properties['buildConfigurationList'].DelBuildSetting(key)
# Redefine the type of the "target" property. See PBXTargetDependency._schema
# above.
PBXTargetDependency._schema['target'][1] = XCTarget
class PBXNativeTarget(XCTarget):
# buildPhases is overridden in the schema to be able to set defaults.
#
# NOTE: Contrary to most objects, it is advisable to set parent when
# constructing PBXNativeTarget. A parent of an XCTarget must be a PBXProject
# object. A parent reference is required for a PBXNativeTarget during
# construction to be able to set up the target defaults for productReference,
# because a PBXBuildFile object must be created for the target and it must
# be added to the PBXProject's mainGroup hierarchy.
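  # A minimal illustrative construction (assumed names):
  #   PBXNativeTarget({'name': 'foo',
  #                    'productType': 'com.apple.product-type.tool'},
  #                   parent=pbxproject)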
_schema = XCTarget._schema.copy()
_schema.update({
'buildPhases': [1, XCBuildPhase, 1, 1,
[PBXSourcesBuildPhase(), PBXFrameworksBuildPhase()]],
'buildRules': [1, PBXBuildRule, 1, 1, []],
'productReference': [0, PBXFileReference, 0, 1],
'productType': [0, str, 0, 1],
})
# Mapping from Xcode product-types to settings. The settings are:
# filetype : used for explicitFileType in the project file
# prefix : the prefix for the file name
# suffix : the suffix for the file name
_product_filetypes = {
'com.apple.product-type.application': ['wrapper.application',
'', '.app'],
'com.apple.product-type.application.watchapp': ['wrapper.application',
'', '.app'],
'com.apple.product-type.watchkit-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.app-extension': ['wrapper.app-extension',
'', '.appex'],
'com.apple.product-type.bundle': ['wrapper.cfbundle',
'', '.bundle'],
'com.apple.product-type.framework': ['wrapper.framework',
'', '.framework'],
'com.apple.product-type.library.dynamic': ['compiled.mach-o.dylib',
'lib', '.dylib'],
'com.apple.product-type.library.static': ['archive.ar',
'lib', '.a'],
'com.apple.product-type.tool': ['compiled.mach-o.executable',
'', ''],
'com.apple.product-type.bundle.unit-test': ['wrapper.cfbundle',
'', '.xctest'],
'com.googlecode.gyp.xcode.bundle': ['compiled.mach-o.dylib',
'', '.so'],
'com.apple.product-type.kernel-extension': ['wrapper.kext',
'', '.kext'],
}
def __init__(self, properties=None, id=None, parent=None,
force_outdir=None, force_prefix=None, force_extension=None):
# super
XCTarget.__init__(self, properties, id, parent)
if 'productName' in self._properties and \
'productType' in self._properties and \
not 'productReference' in self._properties and \
self._properties['productType'] in self._product_filetypes:
products_group = None
pbxproject = self.PBXProjectAncestor()
if pbxproject != None:
products_group = pbxproject.ProductsGroup()
if products_group != None:
(filetype, prefix, suffix) = \
self._product_filetypes[self._properties['productType']]
# Xcode does not have a distinct type for loadable modules that are
# pure BSD targets (not in a bundle wrapper). GYP allows such modules
# to be specified by setting a target type to loadable_module without
# having mac_bundle set. These are mapped to the pseudo-product type
# com.googlecode.gyp.xcode.bundle.
#
# By picking up this special type and converting it to a dynamic
# library (com.apple.product-type.library.dynamic) with fix-ups,
# single-file loadable modules can be produced.
#
# MACH_O_TYPE is changed to mh_bundle to produce the proper file type
# (as opposed to mh_dylib). In order for linking to succeed,
# DYLIB_CURRENT_VERSION and DYLIB_COMPATIBILITY_VERSION must be
# cleared. They are meaningless for type mh_bundle.
#
# Finally, the .so extension is forcibly applied over the default
# (.dylib), unless another forced extension is already selected.
# .dylib is plainly wrong, and .bundle is used by loadable_modules in
# bundle wrappers (com.apple.product-type.bundle). .so seems an odd
# choice because it's used as the extension on many other systems that
# don't distinguish between linkable shared libraries and non-linkable
# loadable modules, but there's precedent: Python loadable modules on
# Mac OS X use an .so extension.
if self._properties['productType'] == 'com.googlecode.gyp.xcode.bundle':
self._properties['productType'] = \
'com.apple.product-type.library.dynamic'
self.SetBuildSetting('MACH_O_TYPE', 'mh_bundle')
self.SetBuildSetting('DYLIB_CURRENT_VERSION', '')
self.SetBuildSetting('DYLIB_COMPATIBILITY_VERSION', '')
if force_extension is None:
force_extension = suffix[1:]
if self._properties['productType'] == \
           'com.apple.product-type.bundle.unit-test':
if force_extension is None:
force_extension = suffix[1:]
if force_extension is not None:
# If it's a wrapper (bundle), set WRAPPER_EXTENSION.
# Extension override.
suffix = '.' + force_extension
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_EXTENSION', force_extension)
else:
self.SetBuildSetting('EXECUTABLE_EXTENSION', force_extension)
if filetype.startswith('compiled.mach-o.executable'):
product_name = self._properties['productName']
product_name += suffix
suffix = ''
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
# Xcode handles most prefixes based on the target type, however there
# are exceptions. If a "BSD Dynamic Library" target is added in the
# Xcode UI, Xcode sets EXECUTABLE_PREFIX. This check duplicates that
# behavior.
if force_prefix is not None:
prefix = force_prefix
if filetype.startswith('wrapper.'):
self.SetBuildSetting('WRAPPER_PREFIX', prefix)
else:
self.SetBuildSetting('EXECUTABLE_PREFIX', prefix)
if force_outdir is not None:
self.SetBuildSetting('TARGET_BUILD_DIR', force_outdir)
# TODO(tvl): Remove the below hack.
# http://code.google.com/p/gyp/issues/detail?id=122
# Some targets include the prefix in the target_name. These targets
# really should just add a product_name setting that doesn't include
# the prefix. For example:
# target_name = 'libevent', product_name = 'event'
# This check cleans up for them.
product_name = self._properties['productName']
prefix_len = len(prefix)
if prefix_len and (product_name[:prefix_len] == prefix):
product_name = product_name[prefix_len:]
self.SetProperty('productName', product_name)
self.SetBuildSetting('PRODUCT_NAME', product_name)
ref_props = {
'explicitFileType': filetype,
'includeInIndex': 0,
'path': prefix + product_name + suffix,
'sourceTree': 'BUILT_PRODUCTS_DIR',
}
file_ref = PBXFileReference(ref_props)
products_group.AppendChild(file_ref)
self.SetProperty('productReference', file_ref)
def GetBuildPhaseByType(self, type):
if not 'buildPhases' in self._properties:
return None
the_phase = None
for phase in self._properties['buildPhases']:
if isinstance(phase, type):
        # Some phases may be present in multiples in a well-formed project file,
        # but phases like PBXSourcesBuildPhase may only be present singly, and
        # this function is intended to locate exactly one such phase.  Loop
        # over the entire list of phases and assert if more than one of the
        # desired type is found.
assert the_phase is None
the_phase = phase
return the_phase
def HeadersPhase(self):
headers_phase = self.GetBuildPhaseByType(PBXHeadersBuildPhase)
if headers_phase is None:
headers_phase = PBXHeadersBuildPhase()
# The headers phase should come before the resources, sources, and
# frameworks phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXResourcesBuildPhase) or \
isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, headers_phase)
headers_phase.parent = self
return headers_phase
def ResourcesPhase(self):
resources_phase = self.GetBuildPhaseByType(PBXResourcesBuildPhase)
if resources_phase is None:
resources_phase = PBXResourcesBuildPhase()
# The resources phase should come before the sources and frameworks
# phases, if any.
insert_at = len(self._properties['buildPhases'])
for index in xrange(0, len(self._properties['buildPhases'])):
phase = self._properties['buildPhases'][index]
if isinstance(phase, PBXSourcesBuildPhase) or \
isinstance(phase, PBXFrameworksBuildPhase):
insert_at = index
break
self._properties['buildPhases'].insert(insert_at, resources_phase)
resources_phase.parent = self
return resources_phase
def SourcesPhase(self):
sources_phase = self.GetBuildPhaseByType(PBXSourcesBuildPhase)
if sources_phase is None:
sources_phase = PBXSourcesBuildPhase()
self.AppendProperty('buildPhases', sources_phase)
return sources_phase
def FrameworksPhase(self):
frameworks_phase = self.GetBuildPhaseByType(PBXFrameworksBuildPhase)
if frameworks_phase is None:
frameworks_phase = PBXFrameworksBuildPhase()
self.AppendProperty('buildPhases', frameworks_phase)
return frameworks_phase
def AddDependency(self, other):
# super
XCTarget.AddDependency(self, other)
static_library_type = 'com.apple.product-type.library.static'
shared_library_type = 'com.apple.product-type.library.dynamic'
framework_type = 'com.apple.product-type.framework'
if isinstance(other, PBXNativeTarget) and \
'productType' in self._properties and \
self._properties['productType'] != static_library_type and \
'productType' in other._properties and \
(other._properties['productType'] == static_library_type or \
((other._properties['productType'] == shared_library_type or \
other._properties['productType'] == framework_type) and \
((not other.HasBuildSetting('MACH_O_TYPE')) or
other.GetBuildSetting('MACH_O_TYPE') != 'mh_bundle'))):
file_ref = other.GetProperty('productReference')
pbxproject = self.PBXProjectAncestor()
other_pbxproject = other.PBXProjectAncestor()
if pbxproject != other_pbxproject:
other_project_product_group = \
pbxproject.AddOrGetProjectReference(other_pbxproject)[0]
file_ref = other_project_product_group.GetChildByRemoteObject(file_ref)
self.FrameworksPhase().AppendProperty('files',
PBXBuildFile({'fileRef': file_ref}))
class PBXAggregateTarget(XCTarget):
pass
class PBXProject(XCContainerPortal):
  # A PBXProject is really just an XCObject; the XCContainerPortal thing is
# just to allow PBXProject to be used in the containerPortal property of
# PBXContainerItemProxy.
"""
Attributes:
path: "sample.xcodeproj". TODO(mark) Document me!
_other_pbxprojects: A dictionary, keyed by other PBXProject objects. Each
value is a reference to the dict in the
projectReferences list associated with the keyed
PBXProject.
"""
_schema = XCContainerPortal._schema.copy()
_schema.update({
'attributes': [0, dict, 0, 0],
'buildConfigurationList': [0, XCConfigurationList, 1, 1,
XCConfigurationList()],
'compatibilityVersion': [0, str, 0, 1, 'Xcode 3.2'],
'hasScannedForEncodings': [0, int, 0, 1, 1],
'mainGroup': [0, PBXGroup, 1, 1, PBXGroup()],
'projectDirPath': [0, str, 0, 1, ''],
'projectReferences': [1, dict, 0, 0],
'projectRoot': [0, str, 0, 1, ''],
'targets': [1, XCTarget, 1, 1, []],
})
def __init__(self, properties=None, id=None, parent=None, path=None):
self.path = path
self._other_pbxprojects = {}
# super
return XCContainerPortal.__init__(self, properties, id, parent)
def Name(self):
name = self.path
if name[-10:] == '.xcodeproj':
name = name[:-10]
return posixpath.basename(name)
def Path(self):
return self.path
def Comment(self):
return 'Project object'
def Children(self):
# super
children = XCContainerPortal.Children(self)
# Add children that the schema doesn't know about. Maybe there's a more
# elegant way around this, but this is the only case where we need to own
# objects in a dictionary (that is itself in a list), and three lines for
# a one-off isn't that big a deal.
if 'projectReferences' in self._properties:
for reference in self._properties['projectReferences']:
children.append(reference['ProductGroup'])
return children
def PBXProjectAncestor(self):
return self
def _GroupByName(self, name):
if not 'mainGroup' in self._properties:
self.SetProperty('mainGroup', PBXGroup())
main_group = self._properties['mainGroup']
group = main_group.GetChildByName(name)
if group is None:
group = PBXGroup({'name': name})
main_group.AppendChild(group)
return group
# SourceGroup and ProductsGroup are created by default in Xcode's own
# templates.
def SourceGroup(self):
return self._GroupByName('Source')
def ProductsGroup(self):
return self._GroupByName('Products')
# IntermediatesGroup is used to collect source-like files that are generated
# by rules or script phases and are placed in intermediate directories such
# as DerivedSources.
def IntermediatesGroup(self):
return self._GroupByName('Intermediates')
# FrameworksGroup and ProjectsGroup are top-level groups used to collect
# frameworks and projects.
def FrameworksGroup(self):
return self._GroupByName('Frameworks')
def ProjectsGroup(self):
return self._GroupByName('Projects')
def RootGroupForPath(self, path):
"""Returns a PBXGroup child of this object to which path should be added.
This method is intended to choose between SourceGroup and
IntermediatesGroup on the basis of whether path is present in a source
directory or an intermediates directory. For the purposes of this
determination, any path located within a derived file directory such as
PROJECT_DERIVED_FILE_DIR is treated as being in an intermediates
directory.
The returned value is a two-element tuple. The first element is the
PBXGroup, and the second element specifies whether that group should be
organized hierarchically (True) or as a single flat list (False).
"""
# TODO(mark): make this a class variable and bind to self on call?
# Also, this list is nowhere near exhaustive.
# INTERMEDIATE_DIR and SHARED_INTERMEDIATE_DIR are used by
# gyp.generator.xcode. There should probably be some way for that module
# to push the names in, rather than having to hard-code them here.
source_tree_groups = {
'DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
'PROJECT_DERIVED_FILE_DIR': (self.IntermediatesGroup, True),
'SHARED_INTERMEDIATE_DIR': (self.IntermediatesGroup, True),
}
(source_tree, path) = SourceTreeAndPathFromPath(path)
if source_tree != None and source_tree in source_tree_groups:
(group_func, hierarchical) = source_tree_groups[source_tree]
group = group_func()
return (group, hierarchical)
# TODO(mark): make additional choices based on file extension.
return (self.SourceGroup(), True)
def AddOrGetFileInRootGroup(self, path):
"""Returns a PBXFileReference corresponding to path in the correct group
according to RootGroupForPath's heuristics.
If an existing PBXFileReference for path exists, it will be returned.
Otherwise, one will be created and returned.
"""
(group, hierarchical) = self.RootGroupForPath(path)
return group.AddOrGetFileByPath(path, hierarchical)
def RootGroupsTakeOverOnlyChildren(self, recurse=False):
"""Calls TakeOverOnlyChild for all groups in the main group."""
for group in self._properties['mainGroup']._properties['children']:
if isinstance(group, PBXGroup):
group.TakeOverOnlyChild(recurse)
def SortGroups(self):
# Sort the children of the mainGroup (like "Source" and "Products")
# according to their defined order.
self._properties['mainGroup']._properties['children'] = \
sorted(self._properties['mainGroup']._properties['children'],
cmp=lambda x,y: x.CompareRootGroup(y))
# Sort everything else by putting group before files, and going
# alphabetically by name within sections of groups and files. SortGroup
# is recursive.
for group in self._properties['mainGroup']._properties['children']:
if not isinstance(group, PBXGroup):
continue
if group.Name() == 'Products':
# The Products group is a special case. Instead of sorting
# alphabetically, sort things in the order of the targets that
# produce the products. To do this, just build up a new list of
# products based on the targets.
products = []
for target in self._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
product = target._properties['productReference']
# Make sure that the product is already in the products group.
assert product in group._properties['children']
products.append(product)
# Make sure that this process doesn't miss anything that was already
# in the products group.
assert len(products) == len(group._properties['children'])
group._properties['children'] = products
else:
group.SortGroup()
def AddOrGetProjectReference(self, other_pbxproject):
"""Add a reference to another project file (via PBXProject object) to this
one.
Returns [ProductGroup, ProjectRef]. ProductGroup is a PBXGroup object in
this project file that contains a PBXReferenceProxy object for each
product of each PBXNativeTarget in the other project file. ProjectRef is
a PBXFileReference to the other project file.
If this project file already references the other project file, the
existing ProductGroup and ProjectRef are returned. The ProductGroup will
still be updated if necessary.
"""
if not 'projectReferences' in self._properties:
self._properties['projectReferences'] = []
product_group = None
project_ref = None
if not other_pbxproject in self._other_pbxprojects:
# This project file isn't yet linked to the other one. Establish the
# link.
product_group = PBXGroup({'name': 'Products'})
# ProductGroup is strong.
product_group.parent = self
# There's nothing unique about this PBXGroup, and if left alone, it will
# wind up with the same set of hashables as all other PBXGroup objects
# owned by the projectReferences list. Add the hashables of the
# remote PBXProject that it's related to.
product_group._hashables.extend(other_pbxproject.Hashables())
# The other project reports its path as relative to the same directory
# that this project's path is relative to. The other project's path
# is not necessarily already relative to this project. Figure out the
# pathname that this project needs to use to refer to the other one.
this_path = posixpath.dirname(self.Path())
projectDirPath = self.GetProperty('projectDirPath')
if projectDirPath:
if posixpath.isabs(projectDirPath[0]):
this_path = projectDirPath
else:
this_path = posixpath.join(this_path, projectDirPath)
other_path = gyp.common.RelativePath(other_pbxproject.Path(), this_path)
# ProjectRef is weak (it's owned by the mainGroup hierarchy).
project_ref = PBXFileReference({
'lastKnownFileType': 'wrapper.pb-project',
'path': other_path,
'sourceTree': 'SOURCE_ROOT',
})
self.ProjectsGroup().AppendChild(project_ref)
ref_dict = {'ProductGroup': product_group, 'ProjectRef': project_ref}
self._other_pbxprojects[other_pbxproject] = ref_dict
self.AppendProperty('projectReferences', ref_dict)
# Xcode seems to sort this list case-insensitively
self._properties['projectReferences'] = \
sorted(self._properties['projectReferences'], cmp=lambda x,y:
cmp(x['ProjectRef'].Name().lower(),
y['ProjectRef'].Name().lower()))
else:
      # The link already exists. Pull out the relevant data.
project_ref_dict = self._other_pbxprojects[other_pbxproject]
product_group = project_ref_dict['ProductGroup']
project_ref = project_ref_dict['ProjectRef']
self._SetUpProductReferences(other_pbxproject, product_group, project_ref)
inherit_unique_symroot = self._AllSymrootsUnique(other_pbxproject, False)
targets = other_pbxproject.GetProperty('targets')
if all(self._AllSymrootsUnique(t, inherit_unique_symroot) for t in targets):
dir_path = project_ref._properties['path']
product_group._hashables.extend(dir_path)
return [product_group, project_ref]
def _AllSymrootsUnique(self, target, inherit_unique_symroot):
# Returns True if all configurations have a unique 'SYMROOT' attribute.
    # The value of inherit_unique_symroot decides whether a configuration is
    # assumed to inherit a unique 'SYMROOT' attribute from its parent when it
    # doesn't define an explicit value for 'SYMROOT'.
symroots = self._DefinedSymroots(target)
    for s in symroots:
if (s is not None and not self._IsUniqueSymrootForTarget(s) or
s is None and not inherit_unique_symroot):
return False
return True if symroots else inherit_unique_symroot
def _DefinedSymroots(self, target):
# Returns all values for the 'SYMROOT' attribute defined in all
# configurations for this target. If any configuration doesn't define the
    # 'SYMROOT' attribute, None is added to the returned set. If no
    # configuration defines the 'SYMROOT' attribute, an empty set is
    # returned.
config_list = target.GetProperty('buildConfigurationList')
symroots = set()
for config in config_list.GetProperty('buildConfigurations'):
setting = config.GetProperty('buildSettings')
if 'SYMROOT' in setting:
symroots.add(setting['SYMROOT'])
else:
symroots.add(None)
if len(symroots) == 1 and None in symroots:
return set()
return symroots
def _IsUniqueSymrootForTarget(self, symroot):
# This method returns True if all configurations in target contain a
    # 'SYMROOT' attribute that is unique for the given target. A value is
    # considered unique if the Xcode macro '$SRCROOT' appears anywhere in it.
uniquifier = ['$SRCROOT', '$(SRCROOT)']
if any(x in symroot for x in uniquifier):
return True
return False
def _SetUpProductReferences(self, other_pbxproject, product_group,
project_ref):
# TODO(mark): This only adds references to products in other_pbxproject
# when they don't exist in this pbxproject. Perhaps it should also
# remove references from this pbxproject that are no longer present in
# other_pbxproject. Perhaps it should update various properties if they
# change.
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
other_fileref = target._properties['productReference']
if product_group.GetChildByRemoteObject(other_fileref) is None:
# Xcode sets remoteInfo to the name of the target and not the name
# of its product, despite this proxy being a reference to the product.
container_item = PBXContainerItemProxy({
'containerPortal': project_ref,
'proxyType': 2,
'remoteGlobalIDString': other_fileref,
'remoteInfo': target.Name()
})
# TODO(mark): Does sourceTree get copied straight over from the other
# project? Can the other project ever have lastKnownFileType here
# instead of explicitFileType? (Use it if so?) Can path ever be
# unset? (I don't think so.) Can other_fileref have name set, and
# does it impact the PBXReferenceProxy if so? These are the questions
# that perhaps will be answered one day.
reference_proxy = PBXReferenceProxy({
'fileType': other_fileref._properties['explicitFileType'],
'path': other_fileref._properties['path'],
'sourceTree': other_fileref._properties['sourceTree'],
'remoteRef': container_item,
})
product_group.AppendChild(reference_proxy)
def SortRemoteProductReferences(self):
# For each remote project file, sort the associated ProductGroup in the
# same order that the targets are sorted in the remote project file. This
# is the sort order used by Xcode.
def CompareProducts(x, y, remote_products):
# x and y are PBXReferenceProxy objects. Go through their associated
      # PBXContainerItemProxy to get the remote PBXFileReference, which will be
# present in the remote_products list.
x_remote = x._properties['remoteRef']._properties['remoteGlobalIDString']
y_remote = y._properties['remoteRef']._properties['remoteGlobalIDString']
x_index = remote_products.index(x_remote)
y_index = remote_products.index(y_remote)
# Use the order of each remote PBXFileReference in remote_products to
# determine the sort order.
return cmp(x_index, y_index)
for other_pbxproject, ref_dict in self._other_pbxprojects.iteritems():
# Build up a list of products in the remote project file, ordered the
# same as the targets that produce them.
remote_products = []
for target in other_pbxproject._properties['targets']:
if not isinstance(target, PBXNativeTarget):
continue
remote_products.append(target._properties['productReference'])
# Sort the PBXReferenceProxy children according to the list of remote
# products.
product_group = ref_dict['ProductGroup']
product_group._properties['children'] = sorted(
product_group._properties['children'],
cmp=lambda x, y, rp=remote_products: CompareProducts(x, y, rp))
class XCProjectFile(XCObject):
_schema = XCObject._schema.copy()
_schema.update({
'archiveVersion': [0, int, 0, 1, 1],
'classes': [0, dict, 0, 1, {}],
'objectVersion': [0, int, 0, 1, 46],
'rootObject': [0, PBXProject, 1, 1],
})
def ComputeIDs(self, recursive=True, overwrite=True, hash=None):
# Although XCProjectFile is implemented here as an XCObject, it's not a
# proper object in the Xcode sense, and it certainly doesn't have its own
# ID. Pass through an attempt to update IDs to the real root object.
if recursive:
self._properties['rootObject'].ComputeIDs(recursive, overwrite, hash)
def Print(self, file=sys.stdout):
self.VerifyHasRequiredProperties()
# Add the special "objects" property, which will be caught and handled
# separately during printing. This structure allows a fairly standard
    # loop to do the normal printing.
self._properties['objects'] = {}
self._XCPrint(file, 0, '// !$*UTF8*$!\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '{ ')
else:
self._XCPrint(file, 0, '{\n')
for property, value in sorted(self._properties.iteritems(),
cmp=lambda x, y: cmp(x, y)):
if property == 'objects':
self._PrintObjects(file)
else:
self._XCKVPrint(file, 1, property, value)
self._XCPrint(file, 0, '}\n')
del self._properties['objects']
def _PrintObjects(self, file):
if self._should_print_single_line:
self._XCPrint(file, 0, 'objects = {')
else:
self._XCPrint(file, 1, 'objects = {\n')
objects_by_class = {}
for object in self.Descendants():
if object == self:
continue
class_name = object.__class__.__name__
if not class_name in objects_by_class:
objects_by_class[class_name] = []
objects_by_class[class_name].append(object)
for class_name in sorted(objects_by_class):
self._XCPrint(file, 0, '\n')
self._XCPrint(file, 0, '/* Begin ' + class_name + ' section */\n')
for object in sorted(objects_by_class[class_name],
cmp=lambda x, y: cmp(x.id, y.id)):
object.Print(file)
self._XCPrint(file, 0, '/* End ' + class_name + ' section */\n')
if self._should_print_single_line:
self._XCPrint(file, 0, '}; ')
else:
self._XCPrint(file, 1, '};\n')
| mit |
hollabaq86/haikuna-matata | env/lib/python2.7/site-packages/nltk/classify/senna.py | 5 | 7011 | # encoding: utf-8
# Natural Language Toolkit: Senna Interface
#
# Copyright (C) 2001-2017 NLTK Project
# Author: Rami Al-Rfou' <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
A general interface to the SENNA pipeline that supports any of the
operations specified in SUPPORTED_OPERATIONS.
Applying multiple operations at once has a speed advantage. For example,
Senna will automatically determine POS tags if you are extracting named
entities. Applying both of the operations will cost only the time of
extracting the named entities.
The SENNA pipeline has a fixed maximum size for the sentences that it can read.
By default it is 1024 tokens per sentence. If you have larger sentences,
consider changing the MAX_SENTENCE_SIZE value in SENNA_main.c and rebuilding
your system-specific binary. Otherwise this could introduce misalignment
errors.
The input is:
- the path to the directory that contains the SENNA executables. If the path
  is incorrect, Senna will automatically search for the executable file
  specified in the SENNA environment variable
- the list of operations to be performed
- (optionally) the encoding of the input data (default: utf-8)
Note: Unit tests for this module can be found in test/unit/test_senna.py
>>> from __future__ import unicode_literals
>>> from nltk.classify import Senna
>>> pipeline = Senna('/usr/share/senna-v3.0', ['pos', 'chk', 'ner'])
>>> sent = 'Dusseldorf is an international business center'.split()
>>> [(token['word'], token['chk'], token['ner'], token['pos']) for token in pipeline.tag(sent)] # doctest: +SKIP
[('Dusseldorf', 'B-NP', 'B-LOC', 'NNP'), ('is', 'B-VP', 'O', 'VBZ'), ('an', 'B-NP', 'O', 'DT'),
('international', 'I-NP', 'O', 'JJ'), ('business', 'I-NP', 'O', 'NN'), ('center', 'I-NP', 'O', 'NN')]
"""
from __future__ import unicode_literals
from os import path, sep, environ
from subprocess import Popen, PIPE
from platform import architecture, system
from nltk.tag.api import TaggerI
from nltk.compat import text_type, python_2_unicode_compatible
_senna_url = 'http://ml.nec-labs.com/senna/'
@python_2_unicode_compatible
class Senna(TaggerI):
SUPPORTED_OPERATIONS = ['pos', 'chk', 'ner']
def __init__(self, senna_path, operations, encoding='utf-8'):
self._encoding = encoding
self._path = path.normpath(senna_path) + sep
# Verifies the existence of the executable on the self._path first
#senna_binary_file_1 = self.executable(self._path)
exe_file_1 = self.executable(self._path)
if not path.isfile(exe_file_1):
# Check for the system environment
if 'SENNA' in environ:
#self._path = path.join(environ['SENNA'],'')
self._path = path.normpath(environ['SENNA']) + sep
exe_file_2 = self.executable(self._path)
if not path.isfile(exe_file_2):
raise OSError("Senna executable expected at %s or %s but not found" % (exe_file_1,exe_file_2))
self.operations = operations
def executable(self, base_path):
"""
        The function that determines the system-specific binary that should be
        used in the pipeline. In case the system is not known, the default senna
        binary will be used.
"""
os_name = system()
if os_name == 'Linux':
bits = architecture()[0]
if bits == '64bit':
return path.join(base_path, 'senna-linux64')
return path.join(base_path, 'senna-linux32')
if os_name == 'Windows':
return path.join(base_path, 'senna-win32.exe')
if os_name == 'Darwin':
return path.join(base_path, 'senna-osx')
return path.join(base_path, 'senna')
def _map(self):
"""
        A method that calculates the order of the columns that the SENNA
        pipeline will output the tags into. The order follows
        Senna.SUPPORTED_OPERATIONS, restricted to the requested operations.
"""
_map = {}
i = 1
for operation in Senna.SUPPORTED_OPERATIONS:
if operation in self.operations:
_map[operation] = i
                i += 1
return _map
def tag(self, tokens):
"""
Applies the specified operation(s) on a list of tokens.
"""
return self.tag_sents([tokens])[0]
def tag_sents(self, sentences):
"""
Applies the tag method over a list of sentences. This method will return a
list of dictionaries. Every dictionary will contain a word with its
calculated annotations/tags.
"""
encoding = self._encoding
if not path.isfile(self.executable(self._path)):
raise OSError("Senna executable expected at %s but not found" % self.executable(self._path))
# Build the senna command to run the tagger
_senna_cmd = [self.executable(self._path), '-path', self._path, '-usrtokens', '-iobtags']
_senna_cmd.extend(['-'+op for op in self.operations])
# Serialize the actual sentences to a temporary string
_input = '\n'.join((' '.join(x) for x in sentences))+'\n'
if isinstance(_input, text_type) and encoding:
_input = _input.encode(encoding)
# Run the tagger and get the output
p = Popen(_senna_cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE)
(stdout, stderr) = p.communicate(input=_input)
senna_output = stdout
# Check the return code.
if p.returncode != 0:
raise RuntimeError('Senna command failed! Details: %s' % stderr)
if encoding:
senna_output = stdout.decode(encoding)
# Output the tagged sentences
map_ = self._map()
tagged_sentences = [[]]
sentence_index = 0
token_index = 0
for tagged_word in senna_output.strip().split("\n"):
if not tagged_word:
tagged_sentences.append([])
sentence_index += 1
token_index = 0
continue
tags = tagged_word.split('\t')
result = {}
for tag in map_:
result[tag] = tags[map_[tag]].strip()
try:
result['word'] = sentences[sentence_index][token_index]
except IndexError:
raise IndexError(
"Misalignment error occurred at sentence number %d. Possible reason"
" is that the sentence size exceeded the maximum size. Check the "
"documentation of Senna class for more information."
% sentence_index)
tagged_sentences[-1].append(result)
token_index += 1
return tagged_sentences
# skip doctests if Senna is not installed
def setup_module(module):
from nose import SkipTest
try:
tagger = Senna('/usr/share/senna-v3.0', ['pos', 'chk', 'ner'])
except OSError:
raise SkipTest("Senna executable not found")
| mit |
mpvismer/pyqtgraph | examples/GLScatterPlotItem.py | 28 | 2864 | # -*- coding: utf-8 -*-
"""
Demonstrates use of GLScatterPlotItem with rapidly-updating plots.
"""
## Add path to library (just for examples; you do not need this)
import initExample
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import numpy as np
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.opts['distance'] = 20
w.show()
w.setWindowTitle('pyqtgraph example: GLScatterPlotItem')
g = gl.GLGridItem()
w.addItem(g)
##
## First example is a set of points with pxMode=False
## These demonstrate the ability to have points with real size down to a very small scale
##
pos = np.empty((53, 3))
size = np.empty((53))
color = np.empty((53, 4))
pos[0] = (1,0,0); size[0] = 0.5; color[0] = (1.0, 0.0, 0.0, 0.5)
pos[1] = (0,1,0); size[1] = 0.2; color[1] = (0.0, 0.0, 1.0, 0.5)
pos[2] = (0,0,1); size[2] = 2./3.; color[2] = (0.0, 1.0, 0.0, 0.5)
z = 0.5
d = 6.0
for i in range(3,53):
pos[i] = (0,0,z)
size[i] = 2./d
color[i] = (0.0, 1.0, 0.0, 0.5)
z *= 0.5
d *= 2.0
sp1 = gl.GLScatterPlotItem(pos=pos, size=size, color=color, pxMode=False)
sp1.translate(5,5,0)
w.addItem(sp1)
##
## Second example shows a volume of points with rapidly updating color
## and pxMode=True
##
pos = np.random.random(size=(100000,3))
pos *= [10,-10,10]
pos[0] = (0,0,0)
color = np.ones((pos.shape[0], 4))
d2 = (pos**2).sum(axis=1)**0.5
size = np.random.random(size=pos.shape[0])*10
sp2 = gl.GLScatterPlotItem(pos=pos, color=(1,1,1,1), size=size)
phase = 0.
w.addItem(sp2)
##
## Third example shows a grid of points with rapidly updating position
## and pxMode = False
##
pos3 = np.zeros((100,100,3))
pos3[:,:,:2] = np.mgrid[:100, :100].transpose(1,2,0) * [-0.1,0.1]
pos3 = pos3.reshape(10000,3)
d3 = (pos3**2).sum(axis=1)**0.5
sp3 = gl.GLScatterPlotItem(pos=pos3, color=(1,1,1,.3), size=0.1, pxMode=False)
w.addItem(sp3)
def update():
## update volume colors
global phase, sp2, d2
s = -np.cos(d2*2+phase)
color = np.empty((len(d2),4), dtype=np.float32)
color[:,3] = np.clip(s * 0.1, 0, 1)
color[:,0] = np.clip(s * 3.0, 0, 1)
color[:,1] = np.clip(s * 1.0, 0, 1)
color[:,2] = np.clip(s ** 3, 0, 1)
sp2.setData(color=color)
phase -= 0.1
## update surface positions and colors
global sp3, d3, pos3
z = -np.cos(d3*2+phase)
pos3[:,2] = z
color = np.empty((len(d3),4), dtype=np.float32)
color[:,3] = 0.3
color[:,0] = np.clip(z * 3.0, 0, 1)
color[:,1] = np.clip(z * 1.0, 0, 1)
color[:,2] = np.clip(z ** 3, 0, 1)
sp3.setData(pos=pos3, color=color)
t = QtCore.QTimer()
t.timeout.connect(update)
t.start(50)
## Start Qt event loop unless running in interactive mode.
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
vaygr/ansible | lib/ansible/modules/network/iosxr/iosxr_banner.py | 25 | 8287 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: iosxr_banner
version_added: "2.4"
author:
- Trishna Guha (@trishnaguha)
- Kedar Kekan (@kedarX)
short_description: Manage multiline banners on Cisco IOS XR devices
description:
  - This module will configure both exec and motd banners on a remote device
running Cisco IOS XR. It allows playbooks to add or remove
banner text from the running configuration.
extends_documentation_fragment: iosxr
notes:
- Tested against IOS XRv 6.1.2
options:
banner:
description:
- Specifies the type of banner to configure on remote device.
required: true
choices: ['login', 'motd']
text:
description:
- Banner text to be configured. Accepts multiline string,
without empty lines. Requires I(state=present).
state:
description:
- Existential state of the configuration on the device.
default: present
choices: ['present', 'absent']
"""
EXAMPLES = """
- name: configure the login banner
iosxr_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
iosxr_banner:
banner: motd
state: absent
- name: Configure banner from file
iosxr_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands sent to device with transport C(cli)
returned: always (empty list when no commands to send)
type: list
sample:
- banner login
- this is my login banner
- that contains a multiline
- string
xml:
description: NetConf rpc xml sent to device with transport C(netconf)
returned: always (empty list when no xml rpc to send)
type: list
version_added: 2.5
sample:
- '<config xmlns:xc="urn:ietf:params:xml:ns:netconf:base:1.0">
<banners xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-infra-infra-cfg">
<banner xc:operation="merge">
<banner-name>motd</banner-name>
<banner-text>Ansible banner example</banner-text>
</banner>
</banners>
</config>'
"""
import re
import collections
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.iosxr.iosxr import get_config, load_config
from ansible.module_utils.network.iosxr.iosxr import iosxr_argument_spec
from ansible.module_utils.network.iosxr.iosxr import build_xml, is_cliconf
from ansible.module_utils.network.iosxr.iosxr import etree_find, is_netconf
class ConfigBase(object):
def __init__(self, module):
self._module = module
self._result = {'changed': False, 'warnings': []}
self._want = {}
self._have = {}
def map_params_to_obj(self):
text = self._module.params['text']
if text:
text = "{!r}".format(str(text).strip())
self._want.update({
'banner': self._module.params['banner'],
'text': text,
'state': self._module.params['state']
})
class CliConfiguration(ConfigBase):
def __init__(self, module):
super(CliConfiguration, self).__init__(module)
def map_obj_to_commands(self):
commands = list()
state = self._module.params['state']
if state == 'absent':
if self._have.get('state') != 'absent' and ('text' in self._have.keys() and self._have['text']):
commands.append('no banner {!s}'.format(self._module.params['banner']))
elif state == 'present':
if (self._want['text'] and
self._want['text'].encode().decode('unicode_escape').strip("'") != self._have.get('text')):
banner_cmd = 'banner {!s} '.format(self._module.params['banner'])
banner_cmd += self._want['text'].strip()
commands.append(banner_cmd)
self._result['commands'] = commands
if commands:
commit = not self._module.check_mode
diff = load_config(self._module, commands, commit=commit)
if diff:
self._result['diff'] = dict(prepared=diff)
self._result['changed'] = True
def map_config_to_obj(self):
cli_filter = 'banner {!s}'.format(self._module.params['banner'])
output = get_config(self._module, config_filter=cli_filter)
match = re.search(r'banner (\S+) (.*)', output, re.DOTALL)
if match:
text = match.group(2).strip("'")
else:
text = None
obj = {'banner': self._module.params['banner'], 'state': 'absent'}
if output:
obj['text'] = text
obj['state'] = 'present'
self._have.update(obj)
def run(self):
self.map_params_to_obj()
self.map_config_to_obj()
self.map_obj_to_commands()
return self._result
class NCConfiguration(ConfigBase):
def __init__(self, module):
super(NCConfiguration, self).__init__(module)
self._banners_meta = collections.OrderedDict()
self._banners_meta.update([
('banner', {'xpath': 'banners/banner', 'tag': True, 'attrib': "operation"}),
('a:banner', {'xpath': 'banner/banner-name'}),
('a:text', {'xpath': 'banner/banner-text', 'operation': 'edit'})
])
def map_obj_to_xml_rpc(self):
state = self._module.params['state']
_get_filter = build_xml('banners', xmap=self._banners_meta, params=self._module.params, opcode="filter")
running = get_config(self._module, source='running', config_filter=_get_filter)
banner_name = None
banner_text = None
if etree_find(running, 'banner-text') is not None:
banner_name = etree_find(running, 'banner-name').text
banner_text = etree_find(running, 'banner-text').text
opcode = None
if state == 'absent' and banner_name == self._module.params['banner'] and len(banner_text):
opcode = "delete"
elif state == 'present':
opcode = 'merge'
self._result['xml'] = []
if opcode:
_edit_filter = build_xml('banners', xmap=self._banners_meta, params=self._module.params, opcode=opcode)
if _edit_filter is not None:
commit = not self._module.check_mode
diff = load_config(self._module, _edit_filter, commit=commit, running=running, nc_get_filter=_get_filter)
if diff:
self._result['xml'] = _edit_filter
if self._module._diff:
self._result['diff'] = dict(prepared=diff)
self._result['changed'] = True
def run(self):
self.map_params_to_obj()
self.map_obj_to_xml_rpc()
return self._result
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['login', 'motd']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(iosxr_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
config_object = None
if is_cliconf(module):
module.deprecate(msg="cli support for 'iosxr_banner' is deprecated. Use transport netconf instead",
version="4 releases from v2.5")
config_object = CliConfiguration(module)
elif is_netconf(module):
config_object = NCConfiguration(module)
result = None
if config_object is not None:
result = config_object.run()
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
jdarpinian/web-ui-skeleton | files-to-c-arrays.py | 2 | 2181 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
if len(sys.argv) < 3:
  print 'Usage: ' + sys.argv[0] + ' output_file input_file1 input_file2 ... input_fileN'
print
print 'Generates a .c file containing all of the input files as static'
print 'character arrays, along with a function to retrieve them.'
print
print 'const char *get_file(const char *path, size_t *out_size)'
exit(1)
def chunk(list, n):
"""Split a list into size n chunks (the last chunk may be shorter)."""
return (list[i : i + n] for i in range(0, len(list), n))
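# Illustrative (assumed): list(chunk('abcdef', 4)) yields ['abcd', 'ef'].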
filesizes = []
filepaths = []
filearrays = []
for filepath in sys.argv[2:]:
filepaths.append(filepath.replace('\\', '/').lstrip('./'))
file = open(filepath, 'rb').read()
filesizes.append(len(file))
escapedfile = '\\x' + '\\x'.join(chunk(file.encode('hex'), 2))
filearrays.append('"\n "'.join(chunk(escapedfile, 76)))
template = """#include <stdint.h>
#include <string.h>
static const char *file_paths[] = {"%s"};
static const size_t file_sizes[] = {%s};
static const int num_files = %d;
static const char *files[] = {
"%s"
};
const char *get_file(const char *path, size_t *out_size) {
for (int i = 0; i < num_files; i++) {
if (strcmp(file_paths[i], path) == 0) {
*out_size = file_sizes[i];
return files[i];
}
}
return NULL;
}
"""
output = open(sys.argv[1], 'w')
output.write(template % ('", "'.join(filepaths),
', '.join(str(x) for x in filesizes),
len(filepaths),
'",\n "'.join(filearrays)))
| apache-2.0 |
iulian787/spack | lib/spack/external/ruamel/yaml/error.py | 29 | 2752 | # coding: utf-8
from __future__ import absolute_import
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
try:
from .compat import utf8
except (ImportError, ValueError): # for Jython
from ruamel.yaml.compat import utf8
class Mark(object):
def __init__(self, name, index, line, column, buffer, pointer):
self.name = name
self.index = index
self.line = line
self.column = column
self.buffer = buffer
self.pointer = pointer
def get_snippet(self, indent=4, max_length=75):
if self.buffer is None:
return None
head = ''
start = self.pointer
while (start > 0 and
self.buffer[start-1] not in u'\0\r\n\x85\u2028\u2029'):
start -= 1
if self.pointer-start > max_length/2-1:
head = ' ... '
start += 5
break
tail = ''
end = self.pointer
while (end < len(self.buffer) and
self.buffer[end] not in u'\0\r\n\x85\u2028\u2029'):
end += 1
if end-self.pointer > max_length/2-1:
tail = ' ... '
end -= 5
break
snippet = utf8(self.buffer[start:end])
return ' '*indent + head + snippet + tail + '\n' \
+ ' '*(indent+self.pointer-start+len(head)) + '^'
def __str__(self):
snippet = self.get_snippet()
where = " in \"%s\", line %d, column %d" \
% (self.name, self.line+1, self.column+1)
if snippet is not None:
where += ":\n"+snippet
return where
class YAMLError(Exception):
pass
class MarkedYAMLError(YAMLError):
def __init__(self, context=None, context_mark=None,
problem=None, problem_mark=None, note=None):
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
def __str__(self):
lines = []
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None \
and (self.problem is None or self.problem_mark is None or
self.context_mark.name != self.problem_mark.name or
self.context_mark.line != self.problem_mark.line or
self.context_mark.column != self.problem_mark.column):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None:
lines.append(self.note)
return '\n'.join(lines)
| lgpl-2.1 |
google-code/arisgames | zxing-master/cpp/scons/scons-local-2.0.0.final.0/SCons/compat/_scons_collections.py | 34 | 1869 | #
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__doc__ = """
collections compatibility module for older (pre-2.4) Python versions
This does not not NOT (repeat, *NOT*) provide complete collections
functionality. It only wraps the portions of collections functionality
used by SCons, in an interface that looks enough like collections for
our purposes.
"""
__revision__ = "src/engine/SCons/compat/_scons_collections.py 5023 2010/06/14 22:05:46 scons"
# Use exec to hide old names from fixers.
exec("""if True:
from UserDict import UserDict
from UserList import UserList
from UserString import UserString""")
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| mit |
kelle/astropy | astropy/io/ascii/tests/test_read.py | 1 | 47925 | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
import re
from io import BytesIO, open
from collections import OrderedDict
import locale
import platform
import pytest
import numpy as np
from ....extern import six # noqa
from ....extern.six.moves import zip, cStringIO as StringIO
from ... import ascii
from ....table import Table
from ....units import Unit
from .common import (raises, assert_equal, assert_almost_equal,
assert_true, setup_function, teardown_function)
from .. import core
from ..ui import _probably_html, get_read_trace
try:
import bz2 # pylint: disable=W0611
except ImportError:
HAS_BZ2 = False
else:
HAS_BZ2 = True
try:
import pathlib
except ImportError:
HAS_PATHLIB = False
else:
HAS_PATHLIB = True
@pytest.mark.parametrize('fast_reader', [True, False, {'use_fast_converter': False},
{'use_fast_converter': True}, 'force'])
def test_convert_overflow(fast_reader):
"""
Test reading an extremely large integer, which falls through to
string due to an overflow error (#2234). The C parsers used to
return inf (kind 'f') for this.
Kind should be 'S' in Python2, 'U' in Python3.
"""
expected_kind = ('S', 'U')
dat = ascii.read(['a', '1' * 10000], format='basic',
fast_reader=fast_reader, guess=False)
assert dat['a'].dtype.kind in expected_kind
def test_guess_with_names_arg():
"""
Make sure reading a table with guess=True gives the expected result when
the names arg is specified.
"""
# This is a NoHeader format table and so `names` should replace
# the default col0, col1 names. It fails as a Basic format
# table when guessing because the column names would be '1', '2'.
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'))
assert len(dat) == 2
assert dat.colnames == ['a', 'b']
# This is a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c,d', '3,4'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# This is also a Basic format table and the first row
# gives the column names 'c', 'd', which get replaced by 'a', 'b'
dat = ascii.read(['c d', 'e f'], names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
def test_guess_with_format_arg():
"""
When the format or Reader is explicitly given then disable the
strict column name checking in guessing.
"""
dat = ascii.read(['1,2', '3,4'], format='basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), format='basic')
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
dat = ascii.read(['1,2', '3,4'], Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = ascii.read(['1,2', '3,4'], names=('a', 'b'), Reader=ascii.Basic)
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
# For good measure check the same in the unified I/O interface
dat = Table.read(['1,2', '3,4'], format='ascii.basic')
assert len(dat) == 1
assert dat.colnames == ['1', '2']
dat = Table.read(['1,2', '3,4'], format='ascii.basic', names=('a', 'b'))
assert len(dat) == 1
assert dat.colnames == ['a', 'b']
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_with_names_arg(fast_reader):
"""
Test that a bad value of `names` raises an exception.
"""
with pytest.raises(ValueError):
dat = ascii.read(['c d', 'e f'], names=('a', ), guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_all_files(fast_reader):
for testfile in get_testfiles():
if testfile.get('skip'):
print('\n\n******** SKIPPING {}'.format(testfile['name']))
continue
print('\n\n******** READING {}'.format(testfile['name']))
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if 'Reader' in test_opts and 'fast_{0}'.format(test_opts['Reader']._format_name) \
in core.FAST_CLASSES: # has fast version
if 'Inputter' not in test_opts: # fast reader doesn't allow this
test_opts['fast_reader'] = fast_reader
table = ascii.read(testfile['name'], **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_all_files_via_table(fast_reader):
for testfile in get_testfiles():
if testfile.get('skip'):
print('\n\n******** SKIPPING {}'.format(testfile['name']))
continue
print('\n\n******** READING {}'.format(testfile['name']))
for guess in (True, False):
test_opts = testfile['opts'].copy()
if 'guess' not in test_opts:
test_opts['guess'] = guess
if 'Reader' in test_opts:
format = 'ascii.{0}'.format(test_opts['Reader']._format_name)
del test_opts['Reader']
else:
format = 'ascii'
if 'fast_{0}'.format(format) in core.FAST_CLASSES:
test_opts['fast_reader'] = fast_reader
table = Table.read(testfile['name'], format=format, **test_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_guess_all_files():
for testfile in get_testfiles():
if testfile.get('skip'):
print('\n\n******** SKIPPING {}'.format(testfile['name']))
continue
if not testfile['opts'].get('guess', True):
continue
print('\n\n******** READING {}'.format(testfile['name']))
for filter_read_opts in (['Reader', 'delimiter', 'quotechar'], []):
# Copy read options except for those in filter_read_opts
guess_opts = dict((k, v) for k, v in testfile['opts'].items()
if k not in filter_read_opts)
table = ascii.read(testfile['name'], guess=True, **guess_opts)
assert_equal(table.dtype.names, testfile['cols'])
for colname in table.dtype.names:
assert_equal(len(table[colname]), testfile['nrows'])
def test_daophot_indef():
"""Test that INDEF is correctly interpreted as a missing value"""
table = ascii.read('t/daophot2.dat', Reader=ascii.Daophot)
for colname in table.colnames:
# Three columns have all INDEF values and are masked
mask_value = colname in ('OTIME', 'MAG', 'MERR', 'XAIRMASS')
assert np.all(table[colname].mask == mask_value)
def test_daophot_types():
"""
Test specific data types which are different from what would be
    inferred automatically based only on the data values. The DAOphot reader uses
the header information to assign types.
"""
table = ascii.read('t/daophot2.dat', Reader=ascii.Daophot)
assert table['LID'].dtype.char in 'fd' # float or double
assert table['MAG'].dtype.char in 'fd' # even without any data values
assert table['PIER'].dtype.char in 'US' # string (data values are consistent with int)
assert table['ID'].dtype.char in 'il' # int or long
def test_daophot_header_keywords():
table = ascii.read('t/daophot.dat', Reader=ascii.Daophot)
expected_keywords = (('NSTARFILE', 'test.nst.1', 'filename', '%-23s'),
('REJFILE', '"hello world"', 'filename', '%-23s'),
('SCALE', '1.', 'units/pix', '%-23.7g'),)
keywords = table.meta['keywords'] # Ordered dict of keyword structures
for name, value, units, format_ in expected_keywords:
keyword = keywords[name]
assert_equal(keyword['value'], value)
assert_equal(keyword['units'], units)
assert_equal(keyword['format'], format_)
def test_daophot_multiple_aperture():
table = ascii.read('t/daophot3.dat', Reader=ascii.Daophot)
assert 'MAG5' in table.colnames # MAG5 is one of the newly created column names
assert table['MAG5'][4] == 22.13 # A sample entry in daophot3.dat file
assert table['MERR2'][0] == 1.171
    assert np.all(table['RAPERT5'] == 23.3)  # assert all the 5th apertures are the same (23.3)
def test_daophot_multiple_aperture2():
table = ascii.read('t/daophot4.dat', Reader=ascii.Daophot)
    assert 'MAG15' in table.colnames  # MAG15 is one of the newly created column names
assert table['MAG15'][1] == -7.573 # A sample entry in daophot4.dat file
assert table['MERR2'][0] == 0.049
    assert np.all(table['RAPERT5'] == 5.)  # assert all the 5th apertures are the same (5.0)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_empty_table_no_header(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
table = ascii.read('t/no_data_without_header.dat', Reader=ascii.NoHeader,
guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_wrong_quote(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
table = ascii.read('t/simple.txt', guess=False, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
table = ascii.read('t/bad.txt', fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_extra_data_col2(fast_reader):
with pytest.raises(ascii.InconsistentTableError):
table = ascii.read('t/simple5.txt', delimiter='|', fast_reader=fast_reader)
@raises(IOError)
def test_missing_file():
table = ascii.read('does_not_exist')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
data = ascii.read('t/simple3.txt', names=names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_include_names(fast_reader):
names = ('c1', 'c2', 'c3', 'c4', 'c5', 'c6')
include_names = ('c1', 'c3')
data = ascii.read('t/simple3.txt', names=names, include_names=include_names,
delimiter='|', fast_reader=fast_reader)
assert_equal(data.dtype.names, include_names)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_set_exclude_names(fast_reader):
exclude_names = ('Y', 'object')
data = ascii.read('t/simple3.txt', exclude_names=exclude_names, delimiter='|',
fast_reader=fast_reader)
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'rad'))
def test_include_names_daophot():
include_names = ('ID', 'MAG', 'PIER')
data = ascii.read('t/daophot.dat', include_names=include_names)
assert_equal(data.dtype.names, include_names)
def test_exclude_names_daophot():
exclude_names = ('ID', 'YCENTER', 'MERR', 'NITER', 'CHI', 'PERROR')
data = ascii.read('t/daophot.dat', exclude_names=exclude_names)
assert_equal(data.dtype.names, ('XCENTER', 'MAG', 'MSKY', 'SHARPNESS', 'PIER'))
def test_custom_process_lines():
def process_lines(lines):
bars_at_ends = re.compile(r'^\| | \|$', re.VERBOSE)
striplines = (x.strip() for x in lines)
return [bars_at_ends.sub('', x) for x in striplines if len(x) > 0]
reader = ascii.get_reader(delimiter='|')
reader.inputter.process_lines = process_lines
data = reader.read('t/bars_at_ends.txt')
assert_equal(data.dtype.names, ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'))
assert_equal(len(data), 3)
def test_custom_process_line():
def process_line(line):
line_out = re.sub(r'^\|\s*', '', line.strip())
return line_out
reader = ascii.get_reader(data_start=2, delimiter='|')
reader.header.splitter.process_line = process_line
reader.data.splitter.process_line = process_line
data = reader.read('t/nls1_stackinfo.dbout')
cols = get_testfiles('t/nls1_stackinfo.dbout')['cols']
assert_equal(data.dtype.names, cols[1:])
def test_custom_splitters():
reader = ascii.get_reader()
reader.header.splitter = ascii.BaseSplitter()
reader.data.splitter = ascii.BaseSplitter()
f = 't/test4.dat'
data = reader.read(f)
testfile = get_testfiles(f)
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
assert_almost_equal(data.field('zabs1.nh')[2], 0.0839710433091)
assert_almost_equal(data.field('p1.gamma')[2], 1.25997502704)
assert_almost_equal(data.field('p1.ampl')[2], 0.000696444029148)
assert_equal(data.field('statname')[2], 'chi2modvar')
assert_almost_equal(data.field('statval')[2], 497.56468441)
def test_start_end():
data = ascii.read('t/test5.dat', header_start=1, data_start=3, data_end=-5)
assert_equal(len(data), 13)
assert_equal(data.field('statname')[0], 'chi2xspecvar')
assert_equal(data.field('statname')[-1], 'chi2gehrels')
def test_set_converters():
converters = {'zabs1.nh': [ascii.convert_numpy('int32'),
ascii.convert_numpy('float32')],
'p1.gamma': [ascii.convert_numpy('str')]
}
data = ascii.read('t/test4.dat', converters=converters)
assert_equal(str(data['zabs1.nh'].dtype), 'float32')
assert_equal(data['p1.gamma'][0], '1.26764500000')
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_string(fast_reader):
f = 't/simple.txt'
with open(f) as fd:
table = fd.read()
testfile = get_testfiles(f)
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_filelike(fast_reader):
f = 't/simple.txt'
testfile = get_testfiles(f)
with open(f, 'rb') as fd:
data = ascii.read(fd, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_from_lines(fast_reader):
f = 't/simple.txt'
with open(f) as fd:
table = fd.readlines()
testfile = get_testfiles(f)
data = ascii.read(table, fast_reader=fast_reader, **testfile['opts'])
assert_equal(data.dtype.names, testfile['cols'])
assert_equal(len(data), testfile['nrows'])
def test_comment_lines():
table = ascii.get_reader(Reader=ascii.Rdb)
data = table.read('t/apostrophe.rdb')
assert_equal(table.comment_lines, ['# first comment', ' # second comment'])
assert_equal(data.meta['comments'], ['first comment', 'second comment'])
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
**testfile['opts'])
assert_true((data['a'].mask == [False, True]).all())
assert_true((data['a'] == [1, 1]).all())
assert_true((data['b'].mask == [False, True]).all())
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_col(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1', 'b'), fast_reader=fast_reader,
**testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_include_names(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_include_names = ['b'], **testfile['opts'])
check_fill_values(data)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_exclude_names(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=('a', '1'), fast_reader=fast_reader,
fill_exclude_names = ['a'], **testfile['opts'])
check_fill_values(data)
def check_fill_values(data):
"""compare array column by column with expectation """
assert_true((data['a'].mask == [False, False]).all())
assert_true((data['a'] == ['1', 'a']).all())
assert_true((data['b'].mask == [False, True]).all())
# Check that masked value is "do not care" in comparison
assert_true((data['b'] == [2, -999]).all())
data['b'].mask = False # explicitly unmask for comparison
assert_true((data['b'] == [2, 1]).all())
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_fill_values_list(fast_reader):
f = 't/fill_values.txt'
testfile = get_testfiles(f)
data = ascii.read(f, fill_values=[('a', '42'), ('1', '42', 'a')],
fast_reader=fast_reader, **testfile['opts'])
data['a'].mask = False # explicitly unmask for comparison
assert_true((data['a'] == [42, 42]).all())
def test_masking_Cds():
f = 't/cds.dat'
testfile = get_testfiles(f)
data = ascii.read(f,
**testfile['opts'])
assert_true(data['AK'].mask[0])
assert_true(not data['Fit'].mask[0])
def test_null_Ipac():
f = 't/ipac.dat'
testfile = get_testfiles(f)
data = ascii.read(f, **testfile['opts'])
mask = np.array([(True, False, True, False, True),
(False, False, False, False, False)],
dtype=[(str('ra'), '|b1'),
(str('dec'), '|b1'),
(str('sai'), '|b1'),
(str('v2'), '|b1'),
(str('sptype'), '|b1')])
assert np.all(data.mask == mask)
def test_Ipac_meta():
keywords = OrderedDict((('intval', 1),
('floatval', 2.3e3),
('date', "Wed Sp 20 09:48:36 1995"),
('key_continue', 'IPAC keywords can continue across lines')))
comments = ['This is an example of a valid comment']
f = 't/ipac.dat'
testfile = get_testfiles(f)
data = ascii.read(f, **testfile['opts'])
assert data.meta['keywords'].keys() == keywords.keys()
for data_kv, kv in zip(data.meta['keywords'].values(), keywords.values()):
assert data_kv['value'] == kv
assert data.meta['comments'] == comments
def test_set_guess_kwarg():
"""Read a file using guess with one of the typical guess_kwargs explicitly set."""
data = ascii.read('t/space_delim_no_header.dat',
delimiter=',', guess=True)
assert(data.dtype.names == ('1 3.4 hello',))
assert(len(data) == 1)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_read_rdb_wrong_type(fast_reader):
"""Read RDB data with inconstent data type (except failure)"""
table = """col1\tcol2
N\tN
1\tHello"""
with pytest.raises(ValueError):
ascii.read(table, Reader=ascii.Rdb, fast_reader=fast_reader)
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_default_missing(fast_reader):
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,,',
'2, , 4.0 , ss '])
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.masked is True
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
# Single row table with a single missing element
table = """ a \n "" """
dat = ascii.read(table, fast_reader=fast_reader)
assert dat.pformat() == [' a ',
'---',
' --']
assert dat['a'].dtype.kind == 'i'
# Same test with a fixed width reader
table = '\n'.join([' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss'])
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine)
assert dat.masked is True
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 -- --',
' 2 -- 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=None)
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
dat = ascii.read(table, Reader=ascii.FixedWidthTwoLine, fill_values=[])
assert dat.masked is False
assert dat.pformat() == [' a b c d ',
'--- --- --- ---',
' 1 3 ',
' 2 4.0 ss']
def get_testfiles(name=None):
"""Set up information about the columns, number of rows, and reader params to
read a bunch of test files and verify columns and number of rows."""
testfiles = [
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 't/apostrophe.rdb',
'nrows': 2,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 't/apostrophe.tab',
'nrows': 2,
'opts': {'Reader': ascii.Tab}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 't/cds.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds}},
# Test malformed CDS file (issues #2241 #467)
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 't/cds_malformed.dat',
'nrows': 1,
'opts': {'Reader': ascii.Cds, 'data_start': 'guess'}},
{'cols': ('a', 'b', 'c'),
'name': 't/commented_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader}},
{'cols': ('a', 'b', 'c'),
'name': 't/commented_header2.dat',
'nrows': 2,
'opts': {'Reader': ascii.CommentedHeader, 'header_start': -1}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5'),
'name': 't/continuation.dat',
'nrows': 2,
'opts': {'Inputter': ascii.ContinuationLinesInputter,
'Reader': ascii.NoHeader}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 't/daophot.dat',
'nrows': 2,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALU-ES',
'VALU-ES_1',
'FLAG'),
'name': 't/sextractor.dat',
'nrows': 3,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 't/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('col0',
'objID',
'osrcid',
'xsrcid',
'SpecObjID',
'ra',
'dec',
'obsid',
'ccdid',
'z',
'modelMag_i',
'modelMagErr_i',
'modelMag_r',
'modelMagErr_r',
'expo',
'theta',
'rad_ecf_39',
'detlim90',
'fBlim90'),
'name': 't/nls1_stackinfo.dbout',
'nrows': 58,
'opts': {'data_start': 2, 'delimiter': '|', 'guess': False}},
{'cols': ('Index',
'RAh',
'RAm',
'RAs',
'DE-',
'DEd',
'DEm',
'DEs',
'Match',
'Class',
'AK',
'Fit'),
'name': 't/no_data_cds.dat',
'nrows': 0,
'opts': {'Reader': ascii.Cds}},
{'cols': ('ID',
'XCENTER',
'YCENTER',
'MAG',
'MERR',
'MSKY',
'NITER',
'SHARPNESS',
'CHI',
'PIER',
'PERROR'),
'name': 't/no_data_daophot.dat',
'nrows': 0,
'opts': {'Reader': ascii.Daophot}},
{'cols': ('NUMBER',
'FLUX_ISO',
'FLUXERR_ISO',
'VALUES',
'VALUES_1',
'FLAG'),
'name': 't/no_data_sextractor.dat',
'nrows': 0,
'opts': {'Reader': ascii.SExtractor}},
{'cols': ('ra', 'dec', 'sai', 'v2', 'sptype'),
'name': 't/no_data_ipac.dat',
'nrows': 0,
'opts': {'Reader': ascii.Ipac}},
{'cols': ('ra', 'v2'),
'name': 't/ipac.dat',
'nrows': 2,
'opts': {'Reader': ascii.Ipac, 'include_names': ['ra', 'v2']}},
{'cols': ('a', 'b', 'c'),
'name': 't/no_data_with_header.dat',
'nrows': 0,
'opts': {}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 't/short.rdb',
'nrows': 7,
'opts': {'Reader': ascii.Rdb}},
{'cols': ('agasc_id', 'n_noids', 'n_obs'),
'name': 't/short.tab',
'nrows': 7,
'opts': {'Reader': ascii.Tab}},
{'cols': ('test 1a', 'test2', 'test3', 'test4'),
'name': 't/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'"}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 't/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 2}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 't/simple.txt',
'nrows': 1,
'opts': {'quotechar': "'", 'header_start': 1}},
{'cols': ('top1', 'top2', 'top3', 'top4'),
'name': 't/simple.txt',
'nrows': 2,
'opts': {'quotechar': "'", 'header_start': 1, 'data_start': 1}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 't/simple2.txt',
'nrows': 3,
'opts': {'delimiter': '|'}},
{'cols': ('obsid', 'redshift', 'X', 'Y', 'object', 'rad'),
'name': 't/simple3.txt',
'nrows': 2,
'opts': {'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3', 'col4', 'col5', 'col6'),
'name': 't/simple4.txt',
'nrows': 3,
'opts': {'Reader': ascii.NoHeader, 'delimiter': '|'}},
{'cols': ('col1', 'col2', 'col3'),
'name': 't/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader}},
{'cols': ('col1', 'col2', 'col3'),
'name': 't/space_delim_no_header.dat',
'nrows': 2,
'opts': {'Reader': ascii.NoHeader, 'header_start': None}},
{'cols': ('obsid', 'offset', 'x', 'y', 'name', 'oaa'),
'name': 't/space_delim_blank_lines.txt',
'nrows': 3,
'opts': {}},
{'cols': ('zabs1.nh', 'p1.gamma', 'p1.ampl', 'statname', 'statval'),
'name': 't/test4.dat',
'nrows': 9,
'opts': {}},
{'cols': ('a', 'b', 'c'),
'name': 't/fill_values.txt',
'nrows': 2,
'opts': {'delimiter': ','}},
{'name': 't/whitespace.dat',
'cols': ('quoted colname with tab\tinside', 'col2', 'col3'),
'nrows': 2,
'opts': {'delimiter': r'\s'}},
{'name': 't/simple_csv.csv',
'cols': ('a','b','c'),
'nrows': 2,
'opts': {'Reader': ascii.Csv}},
{'name': 't/simple_csv_missing.csv',
'cols': ('a','b','c'),
'nrows': 2,
'skip': True,
'opts': {'Reader': ascii.Csv}},
{'cols': ('cola', 'colb', 'colc'),
'name': 't/latex1.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Facility', 'Id', 'exposure', 'date'),
'name': 't/latex2.tex',
'nrows': 3,
'opts': {'Reader': ascii.AASTex}},
{'cols': ('cola', 'colb', 'colc'),
'name': 't/latex3.tex',
'nrows': 2,
'opts': {'Reader': ascii.Latex}},
{'cols': ('Col1', 'Col2', 'Col3', 'Col4'),
'name': 't/fixed_width_2_line.txt',
'nrows': 2,
'opts': {'Reader': ascii.FixedWidthTwoLine}},
]
try:
import bs4 # pylint: disable=W0611
testfiles.append({'cols': ('Column 1', 'Column 2', 'Column 3'),
'name': 't/html.html',
'nrows': 3,
'opts': {'Reader': ascii.HTML}})
except ImportError:
pass
if name is not None:
return [x for x in testfiles if x['name'] == name][0]
else:
return testfiles
def test_header_start_exception():
    '''Check that certain Readers throw an exception if ``header_start`` is set.
    For certain Readers it does not make sense to set ``header_start``; they
throw an exception if you try.
This was implemented in response to issue #885.
'''
for readerclass in [ascii.NoHeader, ascii.SExtractor, ascii.Ipac,
ascii.BaseReader, ascii.FixedWidthNoHeader, ascii.Cds, ascii.Daophot]:
with pytest.raises(ValueError):
reader = ascii.core._get_reader(readerclass, header_start=5)
def test_csv_table_read():
"""
Check for a regression introduced by #1935. Pseudo-CSV file with
commented header line.
"""
lines = ['# a, b',
'1, 2',
'3, 4']
t = ascii.read(lines)
assert t.colnames == ['a', 'b']
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_overlapping_names(fast_reader):
"""
Check that the names argument list can overlap with the existing column names.
This tests the issue in #1991.
"""
t = ascii.read(['a b', '1 2'], names=['b', 'a'], fast_reader=fast_reader)
assert t.colnames == ['b', 'a']
def test_sextractor_units():
"""
    Make sure that the SExtractor reader correctly reads descriptions and units.
"""
table = ascii.read('t/sextractor2.dat', Reader=ascii.SExtractor, guess=False)
expected_units = [None, Unit('pix'), Unit('pix'), Unit('mag'),
Unit('mag'), None, Unit('pix**2'), Unit('m**(-6)'),
Unit('mag * arcsec**(-2)')]
expected_descrs = ['Running object number',
'Windowed position estimate along x',
'Windowed position estimate along y',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude',
'Extraction flags',
None,
'Barycenter position along MAMA x axis',
'Peak surface brightness above background']
for i, colname in enumerate(table.colnames):
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_sextractor_last_column_array():
"""
Make sure that the SExtractor reader handles the last column correctly when it is array-like.
"""
table = ascii.read('t/sextractor3.dat', Reader=ascii.SExtractor, guess=False)
expected_columns = ['X_IMAGE', 'Y_IMAGE', 'ALPHA_J2000', 'DELTA_J2000',
'MAG_AUTO', 'MAGERR_AUTO',
'MAG_APER', 'MAG_APER_1', 'MAG_APER_2', 'MAG_APER_3', 'MAG_APER_4', 'MAG_APER_5', 'MAG_APER_6',
'MAGERR_APER', 'MAGERR_APER_1', 'MAGERR_APER_2', 'MAGERR_APER_3', 'MAGERR_APER_4', 'MAGERR_APER_5', 'MAGERR_APER_6']
expected_units = [Unit('pix'), Unit('pix'), Unit('deg'), Unit('deg'),
Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'),
Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag'), Unit('mag')]
expected_descrs = ['Object position along x', None,
'Right ascension of barycenter (J2000)',
'Declination of barycenter (J2000)',
'Kron-like elliptical aperture magnitude',
'RMS error for AUTO magnitude',] + [
'Fixed aperture magnitude vector'] * 7 + [
'RMS error vector for fixed aperture mag.'] * 7
for i, colname in enumerate(table.colnames):
assert table[colname].name == expected_columns[i]
assert table[colname].unit == expected_units[i]
assert table[colname].description == expected_descrs[i]
def test_list_with_newlines():
"""
Check that lists of strings where some strings consist of just a newline
("\n") are parsed correctly.
"""
t = ascii.read(["abc", "123\n", "456\n", "\n", "\n"])
assert t.colnames == ['abc']
assert len(t) == 2
assert t[0][0] == 123
assert t[1][0] == 456
def test_commented_csv():
"""
    Check that the Csv reader does not ignore lines containing the # comment
    character, which is defined for most Basic readers.
"""
t = ascii.read(['#a,b', '1,2', '#3,4'], format='csv')
assert t.colnames == ['#a', 'b']
assert len(t) == 2
assert t['#a'][1] == '#3'
def test_meta_comments():
"""
Make sure that line comments are included in the ``meta`` attribute
of the output Table.
"""
t = ascii.read(['#comment1', '# comment2 \t', 'a,b,c', '1,2,3'])
assert t.colnames == ['a', 'b', 'c']
assert t.meta['comments'] == ['comment1', 'comment2']
def test_guess_fail():
"""
Check the error message when guess fails
"""
with pytest.raises(ascii.InconsistentTableError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic')
assert "** To figure out why the table did not read, use guess=False and" in str(err.value)
# Test the case with guessing enabled but for a format that has no free params
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='ipac')
assert 'At least one header line beginning and ending with delimiter required' in str(err.value)
# Test the case with guessing enabled but with all params specified
with pytest.raises(ValueError) as err:
ascii.read('asfdasdf\n1 2 3', format='basic', quotechar='"', delimiter=' ', fast_reader=False)
assert 'Number of header columns (1) inconsistent with data columns (3)' in str(err.value)
@pytest.mark.xfail('not HAS_BZ2')
def test_guessing_file_object():
"""
Test guessing a file object. Fixes #3013 and similar issue noted in #3019.
"""
t = ascii.read(open('t/ipac.dat.bz2', 'rb'))
assert t.colnames == ['ra','dec','sai','v2','sptype']
def test_pformat_roundtrip():
"""Check that the screen output of ``print tab`` can be read. See #3025."""
"""Read a table with empty values and ensure that corresponding entries are masked"""
table = '\n'.join(['a,b,c,d',
'1,3,1.11,1',
'2, 2, 4.0 , ss '])
dat = ascii.read(table)
out = ascii.read(dat.pformat())
assert len(dat) == len(out)
assert dat.colnames == out.colnames
for c in dat.colnames:
assert np.all(dat[c] == out[c])
def test_ipac_abbrev():
lines = ['| c1 | c2 | c3 | c4 | c5| c6 | c7 | c8 | c9|c10|c11|c12|',
'| r | rE | rea | real | D | do | dou | f | i | l | da| c |',
' 1 2 3 4 5 6 7 8 9 10 11 12 ']
dat = ascii.read(lines, format='ipac')
for name in dat.columns[0:8]:
assert dat[name].dtype.kind == 'f'
for name in dat.columns[8:10]:
assert dat[name].dtype.kind == 'i'
for name in dat.columns[10:12]:
assert dat[name].dtype.kind in ('U', 'S')
def test_almost_but_not_quite_daophot():
'''Regression test for #3319.
    This table looks so close to a daophot table that the daophot reader gets
    quite far before it fails with an AttributeError.
    Note that this table will actually be read as a Commented Header table with
    the columns ['some', 'header', 'info'].
'''
lines = ["# some header info",
"#F header info beginning with 'F'",
"1 2 3",
"4 5 6",
"7 8 9"]
dat = ascii.read(lines)
assert len(dat) == 3
@pytest.mark.parametrize('fast', [True, False])
def test_commented_header_comments(fast):
"""
Test that comments in commented_header are as expected and that the
table round-trips.
"""
lines = ['# a b',
'# comment 1',
'# comment 2',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert dat.meta['comments'] == ['comment 1', 'comment 2']
out = StringIO()
ascii.write(dat, out, format='commented_header', fast_writer=fast)
assert out.getvalue().splitlines() == lines
lines = ['# a b',
'1 2',
'3 4']
dat = ascii.read(lines, format='commented_header', fast_reader=fast)
assert 'comments' not in dat.meta
def test_probably_html():
"""
Test the routine for guessing if a table input to ascii.read is probably HTML
"""
for table in ('t/html.html',
'http://blah.com/table.html',
'https://blah.com/table.html',
'file://blah/table.htm',
'ftp://blah.com/table.html',
'file://blah.com/table.htm',
' <! doctype html > hello world',
'junk < table baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <tr foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype html > ', ' hello world'),
):
assert _probably_html(table) is True
for table in ('t/html.htms',
'Xhttp://blah.com/table.html',
' https://blah.com/table.htm',
'fole://blah/table.htm',
' < doctype html > hello world',
'junk < tble baz> <tr foo > <td bar> </td> </tr> </table> junk',
['junk < table baz>', ' <t foo >', ' <td bar> ', '</td> </tr>', '</table> junk'],
(' <! doctype htm > ', ' hello world'),
[[1, 2, 3]],
):
assert _probably_html(table) is False
@pytest.mark.parametrize('fast_reader', [True, False, 'force'])
def test_data_header_start(fast_reader):
tests = [(['# comment',
'',
' ',
'skip this line', # line 0
'a b', # line 1
'1 2'], # line 2
[{'header_start': 1},
{'header_start': 1, 'data_start': 2}
]
),
(['# comment',
'',
' \t',
'skip this line', # line 0
'a b', # line 1
'',
' \t',
'skip this line', # line 2
'1 2'], # line 3
[{'header_start': 1, 'data_start': 3}]),
(['# comment',
'',
' ',
'a b', # line 0
'',
' ',
'skip this line', # line 1
'1 2'], # line 2
[{'header_start': 0, 'data_start': 2},
{'data_start': 2}])]
for lines, kwargs_list in tests:
for kwargs in kwargs_list:
t = ascii.read(lines, format='basic', fast_reader=fast_reader,
guess=True, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 1
assert np.all(t['a'] == [1])
# Sanity check that the expected Reader is being used
assert get_read_trace()[-1]['kwargs']['Reader'] is (
ascii.Basic if (fast_reader is False) else ascii.FastBasic)
def test_table_with_no_newline():
"""
Test that an input file which is completely empty fails in the expected way.
Test that an input file with one line but no newline succeeds.
"""
# With guessing
table = BytesIO()
with pytest.raises(ascii.InconsistentTableError):
ascii.read(table)
# Without guessing
table = BytesIO()
with pytest.raises(ValueError) as err:
ascii.read(table, guess=False, fast_reader=False, format='basic')
assert 'No header line found' in str(err.value)
table = BytesIO()
with pytest.raises(ValueError) as err:
ascii.read(table, guess=False, fast_reader=True, format='fast_basic')
assert 'Inconsistent data column lengths' in str(err.value)
# Put a single line of column names but with no newline
for kwargs in [dict(),
dict(guess=False, fast_reader=False, format='basic'),
dict(guess=False, fast_reader=True, format='fast_basic')]:
table = BytesIO()
table.write(b'a b')
t = ascii.read(table, **kwargs)
assert t.colnames == ['a', 'b']
assert len(t) == 0
@pytest.mark.skipif('not HAS_PATHLIB')
def test_path_object():
fpath = pathlib.Path('t/simple.txt')
data = ascii.read(fpath)
assert len(data) == 2
assert sorted(list(data.columns)) == ['test 1a', 'test2', 'test3', 'test4']
assert data['test2'][1] == 'hat2'
def test_column_conversion_error():
"""
    Test that context information (the upstream exception message) from a column
conversion error is provided.
"""
ipac = """\
| col0 |
| double |
1 2
"""
with pytest.raises(ValueError) as err:
ascii.read(ipac, guess=False, format='ipac')
assert 'Column col0 failed to convert:' in str(err.value)
with pytest.raises(ValueError) as err:
ascii.read(['a b', '1 2'], guess=False, format='basic', converters={'a': []})
assert 'no converters' in str(err.value)
def test_non_C_locale_with_fast_reader():
"""Test code that forces "C" locale while calling fast reader (#4364)"""
current = locale.setlocale(locale.LC_ALL)
try:
if platform.system() == 'Darwin':
locale.setlocale(locale.LC_ALL, str('de_DE'))
else:
locale.setlocale(locale.LC_ALL, str('de_DE.utf8'))
for fast_reader in (True, False, {'use_fast_converter': False}, {'use_fast_converter': True}):
t = ascii.read(['a b', '1.5 2'], format='basic', guess=False,
fast_reader=fast_reader)
assert t['a'].dtype.kind == 'f'
except locale.Error as e:
pytest.skip('Locale error: {}'.format(e))
finally:
locale.setlocale(locale.LC_ALL, current)
def test_no_units_for_char_columns():
'''Test that a char column of a Table is assigned no unit and not
a dimensionless unit.'''
t1 = Table([["A"]], names="B")
out = StringIO()
ascii.write(t1, out, format="ipac")
t2 = ascii.read(out.getvalue(), format="ipac", guess=False)
assert t2["B"].unit is None
def test_initial_column_fill_values():
"""Regression test for #5336, #5338."""
class TestHeader(ascii.BasicHeader):
def _set_cols_from_names(self):
self.cols = [ascii.Column(name=x) for x in self.names]
# Set some initial fill values
for col in self.cols:
col.fill_values = {'--': '0'}
class Tester(ascii.Basic):
header_class = TestHeader
reader = ascii.get_reader(Reader=Tester)
assert reader.read("""# Column definition is the first uncommented line
# Default delimiter is the space character.
a b c
# Data starts after the header column definition, blank lines ignored
-- 2 3
4 5 6 """)['a'][0] is np.ma.masked
def test_latex_no_trailing_backslash():
"""
Test that latex/aastex file with no trailing backslash can be read.
"""
lines = r"""
\begin{table}
\begin{tabular}{ccc}
a & b & c \\
1 & 1.0 & c \\ % comment
3\% & 3.0 & e % comment
\end{tabular}
\end{table}
"""
dat = ascii.read(lines, format='latex')
assert dat.colnames == ['a', 'b', 'c']
assert np.all(dat['a'] == ['1', r'3\%'])
assert np.all(dat['c'] == ['c', 'e'])
def test_aastex_no_trailing_backslash():
lines = r"""
\begin{deluxetable}{ccc}
\tablehead{\colhead{a} & \colhead{b} & \colhead{c}}
\startdata
1 & 1.0 & c \\
2 & 2.0 & d \\ % comment
3\% & 3.0 & e % comment
\enddata
\end{deluxetable}
"""
dat = ascii.read(lines, format='aastex')
assert dat.colnames == ['a', 'b', 'c']
assert np.all(dat['a'] == ['1', r'3\%'])
assert np.all(dat['c'] == ['c', 'e'])
@pytest.mark.skipif('six.PY2')
@pytest.mark.parametrize('encoding', ['utf8', 'latin1', 'cp1252'])
def test_read_with_encoding(tmpdir, encoding):
data = {
'commented_header': u'# à b è \n 1 2 héllo',
'csv': u'à,b,è\n1,2,héllo'
}
testfile = str(tmpdir.join('test.txt'))
for fmt, content in data.items():
with open(testfile, 'w', encoding=encoding) as f:
f.write(content)
table = ascii.read(testfile, encoding=encoding)
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
for guess in (True, False):
table = ascii.read(testfile, format=fmt, fast_reader=False,
encoding=encoding, guess=guess)
assert table['è'].dtype.kind == 'U'
assert table.pformat() == [' à b è ',
'--- --- -----',
' 1 2 héllo']
def test_unsupported_read_with_encoding(tmpdir):
# Fast reader is not supported, make sure it raises an exception
with pytest.raises(ascii.ParameterError):
ascii.read('t/simple3.txt', guess=False, fast_reader='force',
encoding='latin1', format='fast_csv')
# Python 2 is not supported, make sure it raises an exception
if six.PY2:
with pytest.raises(ValueError):
ascii.read('t/simple3.txt', guess=False, fast_reader=False,
encoding='latin1', format='csv')
| bsd-3-clause |
mljar/mljar-api-python | tests/result_client_test.py | 1 | 4641 | '''
ResultClient tests.
'''
import os
import unittest
import pandas as pd
import time
from mljar.client.project import ProjectClient
from mljar.client.dataset import DatasetClient
from mljar.client.experiment import ExperimentClient
from mljar.client.result import ResultClient
from mljar.exceptions import BadRequestException
from .project_based_test import ProjectBasedTest, get_postfix
class ResultClientTest(ProjectBasedTest):
def setUp(self):
proj_title = 'Test project-01'+get_postfix()
proj_task = 'bin_class'
self.expt_title = 'Test experiment-01'
self.validation_kfolds = 5
self.validation_shuffle = True
self.validation_stratify = True
self.validation_train_split = None
self.algorithms = ['xgb']
self.metric = 'logloss'
self.tuning_mode = 'Normal'
self.time_constraint = 1
        self.create_ensemble = False
# setup project
self.project_client = ProjectClient()
self.project = self.project_client.create_project(title = proj_title, task = proj_task)
# load data
df = pd.read_csv('tests/data/test_1.csv')
cols = ['sepal length', 'sepal width', 'petal length', 'petal width']
target = 'class'
# add dataset
self.dataset = DatasetClient(self.project.hid).add_dataset_if_not_exists(df[cols], df[target])
def tearDown(self):
# clean
self.project_client.delete_project(self.project.hid)
def test_get_results_for_wrong_project(self):
with self.assertRaises(BadRequestException) as context:
# init result client
rc = ResultClient('wrong-hid')
self.assertTrue(rc is not None)
# get results - should raise exception
rc.get_results()
def test_get_results_for_project(self):
# init result client
rc = ResultClient(self.project.hid)
self.assertNotEqual(rc, None)
# get results - should be empty
results = rc.get_results()
self.assertEqual(results, [])
# add experiment
ec = ExperimentClient(self.project.hid)
# create new experiment
self.experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
                                                          self.tuning_mode, self.time_constraint, self.create_ensemble)
# wait some time till models are initialized
time.sleep(60)
# get results - should be some models there
results = rc.get_results()
self.assertNotEqual(len(results), 0)
def test_get_results_for_experiment(self):
# init result client
rc = ResultClient(self.project.hid)
self.assertNotEqual(rc, None)
# get results - should be empty
results = rc.get_results()
self.assertEqual(results, [])
# get results for wrong experiment hid
results = rc.get_results('wrong-hid')
self.assertEqual(results, [])
# add experiment
ec = ExperimentClient(self.project.hid)
# create new experiment
self.experiment = ec.add_experiment_if_not_exists(self.dataset, None, self.expt_title, self.project.task,
self.validation_kfolds, self.validation_shuffle,
self.validation_stratify, self.validation_train_split,
self.algorithms, self.metric,
                                                          self.tuning_mode, self.time_constraint, self.create_ensemble)
# wait some time till models are initialized
time.sleep(60)
# get results for experiment - should be some models there
results = rc.get_results(self.experiment.hid)
self.assertNotEqual(len(results), 0)
# get results for project
project_results = rc.get_results()
self.assertNotEqual(results, [])
# get results for wrong experiment hid
# all results from project should be returned
results_2 = rc.get_results('wrong-hid')
self.assertEqual(len(project_results), len(results_2))
for r in project_results:
# test __str__ method
self.assertTrue('id' in str(r))
self.assertTrue('model' in str(r))
self.assertTrue('status' in str(r))
| apache-2.0 |
jakevdp/networkx | networkx/readwrite/leda.py | 42 | 2847 | """
Read graphs in LEDA format.
LEDA is a C++ class library for efficient data types and algorithms.
Format
------
See http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
"""
# Original author: D. Eppstein, UC Irvine, August 12, 2003.
# The original code at http://www.ics.uci.edu/~eppstein/PADS/ is public domain.
__author__ = """Aric Hagberg ([email protected])"""
# Copyright (C) 2004-2015 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
__all__ = ['read_leda', 'parse_leda']
import networkx as nx
from networkx.exception import NetworkXError
from networkx.utils import open_file, is_string_like
@open_file(0,mode='rb')
def read_leda(path, encoding='UTF-8'):
"""Read graph in LEDA format from path.
Parameters
----------
path : file or string
File or filename to read. Filenames ending in .gz or .bz2 will be
uncompressed.
Returns
-------
G : NetworkX graph
Examples
--------
G=nx.read_leda('file.leda')
References
----------
.. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
"""
lines=(line.decode(encoding) for line in path)
G=parse_leda(lines)
return G
def parse_leda(lines):
"""Read graph in LEDA format from string or iterable.
Parameters
----------
lines : string or iterable
Data in LEDA format.
Returns
-------
G : NetworkX graph
Examples
--------
G=nx.parse_leda(string)
References
----------
.. [1] http://www.algorithmic-solutions.info/leda_guide/graphs/leda_native_graph_fileformat.html
"""
if is_string_like(lines): lines=iter(lines.split('\n'))
lines = iter([line.rstrip('\n') for line in lines \
if not (line.startswith('#') or line.startswith('\n') or line=='')])
for i in range(3):
next(lines)
# Graph
du = int(next(lines)) # -1=directed, -2=undirected
if du==-1:
G = nx.DiGraph()
else:
G = nx.Graph()
# Nodes
n =int(next(lines)) # number of nodes
node={}
for i in range(1,n+1): # LEDA counts from 1 to n
symbol=next(lines).rstrip().strip('|{}| ')
if symbol=="": symbol=str(i) # use int if no label - could be trouble
node[i]=symbol
G.add_nodes_from([s for i,s in node.items()])
# Edges
m = int(next(lines)) # number of edges
for i in range(m):
try:
s,t,reversal,label=next(lines).split()
        except (ValueError, StopIteration):
raise NetworkXError('Too few fields in LEDA.GRAPH edge %d'%(i+1))
# BEWARE: no handling of reversal edges
G.add_edge(node[int(s)],node[int(t)],label=label[2:-2])
return G
| bsd-3-clause |
HatsuneMiku0309/2014cdag10 | wsgi/static/Brython2.1.0-20140419-113919/Lib/unittest/test/test_break.py | 785 | 8138 | import gc
import io
import os
import sys
import signal
import weakref
import unittest
@unittest.skipUnless(hasattr(os, 'kill'), "Test requires os.kill")
@unittest.skipIf(sys.platform =="win32", "Test cannot run on Windows")
@unittest.skipIf(sys.platform == 'freebsd6', "Test kills regrtest on freebsd6 "
"if threads have been used")
class TestBreak(unittest.TestCase):
def setUp(self):
self._default_handler = signal.getsignal(signal.SIGINT)
def tearDown(self):
signal.signal(signal.SIGINT, self._default_handler)
unittest.signals._results = weakref.WeakKeyDictionary()
unittest.signals._interrupt_handler = None
def testInstallHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(unittest.signals._interrupt_handler.called)
def testRegisterResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
for ref in unittest.signals._results:
if ref is result:
break
elif ref is not result:
self.fail("odd object in result set")
else:
self.fail("result not found")
def testInterruptCaught(self):
default_handler = signal.getsignal(signal.SIGINT)
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.breakCaught)
def testSecondInterrupt(self):
result = unittest.TestResult()
unittest.installHandler()
unittest.registerResult(result)
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
result.breakCaught = True
self.assertTrue(result.shouldStop)
os.kill(pid, signal.SIGINT)
self.fail("Second KeyboardInterrupt not raised")
try:
test(result)
except KeyboardInterrupt:
pass
else:
self.fail("Second KeyboardInterrupt not raised")
self.assertTrue(result.breakCaught)
def testTwoResults(self):
unittest.installHandler()
result = unittest.TestResult()
unittest.registerResult(result)
new_handler = signal.getsignal(signal.SIGINT)
result2 = unittest.TestResult()
unittest.registerResult(result2)
self.assertEqual(signal.getsignal(signal.SIGINT), new_handler)
result3 = unittest.TestResult()
def test(result):
pid = os.getpid()
os.kill(pid, signal.SIGINT)
try:
test(result)
except KeyboardInterrupt:
self.fail("KeyboardInterrupt not handled")
self.assertTrue(result.shouldStop)
self.assertTrue(result2.shouldStop)
self.assertFalse(result3.shouldStop)
def testHandlerReplacedButCalled(self):
# If our handler has been replaced (is no longer installed) but is
# called by the *new* handler, then it isn't safe to delay the
# SIGINT and we should immediately delegate to the default handler
unittest.installHandler()
handler = signal.getsignal(signal.SIGINT)
def new_handler(frame, signum):
handler(frame, signum)
signal.signal(signal.SIGINT, new_handler)
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
else:
self.fail("replaced but delegated handler doesn't raise interrupt")
def testRunner(self):
# Creating a TextTestRunner with the appropriate argument should
# register the TextTestResult it creates
runner = unittest.TextTestRunner(stream=io.StringIO())
result = runner.run(unittest.TestSuite())
self.assertIn(result, unittest.signals._results)
def testWeakReferences(self):
# Calling registerResult on a result should not keep it alive
result = unittest.TestResult()
unittest.registerResult(result)
ref = weakref.ref(result)
del result
# For non-reference counting implementations
gc.collect();gc.collect()
self.assertIsNone(ref())
def testRemoveResult(self):
result = unittest.TestResult()
unittest.registerResult(result)
unittest.installHandler()
self.assertTrue(unittest.removeResult(result))
# Should this raise an error instead?
self.assertFalse(unittest.removeResult(unittest.TestResult()))
try:
pid = os.getpid()
os.kill(pid, signal.SIGINT)
except KeyboardInterrupt:
pass
self.assertFalse(result.shouldStop)
def testMainInstallsHandler(self):
failfast = object()
test = object()
verbosity = object()
result = object()
default_handler = signal.getsignal(signal.SIGINT)
class FakeRunner(object):
initArgs = []
runArgs = []
def __init__(self, *args, **kwargs):
self.initArgs.append((args, kwargs))
def run(self, test):
self.runArgs.append(test)
return result
class Program(unittest.TestProgram):
def __init__(self, catchbreak):
self.exit = False
self.verbosity = verbosity
self.failfast = failfast
self.catchbreak = catchbreak
self.testRunner = FakeRunner
self.test = test
self.result = None
p = Program(False)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
FakeRunner.initArgs = []
FakeRunner.runArgs = []
p = Program(True)
p.runTests()
self.assertEqual(FakeRunner.initArgs, [((), {'buffer': None,
'verbosity': verbosity,
'failfast': failfast,
'warnings': None})])
self.assertEqual(FakeRunner.runArgs, [test])
self.assertEqual(p.result, result)
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandler(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
# check that calling removeHandler multiple times has no ill-effect
unittest.removeHandler()
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
def testRemoveHandlerAsDecorator(self):
default_handler = signal.getsignal(signal.SIGINT)
unittest.installHandler()
@unittest.removeHandler
def test():
self.assertEqual(signal.getsignal(signal.SIGINT), default_handler)
test()
self.assertNotEqual(signal.getsignal(signal.SIGINT), default_handler)
| gpl-2.0 |
mlperf/training_results_v0.7 | NVIDIA/benchmarks/ssd/implementations/mxnet/mlperf_log_utils.py | 1 | 2395 | import collections
import os
import subprocess
import numpy as np
from mlperf_logging.mllog import constants as mlperf_constants
from mlperf_logging import mllog
class MPIWrapper(object):
def __init__(self):
self.comm = None
self.MPI = None
def get_comm(self):
if self.comm is None:
import mpi4py
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
self.MPI = MPI
return self.comm
def barrier(self):
c = self.get_comm()
# NOTE: MPI_Barrier is *not* working reliably at scale. Using MPI_Allreduce instead.
#c.Barrier()
val = np.ones(1, dtype=np.int32)
result = np.zeros(1, dtype=np.int32)
c.Allreduce(val, result)
def allreduce(self, x):
c = self.get_comm()
rank = c.Get_rank()
val = np.array(x, dtype=np.int32)
result = np.zeros_like(val, dtype=np.int32)
c.Allreduce([val, self.MPI.INT], [result, self.MPI.INT]) #, op=self.MPI.SUM)
return result
def rank(self):
c = self.get_comm()
return c.Get_rank()
mpiwrapper=MPIWrapper()
def all_reduce(v):
return mpiwrapper.allreduce(v)
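# Illustrative usage sketch (the values are made up; ``mpiwrapper`` and
# ``all_reduce`` are defined above): synchronize all ranks, then sum a
# per-rank counter element-wise across ranks.
#
#     mpiwrapper.barrier()
#     total_samples = all_reduce(local_samples)  # int32 sum over ranks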
mllogger = mllog.get_mllogger()
def log_start(*args, **kwargs):
_log_print(mllogger.start, *args, **kwargs)
def log_end(*args, **kwargs):
_log_print(mllogger.end, *args, **kwargs)
def log_event(*args, **kwargs):
_log_print(mllogger.event, *args, **kwargs)
def _log_print(logger, *args, **kwargs):
rank = mpiwrapper.rank()
uniq = kwargs.pop('uniq', True)
if 'stack_offset' not in kwargs:
kwargs['stack_offset'] = 3
if 'value' not in kwargs:
kwargs['value'] = None
if (uniq and rank == 0) or (not uniq):
logger(*args, **kwargs)
return
def mlperf_submission_log(benchmark):
num_nodes = os.environ.get('SLURM_JOB_NUM_NODES', 1)
log_event(
key=mlperf_constants.SUBMISSION_BENCHMARK,
value=benchmark,
)
log_event(
key=mlperf_constants.SUBMISSION_ORG,
value='NVIDIA')
log_event(
key=mlperf_constants.SUBMISSION_DIVISION,
value='closed')
log_event(
key=mlperf_constants.SUBMISSION_STATUS,
value='onprem')
log_event(
key=mlperf_constants.SUBMISSION_PLATFORM,
value='{}xSUBMISSION_PLATFORM_PLACEHOLDER'.format(num_nodes))
| apache-2.0 |
felipecocco/oppia | core/jobs_test.py | 6 | 33071 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for long running jobs and continuous computations."""
__author__ = 'Sean Lip'
import ast
from core import jobs
from core import jobs_registry
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.platform import models
(base_models, exp_models, stats_models) = models.Registry.import_models([
models.NAMES.base_model, models.NAMES.exploration, models.NAMES.statistics])
taskqueue_services = models.Registry.import_taskqueue_services()
transaction_services = models.Registry.import_transaction_services()
from core.tests import test_utils
import feconf
from google.appengine.ext import ndb
JOB_FAILED_MESSAGE = 'failed (as expected)'
class DummyJobManager(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls):
return 'output'
class AnotherDummyJobManager(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls):
return 'output'
class DummyFailingJobManager(jobs.BaseDeferredJobManager):
@classmethod
def _run(cls):
raise Exception(JOB_FAILED_MESSAGE)
class JobWithNoRunMethodManager(jobs.BaseDeferredJobManager):
pass
class JobManagerUnitTests(test_utils.GenericTestBase):
"""Test basic job manager operations."""
def test_create_new(self):
"""Test the creation of a new job."""
job_id = DummyJobManager.create_new()
self.assertTrue(job_id.startswith('DummyJob'))
self.assertEqual(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_NEW)
self.assertIsNone(DummyJobManager.get_time_queued_msec(job_id))
self.assertIsNone(DummyJobManager.get_time_started_msec(job_id))
self.assertIsNone(DummyJobManager.get_time_finished_msec(job_id))
self.assertIsNone(DummyJobManager.get_metadata(job_id))
self.assertIsNone(DummyJobManager.get_output(job_id))
self.assertIsNone(DummyJobManager.get_error(job_id))
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertFalse(DummyJobManager.has_finished(job_id))
def test_enqueue_job(self):
"""Test the enqueueing of a job."""
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.assertEqual(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_QUEUED)
self.assertIsNotNone(DummyJobManager.get_time_queued_msec(job_id))
self.assertIsNone(DummyJobManager.get_output(job_id))
def test_failure_for_job_enqueued_using_wrong_manager(self):
job_id = DummyJobManager.create_new()
with self.assertRaisesRegexp(Exception, 'Invalid job type'):
AnotherDummyJobManager.enqueue(job_id)
def test_failure_for_job_with_no_run_method(self):
job_id = JobWithNoRunMethodManager.create_new()
JobWithNoRunMethodManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
with self.assertRaisesRegexp(Exception, 'NotImplementedError'):
self.process_and_flush_pending_tasks()
def test_complete_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
DummyJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
time_queued_msec = DummyJobManager.get_time_queued_msec(job_id)
time_started_msec = DummyJobManager.get_time_started_msec(job_id)
time_finished_msec = DummyJobManager.get_time_finished_msec(job_id)
self.assertIsNotNone(time_queued_msec)
self.assertIsNotNone(time_started_msec)
self.assertIsNotNone(time_finished_msec)
self.assertLess(time_queued_msec, time_started_msec)
self.assertLess(time_started_msec, time_finished_msec)
metadata = DummyJobManager.get_metadata(job_id)
output = DummyJobManager.get_output(job_id)
error = DummyJobManager.get_error(job_id)
self.assertIsNone(metadata)
self.assertEqual(output, 'output')
self.assertIsNone(error)
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertTrue(DummyJobManager.has_finished(job_id))
def test_job_failure(self):
job_id = DummyFailingJobManager.create_new()
DummyFailingJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
with self.assertRaisesRegexp(Exception, 'Task failed'):
self.process_and_flush_pending_tasks()
self.assertEqual(
DummyFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
time_queued_msec = DummyFailingJobManager.get_time_queued_msec(job_id)
time_started_msec = DummyFailingJobManager.get_time_started_msec(
job_id)
time_finished_msec = DummyFailingJobManager.get_time_finished_msec(
job_id)
self.assertIsNotNone(time_queued_msec)
self.assertIsNotNone(time_started_msec)
self.assertIsNotNone(time_finished_msec)
self.assertLess(time_queued_msec, time_started_msec)
self.assertLess(time_started_msec, time_finished_msec)
metadata = DummyFailingJobManager.get_metadata(job_id)
output = DummyFailingJobManager.get_output(job_id)
error = DummyFailingJobManager.get_error(job_id)
self.assertIsNone(metadata)
self.assertIsNone(output)
self.assertIn(JOB_FAILED_MESSAGE, error)
self.assertFalse(DummyFailingJobManager.is_active(job_id))
self.assertTrue(DummyFailingJobManager.has_finished(job_id))
def test_status_code_transitions(self):
"""Test that invalid status code transitions are caught."""
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
DummyJobManager.register_start(job_id)
DummyJobManager.register_completion(job_id, 'output')
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.enqueue(job_id)
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.register_completion(job_id, 'output')
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.register_failure(job_id, 'error')
def test_different_jobs_are_independent(self):
job_id = DummyJobManager.create_new()
another_job_id = AnotherDummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
DummyJobManager.register_start(job_id)
AnotherDummyJobManager.enqueue(another_job_id)
self.assertEqual(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_STARTED)
self.assertEqual(
AnotherDummyJobManager.get_status_code(another_job_id),
jobs.STATUS_CODE_QUEUED)
def test_cannot_instantiate_jobs_from_abstract_base_classes(self):
with self.assertRaisesRegexp(
Exception, 'directly create a job using the abstract base'):
jobs.BaseJobManager.create_new()
def test_cannot_enqueue_same_job_twice(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.enqueue(job_id)
def test_can_enqueue_two_instances_of_the_same_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
job_id_2 = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id_2)
def test_cancel_kills_queued_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertTrue(DummyJobManager.is_active(job_id))
DummyJobManager.cancel(job_id, 'admin_user_id')
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertEquals(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_CANCELED)
self.assertIsNone(DummyJobManager.get_output(job_id))
self.assertEquals(
DummyJobManager.get_error(job_id), 'Canceled by admin_user_id')
def test_cancel_kills_started_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertTrue(DummyJobManager.is_active(job_id))
DummyJobManager.register_start(job_id)
# Cancel the job immediately after it has started.
DummyJobManager.cancel(job_id, 'admin_user_id')
# The job then finishes.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.register_completion(job_id, 'job_output')
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertEquals(
DummyJobManager.get_status_code(job_id), jobs.STATUS_CODE_CANCELED)
# Note that no results are recorded for this job.
self.assertIsNone(DummyJobManager.get_output(job_id))
self.assertEquals(
DummyJobManager.get_error(job_id), 'Canceled by admin_user_id')
def test_cancel_does_not_kill_completed_job(self):
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
self.assertTrue(DummyJobManager.is_active(job_id))
# Complete the job.
self.process_and_flush_pending_tasks()
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertEquals(
DummyJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
# Cancel the job after it has finished.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyJobManager.cancel(job_id, 'admin_user_id')
# The job should still have 'completed' status.
self.assertFalse(DummyJobManager.is_active(job_id))
self.assertEquals(
DummyJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
self.assertEquals(DummyJobManager.get_output(job_id), 'output')
self.assertIsNone(DummyJobManager.get_error(job_id))
def test_cancel_does_not_kill_failed_job(self):
job_id = DummyFailingJobManager.create_new()
DummyFailingJobManager.enqueue(job_id)
self.assertTrue(DummyFailingJobManager.is_active(job_id))
with self.assertRaisesRegexp(Exception, 'Task failed'):
self.process_and_flush_pending_tasks()
self.assertFalse(DummyFailingJobManager.is_active(job_id))
self.assertEquals(
DummyFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
# Cancel the job after it has finished.
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
DummyFailingJobManager.cancel(job_id, 'admin_user_id')
# The job should still have 'failed' status.
self.assertFalse(DummyFailingJobManager.is_active(job_id))
self.assertEquals(
DummyFailingJobManager.get_status_code(job_id),
jobs.STATUS_CODE_FAILED)
self.assertIsNone(DummyFailingJobManager.get_output(job_id))
self.assertIn(
'raise Exception', DummyFailingJobManager.get_error(job_id))
def test_cancelling_multiple_unfinished_jobs(self):
job1_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job1_id)
job2_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job2_id)
DummyJobManager.register_start(job1_id)
DummyJobManager.register_start(job2_id)
DummyJobManager.cancel_all_unfinished_jobs('admin_user_id')
self.assertFalse(DummyJobManager.is_active(job1_id))
self.assertFalse(DummyJobManager.is_active(job2_id))
self.assertEquals(
DummyJobManager.get_status_code(job1_id),
jobs.STATUS_CODE_CANCELED)
self.assertEquals(
DummyJobManager.get_status_code(job2_id),
jobs.STATUS_CODE_CANCELED)
self.assertIsNone(DummyJobManager.get_output(job1_id))
self.assertIsNone(DummyJobManager.get_output(job2_id))
self.assertEquals(
'Canceled by admin_user_id', DummyJobManager.get_error(job1_id))
self.assertEquals(
'Canceled by admin_user_id', DummyJobManager.get_error(job2_id))
def test_cancelling_one_unfinished_job(self):
job1_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job1_id)
job2_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job2_id)
DummyJobManager.register_start(job1_id)
DummyJobManager.register_start(job2_id)
DummyJobManager.cancel(job1_id, 'admin_user_id')
with self.assertRaisesRegexp(Exception, 'Invalid status code change'):
self.process_and_flush_pending_tasks()
DummyJobManager.register_completion(job2_id, 'output')
self.assertFalse(DummyJobManager.is_active(job1_id))
self.assertFalse(DummyJobManager.is_active(job2_id))
self.assertEquals(
DummyJobManager.get_status_code(job1_id),
jobs.STATUS_CODE_CANCELED)
self.assertEquals(
DummyJobManager.get_status_code(job2_id),
jobs.STATUS_CODE_COMPLETED)
self.assertIsNone(DummyJobManager.get_output(job1_id))
self.assertEquals(DummyJobManager.get_output(job2_id), 'output')
self.assertEquals(
'Canceled by admin_user_id', DummyJobManager.get_error(job1_id))
self.assertIsNone(DummyJobManager.get_error(job2_id))
TEST_INPUT_DATA = [(1, 2), (3, 4), (1, 5)]
SUM_MODEL_ID = 'all_data_id'
class NumbersModel(ndb.Model):
number = ndb.IntegerProperty()
class SumModel(ndb.Model):
total = ndb.IntegerProperty(default=0)
failed = ndb.BooleanProperty(default=False)
class TestDeferredJobManager(jobs.BaseDeferredJobManager):
"""Base class for testing deferred jobs."""
pass
class TestAdditionJobManager(TestDeferredJobManager):
"""Test job that sums all NumbersModel data.
The result is stored in a SumModel entity with id SUM_MODEL_ID.
"""
@classmethod
def _run(cls):
total = sum([
numbers_model.number for numbers_model in NumbersModel.query()])
SumModel(id=SUM_MODEL_ID, total=total).put()
class FailingAdditionJobManager(TestDeferredJobManager):
"""Test job that stores stuff in SumModel and then fails."""
IS_VALID_JOB_CLASS = True
@classmethod
def _run(cls):
total = sum([
numbers_model.number for numbers_model in NumbersModel.query()])
SumModel(id=SUM_MODEL_ID, total=total).put()
raise Exception('Oops, I failed.')
@classmethod
def _post_failure_hook(cls, job_id):
model = SumModel.get_by_id(SUM_MODEL_ID)
model.failed = True
model.put()
class DatastoreJobIntegrationTests(test_utils.GenericTestBase):
"""Tests the behavior of a job that affects data in the datastore.
This job gets all NumbersModel instances and sums their values, and puts
the summed values in a SumModel instance with id SUM_MODEL_ID. The
computation is redone from scratch each time the job is run.
"""
def _get_stored_total(self):
sum_model = SumModel.get_by_id(SUM_MODEL_ID)
return sum_model.total if sum_model else 0
def _populate_data(self):
"""Populate the datastore with four NumbersModel instances."""
NumbersModel(number=1).put()
NumbersModel(number=2).put()
NumbersModel(number=1).put()
NumbersModel(number=2).put()
def test_sequential_jobs(self):
self._populate_data()
self.assertEqual(self._get_stored_total(), 0)
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new())
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 6)
NumbersModel(number=3).put()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new())
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 9)
def test_multiple_enqueued_jobs(self):
self._populate_data()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new())
NumbersModel(number=3).put()
TestAdditionJobManager.enqueue(
TestAdditionJobManager.create_new())
self.assertEqual(self.count_jobs_in_taskqueue(), 2)
self.process_and_flush_pending_tasks()
self.assertEqual(self._get_stored_total(), 9)
def test_failing_job(self):
self._populate_data()
job_id = FailingAdditionJobManager.create_new()
FailingAdditionJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
with self.assertRaisesRegexp(
taskqueue_services.PermanentTaskFailure, 'Oops, I failed'):
self.process_and_flush_pending_tasks()
# The work that the failing job did before it failed is still done.
self.assertEqual(self._get_stored_total(), 6)
# The post-failure hook should have run.
self.assertTrue(SumModel.get_by_id(SUM_MODEL_ID).failed)
        self.assertEqual(
            FailingAdditionJobManager.get_status_code(job_id),
            jobs.STATUS_CODE_FAILED)
class SampleMapReduceJobManager(jobs.BaseMapReduceJobManager):
"""Test job that counts the total number of explorations."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel]
@staticmethod
def map(item):
yield ('sum', 1)
@staticmethod
def reduce(key, values):
yield (key, sum([int(value) for value in values]))
class MapReduceJobIntegrationTests(test_utils.GenericTestBase):
"""Tests MapReduce jobs end-to-end."""
def setUp(self):
"""Create an exploration so that there is something to count."""
super(MapReduceJobIntegrationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id', 'title', 'A category')
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_count_all_explorations(self):
job_id = SampleMapReduceJobManager.create_new()
SampleMapReduceJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
SampleMapReduceJobManager.get_output(job_id), [['sum', 1]])
self.assertEqual(
SampleMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
class JobRegistryTests(test_utils.GenericTestBase):
"""Tests job registry."""
def test_each_one_off_class_is_subclass_of_BaseJobManager(self):
for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
self.assertTrue(issubclass(klass, jobs.BaseJobManager))
def test_each_one_off_class_is_not_abstract(self):
for klass in jobs_registry.ONE_OFF_JOB_MANAGERS:
self.assertFalse(klass._is_abstract())
def test_validity_of_each_continuous_computation_class(self):
for klass in jobs_registry.ALL_CONTINUOUS_COMPUTATION_MANAGERS:
self.assertTrue(
issubclass(klass, jobs.BaseContinuousComputationManager))
event_types_listened_to = klass.get_event_types_listened_to()
self.assertTrue(isinstance(event_types_listened_to, list))
for event_type in event_types_listened_to:
self.assertTrue(isinstance(event_type, basestring))
self.assertTrue(issubclass(
event_services.Registry.get_event_class_by_type(
event_type),
event_services.BaseEventHandler))
rdc = klass._get_realtime_datastore_class()
self.assertTrue(issubclass(
rdc, jobs.BaseRealtimeDatastoreClassForContinuousComputations))
# The list of allowed base classes. This can be extended as the
# need arises, though we may also want to implement
# _get_continuous_computation_class() and
# _entity_created_before_job_queued() for other base classes
# that are added to this list.
ALLOWED_BASE_BATCH_JOB_CLASSES = [
jobs.BaseMapReduceJobManagerForContinuousComputations]
self.assertTrue(any([
issubclass(klass._get_batch_job_manager_class(), superclass)
for superclass in ALLOWED_BASE_BATCH_JOB_CLASSES]))
class JobQueriesTests(test_utils.GenericTestBase):
"""Tests queries for jobs."""
def test_get_data_for_recent_jobs(self):
self.assertEqual(jobs.get_data_for_recent_jobs(), [])
job_id = DummyJobManager.create_new()
DummyJobManager.enqueue(job_id)
recent_jobs = jobs.get_data_for_recent_jobs()
self.assertEqual(len(recent_jobs), 1)
self.assertDictContainsSubset({
'id': job_id,
'status_code': jobs.STATUS_CODE_QUEUED,
'job_type': 'DummyJobManager',
'is_cancelable': True,
'error': None
}, recent_jobs[0])
class TwoClassesMapReduceJobManager(jobs.BaseMapReduceJobManager):
"""A test job handler that counts entities in two datastore classes."""
@classmethod
def entity_classes_to_map_over(cls):
return [exp_models.ExplorationModel, exp_models.ExplorationRightsModel]
@staticmethod
def map(item):
yield ('sum', 1)
@staticmethod
def reduce(key, values):
yield (key, sum([int(value) for value in values]))
class TwoClassesMapReduceJobIntegrationTests(test_utils.GenericTestBase):
"""Tests MapReduce jobs using two classes end-to-end."""
def setUp(self):
"""Create an exploration so that there is something to count."""
super(TwoClassesMapReduceJobIntegrationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
'exp_id', 'title', 'A category')
# Note that this ends up creating an entry in the
# ExplorationRightsModel as well.
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_count_entities(self):
self.assertEqual(exp_models.ExplorationModel.query().count(), 1)
self.assertEqual(exp_models.ExplorationRightsModel.query().count(), 1)
job_id = TwoClassesMapReduceJobManager.create_new()
TwoClassesMapReduceJobManager.enqueue(job_id)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
TwoClassesMapReduceJobManager.get_output(job_id), [['sum', 2]])
self.assertEqual(
TwoClassesMapReduceJobManager.get_status_code(job_id),
jobs.STATUS_CODE_COMPLETED)
class StartExplorationRealtimeModel(
jobs.BaseRealtimeDatastoreClassForContinuousComputations):
count = ndb.IntegerProperty(default=0)
class StartExplorationMRJobManager(
jobs.BaseMapReduceJobManagerForContinuousComputations):
@classmethod
def _get_continuous_computation_class(cls):
return StartExplorationEventCounter
@classmethod
def entity_classes_to_map_over(cls):
return [stats_models.StartExplorationEventLogEntryModel]
@staticmethod
def map(item):
current_class = StartExplorationMRJobManager
if current_class._entity_created_before_job_queued(item):
yield (item.exploration_id, {
'event_type': item.event_type,
})
@staticmethod
def reduce(key, stringified_values):
started_count = 0
for value_str in stringified_values:
value = ast.literal_eval(value_str)
if value['event_type'] == feconf.EVENT_TYPE_START_EXPLORATION:
started_count += 1
stats_models.ExplorationAnnotationsModel(
id=key, num_starts=started_count).put()
class StartExplorationEventCounter(jobs.BaseContinuousComputationManager):
"""A continuous-computation job that counts 'start exploration' events.
This class should only be used in tests.
"""
@classmethod
def get_event_types_listened_to(cls):
return [feconf.EVENT_TYPE_START_EXPLORATION]
@classmethod
def _get_realtime_datastore_class(cls):
return StartExplorationRealtimeModel
@classmethod
def _get_batch_job_manager_class(cls):
return StartExplorationMRJobManager
@classmethod
def _kickoff_batch_job_after_previous_one_ends(cls):
"""Override this method so that it does not immediately start a
new MapReduce job. Non-test subclasses should not do this."""
pass
@classmethod
def _handle_incoming_event(
cls, active_realtime_layer, event_type, exp_id, exp_version,
state_name, session_id, params, play_type):
def _increment_counter():
realtime_class = cls._get_realtime_datastore_class()
realtime_model_id = realtime_class.get_realtime_id(
active_realtime_layer, exp_id)
realtime_model = realtime_class.get(
realtime_model_id, strict=False)
if realtime_model is None:
realtime_class(
id=realtime_model_id, count=1,
realtime_layer=active_realtime_layer).put()
else:
realtime_model.count += 1
realtime_model.put()
transaction_services.run_in_transaction(_increment_counter)
# Public query method.
@classmethod
def get_count(cls, exploration_id):
"""Return the number of 'start exploration' events received.
Answers the query by combining the existing MR job output and the
active realtime_datastore_class.
"""
mr_model = stats_models.ExplorationAnnotationsModel.get(
exploration_id, strict=False)
realtime_model = cls._get_realtime_datastore_class().get(
cls.get_active_realtime_layer_id(exploration_id), strict=False)
answer = 0
if mr_model is not None:
answer += mr_model.num_starts
if realtime_model is not None:
answer += realtime_model.count
return answer
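    # Worked example (illustrative numbers, not taken from the tests below):
    # if the last completed batch run stored num_starts == 3 in the
    # annotations model and the active realtime layer has since counted 2
    # more events, get_count() returns 3 + 2 == 5.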
class ContinuousComputationTests(test_utils.GenericTestBase):
"""Tests continuous computations for 'start exploration' events."""
EXP_ID = 'exp_id'
ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS = [
StartExplorationEventCounter]
def setUp(self):
"""Create an exploration and register the event listener manually."""
super(ContinuousComputationTests, self).setUp()
exploration = exp_domain.Exploration.create_default_exploration(
self.EXP_ID, 'title', 'A category')
exp_services.save_new_exploration('owner_id', exploration)
self.process_and_flush_pending_tasks()
def test_continuous_computation_workflow(self):
"""An integration test for continuous computations."""
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
# Record an event. This will put the event in the task queue.
event_services.StartExplorationEventHandler.record(
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id', {},
feconf.PLAY_TYPE_NORMAL)
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
# When the task queue is flushed, the data is recorded in the two
# realtime layers.
self.process_and_flush_pending_tasks()
self.assertEqual(self.count_jobs_in_taskqueue(), 0)
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
self.assertEqual(StartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID).count, 1)
self.assertEqual(StartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID).count, 1)
# The batch job has not run yet, so no entity for self.EXP_ID will
# have been created in the batch model yet.
with self.assertRaises(base_models.BaseModel.EntityNotFoundError):
stats_models.ExplorationAnnotationsModel.get(self.EXP_ID)
# Launch the batch computation.
StartExplorationEventCounter.start_computation()
# Data in realtime layer 0 is still there.
self.assertEqual(StartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID).count, 1)
# Data in realtime layer 1 has been deleted.
self.assertIsNone(StartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID, strict=False))
self.assertEqual(self.count_jobs_in_taskqueue(), 1)
self.process_and_flush_pending_tasks()
self.assertEqual(
stats_models.ExplorationAnnotationsModel.get(
self.EXP_ID).num_starts, 1)
# The overall count is still 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# Data in realtime layer 0 has been deleted.
self.assertIsNone(StartExplorationRealtimeModel.get(
'0:%s' % self.EXP_ID, strict=False))
# Data in realtime layer 1 has been deleted.
self.assertIsNone(StartExplorationRealtimeModel.get(
'1:%s' % self.EXP_ID, strict=False))
def test_events_coming_in_while_batch_job_is_running(self):
with self.swap(
jobs_registry, 'ALL_CONTINUOUS_COMPUTATION_MANAGERS',
self.ALL_CONTINUOUS_COMPUTATION_MANAGERS_FOR_TESTS):
# Currently no events have been recorded.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 0)
# Enqueue the batch computation. (It is running on 0 events.)
StartExplorationEventCounter._kickoff_batch_job()
# Record an event while this job is in the queue. Simulate
# this by directly calling on_incoming_event(), because using
# StartExplorationEventHandler.record() would just put the event
# in the task queue, which we don't want to flush yet.
event_services.StartExplorationEventHandler._handle_event(
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id', {},
feconf.PLAY_TYPE_NORMAL)
StartExplorationEventCounter.on_incoming_event(
event_services.StartExplorationEventHandler.EVENT_TYPE,
self.EXP_ID, 1, feconf.DEFAULT_INIT_STATE_NAME, 'session_id', {},
feconf.PLAY_TYPE_NORMAL)
# The overall count is now 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# Finish the job.
self.process_and_flush_pending_tasks()
# When the batch job completes, the overall count is still 1.
self.assertEqual(
StartExplorationEventCounter.get_count(self.EXP_ID), 1)
# The batch job result should still be 0, since the event arrived
# after the batch job started.
with self.assertRaises(base_models.BaseModel.EntityNotFoundError):
stats_models.ExplorationAnnotationsModel.get(self.EXP_ID)
# TODO(sll): When we have some concrete ContinuousComputations running in
# production, add an integration test to ensure that the registration of event
# handlers in the main codebase is happening correctly.
| apache-2.0 |
vladrulez/old_scripts | crtfile_check.py | 1 | 2457 | #!/usr/bin/env python
# vim: set filetype=python ts=4 sw=4 et si
# -*- coding: utf-8 -*-
# Author: Vladimir Blokhin
###########################
import sys,os,re,tempfile,subprocess
def run_command(command):
p = subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)
return p.communicate()[0]
def check_cert_openssl(cert_file,ca_file):
global fail
command = 'openssl verify -verbose -untrusted ' + str(ca_file) + ' ' + str(cert_file)
# print command
good_result = str(cert_file) + ': OK\n'
result = run_command(command)
cert_info = run_command('openssl x509 -noout -subject -issuer -in '+ str(cert_file))
if not result == good_result:
print "CA authority certificate check failed for the following cert:\n %s, error is \n%s" % (cert_info,result)
fail = True
def check_crtfile(f):
global fail
tempfiles = []
print "%s - checking ..." % f
current_file = open(f,'r')
filetext = current_file.read()
current_file.close()
for output in re.findall(r"(-+BEGIN CERTIFICATE-+.*?-+END CERTIFICATE-+)", filetext, re.DOTALL):
tf = tempfile.NamedTemporaryFile(delete=False)
tempfiles.append(tf.name)
tf.write(output)
# print tf.name
tf.close()
if len(tempfiles) < 2:
print "couldn't find more than one SSL certificate in %s" % f
return
for i in range(len(tempfiles)-1):
check_cert_openssl(tempfiles[i],tempfiles[i+1])
if fail:
print "%s - CHECK FAILED!" % f
else:
print "%s - CA authority check complete, all ok" % f
    for tmp_name in tempfiles:
        os.remove(tmp_name)
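# Chain-order sketch (hypothetical file layout): for a .crt bundle ordered
# leaf -> intermediate -> root, the pairwise loop above effectively runs
#   openssl verify -verbose -untrusted <intermediate> <leaf>
#   openssl verify -verbose -untrusted <root> <intermediate>
# i.e. each certificate is verified against the one that follows it in the
# file, which is assumed to be its issuer.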
if __name__ == "__main__":
if len(sys.argv) < 2 :
print "\nMissing parameters, exiting..."
print "Usage: "+sys.argv[0]+" crt filename or path to a folder with crt files\n"
sys.exit(1)
if not os.path.exists(sys.argv[1]) :
print "\n %s is not file or directory, exiting...\n" % sys.argv[1]
sys.exit(1)
if os.path.isfile(sys.argv[1]) :
crt_filename = [sys.argv[1]]
if os.path.isdir(sys.argv[1]):
crt_filename = [sys.argv[1]+'/'+f for f in os.listdir(sys.argv[1]) if re.match(r'.*\.crt', f)]
for f in crt_filename:
fail = False
check_crtfile(f) | apache-2.0 |
markcharyk/data-structures | data_structures/b_tree.py | 1 | 5000 | from data_structures.stack import Stack
class Node(object):
    def __init__(self, key=None, val=None, capacity=3):
        self.count = 0
        if key is not None:
            self.count = 1
            self.elems = [(key, val)]
        else:
            # An empty node still needs its first slot defined, otherwise
            # the append loop below has no list to extend.
            self.elems = [(None, None)]
        for i in xrange(capacity-1):
            self.elems.append((None, None))
        self.children = [None] * (capacity+1)
    def __repr__(self):
        """Readable listing of a node's elements; kept as a debugging aid."""
        return '[%s]' % ', '.join(str(elem) for elem in self.elems)
def add_to_node(self, key, val):
for i in range(len(self.elems)):
if self.elems[i][0] is None or self.elems[i][0] > key:
self.elems.pop()
self.elems.insert(i, (key, val))
self.count += 1
break
def del_from_node(self, idx):
self.elems.pop(idx)
self.count -= 1
self.elems.append((None, None))
def has(self, key):
for i in range(len(self.elems)):
if self.elems[i][0] == key:
return True
return False
def sort_children(self):
self.children.sort(key=lambda nod: nod.elems[0][0] if nod else None)
while self.children[0] is None:
self.children.pop(0)
self.children.append(None)
class BTree(object):
def __init__(self, degree=2):
self.root = Node()
self.stack = Stack()
if degree < 2:
raise InvalidDegreeError
self.degree = degree
    def __repr__(self):
        """Breadth-first listing of the tree's nodes; kept as a debugging aid."""
        return ''.join(self._bft())
def _bft(self):
import queue
keeper = queue.Queue()
keeper.enqueue(self.root)
while keeper.size() > 0:
temp = keeper.dequeue()
yield str(temp)
            if temp != '\n' and temp.children[0]:
keeper.enqueue('\n')
for nod in temp.children:
if nod is not None:
keeper.enqueue(nod)
def search(self, key):
"""Returns the value of the searched-for key"""
nod, idx = self._recursive_search(self.root, key)
return nod.elems[idx][1]
def _recursive_search(self, node, key):
"""Searches the subtree for a specific key and returns
where to find it if it is found
If it is not found, raises a custom error"""
# The index of the node in which the key is found
idx = 0
while idx <= node.count - 1 and key > node.elems[idx][0]:
# Look to the next key in the node
idx += 1
if idx <= node.count - 1 and key == node.elems[idx][0]:
# Found the key in the node
return node, idx
if not node.children[0]:
raise MissingError
else:
# Look to the appropriate child
return self._recursive_search(node.children[idx], key)
def insert(self, key, val):
"""Inserts a key-value pair into the tree"""
self._recursive_insert(self.root, key, val)
def _split_child(self, parent, child):
new = Node()
        for i in xrange(self.degree-1):
            # del_from_node shifts the remaining keys left, so the key to
            # move next always sits at index self.degree.
            new.add_to_node(*child.elems[self.degree])
            child.del_from_node(self.degree)
parent.add_to_node(*child.elems[self.degree-1])
child.del_from_node(self.degree-1)
if child.children[0]:
for i in xrange(self.degree):
new.children[i], child.children[i+self.degree] = \
child.children[i+self.degree], None
            child.sort_children()
parent.children[2*self.degree-1] = new
parent.sort_children()
if parent.count == 2 * self.degree - 1:
self._split_child(self.stack.pop().val, parent)
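    # Worked sketch for degree 2: a full leaf holding keys (a, b, c) splits
    # so that c moves into a fresh right sibling, the median b is promoted
    # into the parent, and only a remains in the original node; if that
    # promotion fills the parent in turn, the split cascades upward through
    # the ancestors recorded on self.stack during descent.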
def _recursive_insert(self, node, key, val):
if not node.children[0]:
node.add_to_node(key, val)
if node.count == 2 * self.degree - 1:
if node is self.root:
new = Node()
new.children[0], self.root = self.root, new
self.stack.push(new)
self._split_child(self.stack.pop().val, node)
else:
self.stack.push(node)
idx = node.count - 1
while idx >= 0 and key < node.elems[idx][0]:
idx -= 1
self._recursive_insert(node.children[idx+1], key, val)
def delete(self, key):
self._recursive_delete(self.root, key)
def _recursive_delete(self, node, key):
pass
def _move_key(self, key, src, dest):
pass
def _merge_nodes(self, node1, node2):
pass
class InvalidDegreeError(BaseException):
pass
class MissingError(BaseException):
pass
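# Hedged usage sketch (not part of the original module): it exercises only
# the insert/search paths above, and assumes data_structures.stack.Stack
# resolves on the import path with a pop() that returns a wrapper exposing
# .val, as _recursive_insert() and _split_child() expect.
if __name__ == '__main__':
    demo_tree = BTree(degree=2)
    for demo_key in (10, 20, 5, 6, 12):
        demo_tree.insert(demo_key, 'value-%d' % demo_key)
    # 6 was pushed into a leaf created by the root split that inserting 5
    # triggered, and search() finds it by walking down from the new root.
    assert demo_tree.search(6) == 'value-6'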
| mit |
daniel-leschkowski/generateDSv2 | tests/attr_groups1_sub.py | 2 | 5005 | #!/usr/bin/env python
#
# Generated by generateDS.py.
#
import sys
import attr_groups2_sup as supermod
etree_ = None
Verbose_import_ = False
( XMLParser_import_none, XMLParser_import_lxml,
XMLParser_import_elementtree
) = range(3)
XMLParser_import_library = None
try:
# lxml
from lxml import etree as etree_
XMLParser_import_library = XMLParser_import_lxml
if Verbose_import_:
print("running with lxml.etree")
except ImportError:
try:
# cElementTree from Python 2.5+
import xml.etree.cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree on Python 2.5+")
except ImportError:
try:
# ElementTree from Python 2.5+
import xml.etree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree on Python 2.5+")
except ImportError:
try:
# normal cElementTree install
import cElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with cElementTree")
except ImportError:
try:
# normal ElementTree install
import elementtree.ElementTree as etree_
XMLParser_import_library = XMLParser_import_elementtree
if Verbose_import_:
print("running with ElementTree")
except ImportError:
raise ImportError(
"Failed to import ElementTree from any known place")
def parsexml_(*args, **kwargs):
if (XMLParser_import_library == XMLParser_import_lxml and
'parser' not in kwargs):
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
kwargs['parser'] = etree_.ETCompatXMLParser()
doc = etree_.parse(*args, **kwargs)
return doc
#
# Globals
#
ExternalEncoding = 'ascii'
#
# Data representation classes
#
class GetUserReqSub(supermod.GetUserReq):
def __init__(self, value04=None, value05=None, value06=None, value07=None, value01=None, value02=None, value03=None, sequence=None, returnedTags=None):
super(GetUserReqSub, self).__init__(value04, value05, value06, value07, value01, value02, value03, sequence, returnedTags, )
supermod.GetUserReq.subclass = GetUserReqSub
# end class GetUserReqSub
def get_root_tag(node):
tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
rootClass = None
rootClass = supermod.GDSClassesMapping.get(tag)
if rootClass is None and hasattr(supermod, tag):
rootClass = getattr(supermod, tag)
return tag, rootClass
def parse(inFilename):
doc = parsexml_(inFilename)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'getUser'
rootClass = supermod.GetUserReq
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_=rootTag,
## namespacedef_='',
## pretty_print=True)
doc = None
return rootObj
def parseString(inString):
from StringIO import StringIO
doc = parsexml_(StringIO(inString))
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'getUser'
rootClass = supermod.GetUserReq
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('<?xml version="1.0" ?>\n')
## rootObj.export(sys.stdout, 0, name_=rootTag,
## namespacedef_='')
return rootObj
def parseLiteral(inFilename):
doc = parsexml_(inFilename)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'getUser'
rootClass = supermod.GetUserReq
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
## sys.stdout.write('#from attr_groups2_sup import *\n\n')
## sys.stdout.write('import attr_groups2_sup as model_\n\n')
## sys.stdout.write('rootObj = model_.getUser(\n')
## rootObj.exportLiteral(sys.stdout, 0, name_="getUser")
## sys.stdout.write(')\n')
return rootObj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
print USAGE_TEXT
sys.exit(1)
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
infilename = args[0]
root = parse(infilename)
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
| mit |
lakshmi-kannan/st2 | st2tests/st2tests/http.py | 13 | 1095 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class FakeResponse(object):
def __init__(self, text, status_code, reason):
self.text = text
self.status_code = status_code
self.reason = reason
def json(self):
return json.loads(self.text)
def raise_for_status(self):
raise Exception(self.reason)
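# Usage sketch in a test (hypothetical values); note that this stub's
# raise_for_status() raises unconditionally, regardless of status_code:
#   resp = FakeResponse('{"ok": true}', 200, 'OK')
#   resp.json()              # -> {u'ok': True}
#   resp.raise_for_status()  # raises Exception('OK')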
| apache-2.0 |
tbabej/freeipa | ipapython/dnssec/bindmgr.py | 2 | 8625 | #
# Copyright (C) 2014 FreeIPA Contributors see COPYING for license
#
from datetime import datetime
import dns.name
import errno
import os
import shutil
import stat
import ipalib.constants
from ipapython.dn import DN
from ipapython import ipa_log_manager, ipautil
from ipaplatform.paths import paths
from ipapython.dnssec.temp import TemporaryDirectory
time_bindfmt = '%Y%m%d%H%M%S'
# this daemon should run under ods:named user:group
# user has to be ods because ODSMgr.py sends signal to ods-enforcerd
FILE_PERM = (stat.S_IRUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IWUSR)
DIR_PERM = (stat.S_IRWXU | stat.S_IRWXG)
class BINDMgr(object):
"""BIND key manager. It does LDAP->BIND key files synchronization.
One LDAP object with idnsSecKey object class will produce
single pair of BIND key files.
"""
def __init__(self, api):
self.api = api
self.log = ipa_log_manager.log_mgr.get_logger(self)
self.ldap_keys = {}
self.modified_zones = set()
def notify_zone(self, zone):
cmd = ['rndc', 'sign', zone.to_text()]
result = ipautil.run(cmd, capture_output=True)
self.log.info('%s', result.output_log)
def dn2zone_name(self, dn):
"""cn=KSK-20140813162153Z-cede9e182fc4af76c4bddbc19123a565,cn=keys,idnsname=test,cn=dns,dc=ipa,dc=example"""
# verify that metadata object is under DNS sub-tree
dn = DN(dn)
container = DN(self.api.env.container_dns, self.api.env.basedn)
idx = dn.rfind(container)
assert idx != -1, 'Metadata object %s is not inside %s' % (dn, container)
assert len(dn[idx - 1]) == 1, 'Multi-valued RDN as zone name is not supported'
return dns.name.from_text(dn[idx - 1]['idnsname'])
def time_ldap2bindfmt(self, str_val):
dt = datetime.strptime(str_val, ipalib.constants.LDAP_GENERALIZED_TIME_FORMAT)
return dt.strftime(time_bindfmt)
def dates2params(self, ldap_attrs):
"""Convert LDAP timestamps to list of parameters suitable
for dnssec-keyfromlabel utility"""
attr2param = {'idnsseckeypublish': '-P',
'idnsseckeyactivate': '-A',
'idnsseckeyinactive': '-I',
'idnsseckeydelete': '-D'}
params = []
for attr, param in attr2param.items():
params.append(param)
if attr in ldap_attrs:
assert len(ldap_attrs[attr]) == 1, 'Timestamp %s is expected to be single-valued' % attr
params.append(self.time_ldap2bindfmt(ldap_attrs[attr][0]))
else:
params.append('none')
return params
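    # Shape sketch (timestamps hypothetical; pair order follows dict
    # iteration, so it is not guaranteed): with only idnsseckeypublish set,
    # the result looks like
    #   ['-P', '20140813162153', '-A', 'none', '-I', 'none', '-D', 'none']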
def ldap_event(self, op, uuid, attrs):
"""Record single LDAP event - key addition, deletion or modification.
Change is only recorded to memory.
self.sync() has to be called to synchronize change to BIND."""
assert op == 'add' or op == 'del' or op == 'mod'
zone = self.dn2zone_name(attrs['dn'])
self.modified_zones.add(zone)
zone_keys = self.ldap_keys.setdefault(zone, {})
if op == 'add':
self.log.info('Key metadata %s added to zone %s' % (attrs['dn'], zone))
zone_keys[uuid] = attrs
elif op == 'del':
self.log.info('Key metadata %s deleted from zone %s' % (attrs['dn'], zone))
zone_keys.pop(uuid)
elif op == 'mod':
self.log.info('Key metadata %s updated in zone %s' % (attrs['dn'], zone))
zone_keys[uuid] = attrs
def install_key(self, zone, uuid, attrs, workdir):
"""Run dnssec-keyfromlabel on given LDAP object.
:returns: base file name of output files, e.g. Kaaa.test.+008+19719"""
self.log.info('attrs: %s', attrs)
assert attrs.get('idnsseckeyzone', ['FALSE'])[0] == 'TRUE', \
'object %s is not a DNS zone key' % attrs['dn']
uri = "%s;pin-source=%s" % (attrs['idnsSecKeyRef'][0], paths.DNSSEC_SOFTHSM_PIN)
cmd = [paths.DNSSEC_KEYFROMLABEL, '-K', workdir, '-a', attrs['idnsSecAlgorithm'][0], '-l', uri]
cmd += self.dates2params(attrs)
if attrs.get('idnsSecKeySep', ['FALSE'])[0].upper() == 'TRUE':
cmd += ['-f', 'KSK']
if attrs.get('idnsSecKeyRevoke', ['FALSE'])[0].upper() == 'TRUE':
cmd += ['-R', datetime.now().strftime(time_bindfmt)]
cmd.append(zone.to_text())
# keys has to be readable by ODS & named
result = ipautil.run(cmd, capture_output=True)
basename = result.output.strip()
private_fn = "%s/%s.private" % (workdir, basename)
os.chmod(private_fn, FILE_PERM)
# this is useful mainly for debugging
with open("%s/%s.uuid" % (workdir, basename), 'w') as uuid_file:
uuid_file.write(uuid)
with open("%s/%s.dn" % (workdir, basename), 'w') as dn_file:
dn_file.write(attrs['dn'])
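    # Illustrative command assembled above (all values hypothetical):
    #   dnssec-keyfromlabel -K <workdir> -a RSASHA256 \
    #     -l '<key-ref>;pin-source=<pin-file>' \
    #     -P 20140813162153 -A 20140813162153 -I none -D none example.test.
    # with '-f KSK' appended for SEP keys and '-R <now>' for revoked ones.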
def get_zone_dir_name(self, zone):
"""Escape zone name to form suitable for file-system.
This method has to be equivalent to zr_get_zone_path()
in bind-dyndb-ldap/zone_register.c."""
if zone == dns.name.root:
return "@"
# strip final (empty) label
zone = zone.relativize(dns.name.root)
escaped = ""
for label in zone:
for char in label:
c = ord(char)
if ((c >= 0x30 and c <= 0x39) or # digit
(c >= 0x41 and c <= 0x5A) or # uppercase
(c >= 0x61 and c <= 0x7A) or # lowercase
c == 0x2D or # hyphen
c == 0x5F): # underscore
if (c >= 0x41 and c <= 0x5A): # downcase
c += 0x20
escaped += chr(c)
else:
escaped += "%%%02X" % c
escaped += '.'
# strip trailing period
return escaped[:-1]
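    # Escaping examples implied by the rules above (inputs hypothetical):
    #   dns.name.root                        -> '@'
    #   dns.name.from_text('Example.TEST.')  -> 'example.test'
    #   dns.name.from_text('a b.test.')      -> 'a%20b.test'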
def sync_zone(self, zone):
self.log.info('Synchronizing zone %s' % zone)
zone_path = os.path.join(paths.BIND_LDAP_DNS_ZONE_WORKDIR,
self.get_zone_dir_name(zone))
try:
os.makedirs(zone_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
# fix HSM permissions
# TODO: move out
for prefix, dirs, files in os.walk(paths.DNSSEC_TOKENS_DIR, topdown=True):
for name in dirs:
fpath = os.path.join(prefix, name)
self.log.debug('Fixing directory permissions: %s', fpath)
os.chmod(fpath, DIR_PERM | stat.S_ISGID)
for name in files:
fpath = os.path.join(prefix, name)
self.log.debug('Fixing file permissions: %s', fpath)
os.chmod(fpath, FILE_PERM)
# TODO: move out
with TemporaryDirectory(zone_path) as tempdir:
for uuid, attrs in self.ldap_keys[zone].items():
self.install_key(zone, uuid, attrs, tempdir)
# keys were generated in a temporary directory, swap directories
target_dir = "%s/keys" % zone_path
try:
shutil.rmtree(target_dir)
except OSError as e:
if e.errno != errno.ENOENT:
raise e
shutil.move(tempdir, target_dir)
os.chmod(target_dir, DIR_PERM)
self.notify_zone(zone)
def sync(self, dnssec_zones):
"""Synchronize list of zones in LDAP with BIND.
dnssec_zones lists zones which should be processed. All other zones
will be ignored even though they were modified using ldap_event().
This filter is useful in cases where LDAP contains DNS zones which
have old metadata objects and DNSSEC disabled. Such zones must be
ignored to prevent errors while calling dnssec-keyfromlabel or rndc.
"""
self.log.debug('Key metadata in LDAP: %s' % self.ldap_keys)
self.log.debug('Zones modified but skipped during bindmgr.sync: %s',
self.modified_zones - dnssec_zones)
for zone in self.modified_zones.intersection(dnssec_zones):
self.sync_zone(zone)
self.modified_zones = set()
def diff_zl(self, s1, s2):
"""Compute zones present in s1 but not present in s2.
Returns: List of (uuid, name) tuples with zones present only in s1."""
s1_extra = s1.uuids - s2.uuids
removed = [(uuid, name) for (uuid, name) in s1.mapping.items()
if uuid in s1_extra]
return removed
| gpl-3.0 |
Antiun/yelizariev-saas | saas_portal/models/saas_portal.py | 1 | 19270 | # -*- coding: utf-8 -*-
import openerp
from openerp import models, fields, api, SUPERUSER_ID, exceptions
from openerp.addons.saas_utils import connector, database
from openerp import http
from openerp.tools import config, scan_languages
from openerp.tools.translate import _
from openerp.addons.base.res.res_partner import _tz_get
import time
from datetime import datetime, timedelta
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT
import urllib2
import simplejson
import werkzeug
import requests
import random
import logging
_logger = logging.getLogger(__name__)
class SaasPortalServer(models.Model):
_name = 'saas_portal.server'
_description = 'SaaS Server'
_rec_name = 'name'
_inherit = ['mail.thread']
_inherits = {'oauth.application': 'oauth_application_id'}
name = fields.Char('Database name')
oauth_application_id = fields.Many2one('oauth.application', 'OAuth Application', required=True, ondelete='cascade')
sequence = fields.Integer('Sequence')
active = fields.Boolean('Active', default=True)
request_scheme = fields.Selection([('http', 'http'), ('https', 'https')], 'Scheme', default='http', required=True)
verify_ssl = fields.Boolean('Verify SSL', default=True, help="verify SSL certificates for HTTPS requests, just like a web browser")
request_port = fields.Integer('Request Port', default=80)
client_ids = fields.One2many('saas_portal.client', 'server_id', string='Clients')
@api.model
def create(self, vals):
self = super(SaasPortalServer, self).create(vals)
self.oauth_application_id._get_access_token(create=True)
return self
@api.one
def _request_params(self, path='/web', scheme=None, port=None, state={}, scope=None, client_id=None):
scheme = scheme or self.request_scheme
port = port or self.request_port
scope = scope or ['userinfo', 'force_login', 'trial', 'skiptheuse']
scope = ' '.join(scope)
client_id = client_id or self.env['saas_portal.client'].generate_client_id()
params = {
'scope': scope,
'state': simplejson.dumps(state),
'redirect_uri': '{scheme}://{saas_server}:{port}{path}'.format(scheme=scheme, port=port, saas_server=self.name, path=path),
'response_type': 'token',
'client_id': client_id,
}
return params
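    # Example of the produced mapping (server name, state and client_id are
    # hypothetical; the @api.one decorator wraps the return value in a list):
    #   {'scope': 'userinfo force_login trial skiptheuse',
    #    'state': '{"d": "db1"}',
    #    'redirect_uri': 'http://srv1.example.com:80/web',
    #    'response_type': 'token',
    #    'client_id': '<generated uuid>'}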
@api.one
def _request(self, **kwargs):
params = self._request_params(**kwargs)[0]
url = '/oauth2/auth?%s' % werkzeug.url_encode(params)
return url
@api.one
def _request_server(self, path=None, scheme=None, port=None, **kwargs):
scheme = scheme or self.request_scheme
port = port or self.request_port
params = self._request_params(**kwargs)[0]
access_token = self.oauth_application_id._get_access_token(create=True)
params.update({
'token_type': 'Bearer',
'access_token': access_token,
'expires_in': 3600,
})
url = '{scheme}://{saas_server}:{port}{path}?{params}'.format(scheme=scheme, saas_server=self.name, port=port, path=path, params=werkzeug.url_encode(params))
return url
@api.multi
def action_redirect_to_server(self):
r = self[0]
url = '{scheme}://{saas_server}:{port}{path}'.format(scheme=r.request_scheme, saas_server=r.name, port=r.request_port, path='/web')
return {
'type': 'ir.actions.act_url',
'target': 'new',
'name': 'Redirection',
'url': url
}
@api.model
def action_sync_server_all(self):
self.search([]).action_sync_server()
@api.one
def action_sync_server(self):
state = {
'd': self.name,
'client_id': self.client_id,
}
url = self._request_server(path='/saas_server/sync_server', state=state, client_id=self.client_id)[0]
res = requests.get(url, verify=(self.request_scheme == 'https' and self.verify_ssl))
if res.ok != True:
msg = """Status Code - %s
Reason - %s
URL - %s
""" % (res.status_code, res.reason, res.url)
            raise exceptions.Warning(msg)
data = simplejson.loads(res.text)
for r in data:
r['server_id'] = self.id
client = self.env['saas_portal.client'].search([
('client_id', '=', r.get('client_id')),
])
if not client:
database = self.env['saas_portal.database'].search([('client_id', '=', r.get('client_id'))])
if database:
database.write(r)
continue
client = self.env['saas_portal.client'].create(r)
else:
client.write(r)
return None
@api.model
def get_saas_server(self):
saas_server_list = self.env['saas_portal.server'].sudo().search([])
return saas_server_list[random.randint(0, len(saas_server_list) - 1)]
class SaasPortalPlan(models.Model):
_name = 'saas_portal.plan'
name = fields.Char('Plan', required=True)
summary = fields.Char('Summary')
template_id = fields.Many2one('saas_portal.database', 'Template')
demo = fields.Boolean('Install Demo Data')
def _get_default_lang(self):
return self.env.lang
def _default_tz(self):
return self.env.user.tz
lang = fields.Selection(scan_languages(), 'Language', default=_get_default_lang)
tz = fields.Selection(_tz_get, 'TimeZone', default=_default_tz)
sequence = fields.Integer('Sequence')
state = fields.Selection([('draft', 'Draft'), ('confirmed', 'Confirmed')],
'State', compute='_get_state', store=True)
expiration = fields.Integer('Expiration (hours)', help='time to delete database. Use for demo')
_order = 'sequence'
dbname_template = fields.Char('DB Names', help='Template for db name. Use %i for numbering. Ignore if you use manually created db names', placeholder='crm-%i.odoo.com')
server_id = fields.Many2one('saas_portal.server', string='SaaS Server',
help='User this saas server or choose random')
website_description = fields.Text('Website description')
logo = fields.Binary('Logo')
@api.one
@api.depends('template_id.state')
def _get_state(self):
if self.template_id.state == 'template':
self.state = 'confirmed'
else:
self.state = 'draft'
@api.one
def _new_database_vals(self, vals):
if self.expiration:
now = datetime.now()
delta = timedelta(hours=self.expiration)
vals['expiration_datetime'] = (now + delta).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
return vals
@api.multi
def create_new_database(self, dbname=None, client_id=None, partner_id=None, user_id=None):
self.ensure_one()
server = self.server_id
if not server:
server = self.env['saas_portal.server'].get_saas_server()
server.action_sync_server()
vals = {'name': dbname or self.generate_dbname()[0],
'server_id': server.id,
'plan_id': self.id,
'partner_id': partner_id,
}
client = None
if client_id:
vals['client_id'] = client_id
client = self.env['saas_portal.client'].search([('client_id', '=', client_id)])
vals = self._new_database_vals(vals)[0]
if client:
client.write(vals)
else:
client = self.env['saas_portal.client'].create(vals)
client_id = client.client_id
scheme = server.request_scheme
port = server.request_port
if user_id:
owner_user = self.env['res.users'].browse(user_id)
else:
owner_user = self.env.user
owner_user_data = {
'user_id': owner_user.id,
'login': owner_user.login,
'name': owner_user.name,
'email': owner_user.email,
}
state = {
'd': client.name,
'e': client.expiration_datetime,
'r': '%s://%s:%s/web' % (scheme, client.name, port),
'owner_user': owner_user_data,
}
if self.template_id:
state.update({'db_template': self.template_id.name})
scope = ['userinfo', 'force_login', 'trial', 'skiptheuse']
url = server._request_server(path='/saas_server/new_database',
scheme=scheme,
port=port,
state=state,
client_id=client_id,
scope=scope,)[0]
res = requests.get(url, verify=(self.server_id.request_scheme == 'https' and self.server_id.verify_ssl))
if res.status_code != 200:
# TODO /saas_server/new_database show more details here
raise exceptions.Warning('Error %s' % res.status_code)
data = simplejson.loads(res.text)
params = {
'state': data.get('state'),
'access_token': client.oauth_application_id._get_access_token(user_id, create=True),
}
url = '{url}?{params}'.format(url=data.get('url'), params=werkzeug.url_encode(params))
return {'url': url, 'id': client.id, 'client_id': client_id}
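    # Return shape (illustrative values): the caller typically redirects the
    # browser to 'url', which logs the owner into the freshly created client
    # database via the embedded access token.
    #   {'url': 'http://db1.example.com:80/...&access_token=...',
    #    'id': 42, 'client_id': '<uuid>'}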
@api.one
def generate_dbname(self, raise_error=True):
if not self.dbname_template:
if raise_error:
raise exceptions.Warning(_('Template for db name is not configured'))
return ''
sequence = self.env['ir.sequence'].get('saas_portal.plan')
return self.dbname_template.replace('%i', sequence)
@api.multi
def create_template(self):
        assert len(self) == 1, 'This method is applied only for single record'
# TODO use create_new_database function
plan = self[0]
state = {
'd': plan.template_id.name,
'demo': plan.demo and 1 or 0,
'addons': [],
'lang': plan.lang,
'tz': plan.tz,
'is_template_db': 1,
}
client_id = plan.template_id.client_id
plan.template_id.server_id = plan.server_id
params = plan.server_id._request_params(path='/saas_server/new_database', state=state, client_id=client_id)[0]
access_token = plan.template_id.oauth_application_id._get_access_token(create=True)
params.update({
'token_type': 'Bearer',
'access_token': access_token,
'expires_in': 3600,
})
url = '{scheme}://{saas_server}:{port}{path}?{params}'.format(scheme=plan.server_id.request_scheme,
saas_server=plan.server_id.name,
port=plan.server_id.request_port,
path='/saas_server/new_database',
params=werkzeug.url_encode(params))
res = requests.get(url, verify=(plan.server_id.request_scheme == 'https' and plan.server_id.verify_ssl))
if res.ok != True:
msg = """Status Code - %s
Reason - %s
URL - %s
""" % (res.status_code, res.reason, res.url)
            raise exceptions.Warning(msg)
return self.action_sync_server()
@api.one
def action_sync_server(self):
self.server_id.action_sync_server()
@api.multi
def edit_template(self):
return self[0].template_id.edit_database()
@api.multi
def upgrade_template(self):
return self[0].template_id.upgrade_database()
@api.multi
def delete_template(self):
res = self[0].template_id.delete_database()
return res
class OauthApplication(models.Model):
_inherit = 'oauth.application'
client_id = fields.Char('Database UUID')
last_connection = fields.Char(compute='_get_last_connection',
string='Last Connection', size=64)
@api.one
def _get_last_connection(self):
oat = self.pool.get('oauth.access_token')
to_search = [('application_id', '=', self.id)]
access_token_ids = oat.search(self.env.cr, self.env.uid, to_search)
if access_token_ids:
access_token = oat.browse(self.env.cr, self.env.uid,
access_token_ids[0])
self.last_connection = access_token.user_id.login_date
class SaasPortalDatabase(models.Model):
_name = 'saas_portal.database'
_inherits = {'oauth.application': 'oauth_application_id'}
name = fields.Char('Database name', readonly=False)
oauth_application_id = fields.Many2one('oauth.application', 'OAuth Application', required=True, ondelete='cascade')
server_id = fields.Many2one('saas_portal.server', string='Server', readonly=True)
state = fields.Selection([('draft','New'),
('open','In Progress'),
('cancelled', 'Cancelled'),
('pending','Pending'),
('deleted','Deleted'),
('template','Template'),
],
'State', default='draft', track_visibility='onchange')
@api.one
def action_sync_server(self):
self.server_id.action_sync_server()
@api.model
def _proceed_url(self, url):
return {
'type': 'ir.actions.act_url',
'target': 'new',
'name': 'Redirection',
'url': url
}
@api.multi
def _request(self, path):
r = self[0]
state = {
'd': r.name,
'client_id': r.client_id,
}
url = r.server_id._request(path=path, state=state, client_id=r.client_id)
return self._proceed_url(url)
@api.multi
def edit_database(self):
return self._request('/saas_server/edit_database')
@api.multi
def delete_database(self):
return self._request('/saas_server/delete_database')
@api.one
def delete_database_server(self, **kwargs):
return self._delete_database_server(**kwargs)
@api.one
def _delete_database_server(self, force_delete=False):
state = {
'd': self.name,
'client_id': self.client_id,
}
if force_delete:
state['force_delete'] = 1
url = self.server_id._request_server(path='/saas_server/delete_database', state=state, client_id=self.client_id)[0]
res = requests.get(url, verify=(self.server_id.request_scheme == 'https' and self.server_id.verify_ssl))
_logger.info('delete database: %s', res.text)
if res.status_code != 500:
self.state = 'deleted'
@api.multi
def upgrade_database(self):
obj = self[0]
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'saas.config',
'target': 'new',
'context': {
'default_action': 'upgrade',
'default_database': obj.name
}
}
class SaasPortalClient(models.Model):
_name = 'saas_portal.client'
_description = 'Client'
_rec_name = 'name'
_inherit = ['mail.thread', 'saas_portal.database', 'saas_base.client']
name = fields.Char(required=True)
partner_id = fields.Many2one('res.partner', string='Partner', track_visibility='onchange')
plan_id = fields.Many2one('saas_portal.plan', string='Plan', track_visibility='onchange')
expired = fields.Boolean('Expiration', compute='_get_expired')
@api.one
def _get_expired(self):
now = fields.Datetime.now()
self.expired = self.expiration_datetime and self.expiration_datetime < now
def unlink(self, cr, uid, ids, context=None):
user_model = self.pool.get('res.users')
token_model = self.pool.get('oauth.access_token')
for obj in self.browse(cr, uid, ids):
to_search1 = [('application_id', '=', obj.id)]
tk_ids = token_model.search(cr, uid, to_search1, context=context)
if tk_ids:
token_model.unlink(cr, uid, tk_ids)
# TODO: it seems we don't need stuff below
#to_search2 = [('database', '=', obj.name)]
#user_ids = user_model.search(cr, uid, to_search2, context=context)
#if user_ids:
# user_model.unlink(cr, uid, user_ids)
#openerp.service.db.exp_drop(obj.name)
return super(SaasPortalClient, self).unlink(cr, uid, ids, context)
@api.multi
def duplicate_database(self, dbname=None, partner_id=None, expiration=None, user_id=None):
self.ensure_one()
server = self.server_id
if not server:
server = self.env['saas_portal.server'].get_saas_server()
server.action_sync_server()
vals = {'name': dbname,
'server_id': server.id,
'plan_id': self.plan_id.id,
'partner_id': partner_id or self.partner_id.id,
}
if expiration:
now = datetime.now()
delta = timedelta(hours=expiration)
vals['expiration_datetime'] = (now + delta).strftime(DEFAULT_SERVER_DATETIME_FORMAT)
client = self.env['saas_portal.client'].create(vals)
client_id = client.client_id
scheme = server.request_scheme
port = server.request_port
if user_id:
owner_user = self.env['res.users'].browse(user_id)
else:
owner_user = self.env.user
owner_user_data = {
'user_id': owner_user.id,
'login': owner_user.login,
'name': owner_user.name,
'email': owner_user.email,
}
state = {
'd': client.name,
'e': client.expiration_datetime,
'r': '%s://%s:%s/web' % (scheme, client.name, port),
'owner_user': owner_user_data,
'db_template': self.name,
'disable_mail_server' : True
}
scope = ['userinfo', 'force_login', 'trial', 'skiptheuse']
url = server._request_server(path='/saas_server/new_database',
scheme=scheme,
port=port,
state=state,
client_id=client_id,
scope=scope,)[0]
res = requests.get(url, verify=(self.server_id.request_scheme == 'https' and self.server_id.verify_ssl))
if res.status_code != 200:
raise exceptions.Warning('Error %s' % res.status_code)
data = simplejson.loads(res.text)
params = {
'state': data.get('state'),
'access_token': client.oauth_application_id._get_access_token(user_id, create=True),
}
url = '{url}?{params}'.format(url=data.get('url'), params=werkzeug.url_encode(params))
return {'url': url, 'id': client.id, 'client_id': client_id}
| lgpl-3.0 |
overtherain/scriptfile | software/googleAppEngine/lib/webob_0_9/setup.py | 32 | 1070 | from setuptools import setup, find_packages
import sys, os
version = '0.9'
setup(name='WebOb',
version=version,
description="WSGI request and response object",
long_description="""\
WebOb provides wrappers around the WSGI request environment, and an
object to help create WSGI responses.
The objects map much of the specified behavior of HTTP, including
header parsing and accessors for other standard parts of the
environment.
""",
classifiers=[
"Development Status :: 4 - Beta",
"Framework :: Paste",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
keywords='wsgi request web http',
author='Ian Bicking',
author_email='[email protected]',
url='http://pythonpaste.org/webob/',
license='MIT',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
)
| mit |
gylian/sickrage | lib/requests/packages/urllib3/util/timeout.py | 1004 | 9544 | # The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
        response = http.request('GET', 'http://example.com/', timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
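# Editor's note: a minimal usage sketch, not part of the original module,
# showing how the connect/read/total values interact. The numbers are
# illustrative and it relies only on the Timeout class defined above.
if __name__ == '__main__':
    t = Timeout(total=10.0, connect=3.0, read=8.0)
    print(t.connect_timeout)   # min(connect, total) -> 3.0
    t.start_connect()          # starts the clock consulted by read_timeout
    print(t.read_timeout)      # min(total - elapsed connect time, read)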
| gpl-3.0 |
miyakz1192/neutron | neutron/tests/unit/api/v2/test_resource.py | 17 | 14344 | # Copyright (c) 2012 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import oslo_i18n
from webob import exc
import webtest
from neutron.api.v2 import resource as wsgi_resource
from neutron.common import exceptions as n_exc
from neutron import context
from neutron.tests import base
from neutron import wsgi
class RequestTestCase(base.BaseTestCase):
def setUp(self):
super(RequestTestCase, self).setUp()
self.req = wsgi_resource.Request({'foo': 'bar'})
def test_content_type_missing(self):
request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertIsNone(request.get_content_type())
def test_content_type_with_charset(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Content-Type"] = "application/json; charset=UTF-8"
result = request.get_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_accept(self):
content_type = 'application/json'
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = content_type
result = request.best_match_content_type()
self.assertEqual(result, content_type)
def test_content_type_from_accept_best(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/json"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = ("application/json; q=0.3, "
"application/xml; q=0.9")
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_from_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
request = wsgi.Request.blank('/tests/123.invalid')
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_and_query_extension(self):
request = wsgi.Request.blank('/tests/123.json')
request.headers["Accept"] = "application/xml"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_content_type_accept_default(self):
request = wsgi.Request.blank('/tests/123.unsupported')
request.headers["Accept"] = "application/unsupported1"
result = request.best_match_content_type()
self.assertEqual(result, "application/json")
def test_context_with_neutron_context(self):
ctxt = context.Context('fake_user', 'fake_tenant')
self.req.environ['neutron.context'] = ctxt
self.assertEqual(self.req.context, ctxt)
def test_context_without_neutron_context(self):
self.assertTrue(self.req.context.is_admin)
def test_request_context_elevated(self):
user_context = context.Context(
'fake_user', 'fake_project', admin=False)
self.assertFalse(user_context.is_admin)
admin_context = user_context.elevated()
self.assertFalse(user_context.is_admin)
self.assertTrue(admin_context.is_admin)
self.assertNotIn('admin', user_context.roles)
self.assertIn('admin', admin_context.roles)
def test_best_match_language(self):
        # Test that we are actually invoking language negotiation by webob
request = wsgi.Request.blank('/')
oslo_i18n.get_available_languages = mock.MagicMock()
oslo_i18n.get_available_languages.return_value = ['known-language',
'es', 'zh']
request.headers['Accept-Language'] = 'known-language'
language = request.best_match_language()
self.assertEqual(language, 'known-language')
        # If the Accept-Language header is an unknown language, missing or empty,
# the best match locale should be None
request.headers['Accept-Language'] = 'unknown-language'
language = request.best_match_language()
self.assertIsNone(language)
request.headers['Accept-Language'] = ''
language = request.best_match_language()
self.assertIsNone(language)
request.headers.pop('Accept-Language')
language = request.best_match_language()
self.assertIsNone(language)
class ResourceTestCase(base.BaseTestCase):
@staticmethod
def _get_deserializer():
return wsgi.JSONDeserializer()
def test_unmapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(n_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
@mock.patch('oslo_i18n.translate')
def test_unmapped_neutron_error_localized(self, mock_translation):
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(n_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
def test_mapped_neutron_error_with_json(self):
msg = u'\u7f51\u7edc'
class TestException(n_exc.NeutronException):
message = msg
expected_res = {'body': {
'NeutronError': {
'type': 'TestException',
'message': msg,
'detail': ''}}}
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertEqual(wsgi.JSONDeserializer().deserialize(res.body),
expected_res)
@mock.patch('oslo_i18n.translate')
def test_mapped_neutron_error_localized(self, mock_translation):
msg_translation = 'Translated error'
mock_translation.return_value = msg_translation
msg = _('Unmapped error')
class TestException(n_exc.NeutronException):
message = msg
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: exc.HTTPGatewayTimeout}
resource = webtest.TestApp(wsgi_resource.Resource(controller,
faults=faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test',
'format': 'json'})}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPGatewayTimeout.code)
self.assertIn(msg_translation,
str(wsgi.JSONDeserializer().deserialize(res.body)))
@staticmethod
def _make_request_with_side_effect(side_effect):
controller = mock.MagicMock()
controller.test.side_effect = side_effect
resource = webtest.TestApp(wsgi_resource.Resource(controller))
routing_args = {'action': 'test'}
environ = {'wsgiorg.routing_args': (None, routing_args)}
res = resource.get('', extra_environ=environ, expect_errors=True)
return res
def test_http_error(self):
res = self._make_request_with_side_effect(exc.HTTPGatewayTimeout())
# verify that the exception structure is the one expected
# by the python-neutronclient
self.assertEqual(exc.HTTPGatewayTimeout().explanation,
res.json['NeutronError']['message'])
self.assertEqual('HTTPGatewayTimeout',
res.json['NeutronError']['type'])
self.assertEqual('', res.json['NeutronError']['detail'])
self.assertEqual(exc.HTTPGatewayTimeout.code, res.status_int)
def test_unhandled_error(self):
expected_res = {'body': {'NeutronError':
{'detail': '',
'message': _(
'Request Failed: internal server '
'error while processing your request.'),
'type': 'HTTPInternalServerError'}}}
res = self._make_request_with_side_effect(side_effect=Exception())
self.assertEqual(exc.HTTPInternalServerError.code,
res.status_int)
self.assertEqual(expected_res,
self._get_deserializer().deserialize(res.body))
def test_not_implemented_error(self):
expected_res = {'body': {'NeutronError':
{'detail': '',
'message': _(
'The server has either erred or is '
'incapable of performing the requested '
'operation.'),
'type': 'HTTPNotImplemented'}}}
res = self._make_request_with_side_effect(exc.HTTPNotImplemented())
self.assertEqual(exc.HTTPNotImplemented.code, res.status_int)
self.assertEqual(expected_res,
self._get_deserializer().deserialize(res.body))
def test_status_200(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.get('', extra_environ=environ)
self.assertEqual(res.status_int, 200)
def test_status_204(self):
controller = mock.MagicMock()
controller.test = lambda request: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'delete'})}
res = resource.delete('', extra_environ=environ)
self.assertEqual(res.status_int, 204)
def _test_error_log_level(self, map_webob_exc, expect_log_info=False,
use_fault_map=True):
class TestException(n_exc.NeutronException):
message = 'Test Exception'
controller = mock.MagicMock()
controller.test.side_effect = TestException()
faults = {TestException: map_webob_exc} if use_fault_map else {}
resource = webtest.TestApp(wsgi_resource.Resource(controller, faults))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
with mock.patch.object(wsgi_resource, 'LOG') as log:
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, map_webob_exc.code)
self.assertEqual(expect_log_info, log.info.called)
self.assertNotEqual(expect_log_info, log.exception.called)
def test_4xx_error_logged_info_level(self):
self._test_error_log_level(exc.HTTPNotFound, expect_log_info=True)
def test_non_4xx_error_logged_exception_level(self):
self._test_error_log_level(exc.HTTPServiceUnavailable,
expect_log_info=False)
def test_unmapped_error_logged_exception_level(self):
self._test_error_log_level(exc.HTTPInternalServerError,
expect_log_info=False, use_fault_map=False)
def test_no_route_args(self):
controller = mock.MagicMock()
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {}
res = resource.get('', extra_environ=environ, expect_errors=True)
self.assertEqual(res.status_int, exc.HTTPInternalServerError.code)
def test_post_with_body(self):
controller = mock.MagicMock()
controller.test = lambda request, body: {'foo': 'bar'}
resource = webtest.TestApp(wsgi_resource.Resource(controller))
environ = {'wsgiorg.routing_args': (None, {'action': 'test'})}
res = resource.post('', params='{"key": "val"}',
extra_environ=environ)
self.assertEqual(res.status_int, 200)
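# Editor's note: a minimal sketch (not part of the original tests) of the
# controller/fault-map wiring exercised above; MyError is a hypothetical
# exception class.
#
#   class MyError(n_exc.NeutronException):
#       message = 'boom'
#
#   faults = {MyError: exc.HTTPGatewayTimeout}
#   app = wsgi_resource.Resource(controller, faults=faults)
#   # a request routed with {'action': 'test'} calls controller.test();
#   # a raised MyError is rendered as a NeutronError body with status 504.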
| apache-2.0 |
codevlabs/grab | test/grab_pickle.py | 12 | 1081 | # coding: utf-8
try:
import cPickle as pickle
except ImportError:
import pickle
from multiprocessing import Queue
from test.util import BaseGrabTestCase
from test.util import build_grab
class TestGrab(BaseGrabTestCase):
def setUp(self):
self.server.reset()
def test_pickling(self):
"""
Test that Grab instance could be pickled and unpickled.
"""
g = build_grab()
self.server.response['get.data'] =\
'<form><textarea name="text">the cat</textarea></form>'
g.go(self.server.get_url())
g.set_input('text', 'foobar')
data = pickle.dumps(g, pickle.HIGHEST_PROTOCOL)
def func(pickled_grab, resultq):
g2 = pickle.loads(pickled_grab)
text = g2.doc.select('//textarea').text()
resultq.put(text)
result_queue = Queue()
# p = Process(target=func, args=[data, result_queue])
# p.start()
func(data, result_queue)
text = result_queue.get(block=True, timeout=1)
self.assertEqual(text, 'the cat')
| mit |
cstipkovic/spidermonkey-research | testing/web-platform/tests/tools/pytest/doc/en/genapi.py | 203 | 1131 | import textwrap
import inspect
class Writer:
def __init__(self, clsname):
self.clsname = clsname
def __enter__(self):
self.file = open("%s.api" % self.clsname, "w")
return self
def __exit__(self, *args):
self.file.close()
print "wrote", self.file.name
def line(self, line):
self.file.write(line+"\n")
def docmethod(self, method):
doc = " ".join(method.__doc__.split())
indent = " "
w = textwrap.TextWrapper(initial_indent=indent,
subsequent_indent=indent)
spec = inspect.getargspec(method)
del spec.args[0]
self.line(".. py:method:: " + method.__name__ +
inspect.formatargspec(*spec))
self.line("")
self.line(w.fill(doc))
self.line("")
def pytest_funcarg__a(request):
with Writer("request") as writer:
writer.docmethod(request.getfuncargvalue)
writer.docmethod(request.cached_setup)
writer.docmethod(request.addfinalizer)
writer.docmethod(request.applymarker)
def test_hello(a):
pass
| mpl-2.0 |
40223119/2015cda | static/Brython3.1.1-20150328-091302/Lib/numbers.py | 883 | 10398 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for numbers, according to PEP 3141.
TODO: Fill out more detailed documentation on the operators."""
from abc import ABCMeta, abstractmethod
__all__ = ["Number", "Complex", "Real", "Rational", "Integral"]
class Number(metaclass=ABCMeta):
"""All numbers inherit from this class.
If you just want to check if an argument x is a number, without
caring what kind, use isinstance(x, Number).
"""
__slots__ = ()
# Concrete numeric types must provide their own hash implementation
__hash__ = None
## Notes on Decimal
## ----------------
## Decimal has all of the methods specified by the Real abc, but it should
## not be registered as a Real because decimals do not interoperate with
## binary floats (i.e. Decimal('3.14') + 2.71828 is undefined). But,
## abstract reals are expected to interoperate (i.e. R1 + R2 should be
## expected to work if R1 and R2 are both Reals).
class Complex(Number):
"""Complex defines the operations that work on the builtin complex type.
In short, those are: a conversion to complex, .real, .imag, +, -,
*, /, abs(), .conjugate, ==, and !=.
If it is given heterogenous arguments, and doesn't have special
knowledge about them, it should fall back to the builtin complex
type as described below.
"""
__slots__ = ()
@abstractmethod
def __complex__(self):
"""Return a builtin complex instance. Called for complex(self)."""
def __bool__(self):
"""True if self != 0. Called for bool(self)."""
return self != 0
@property
@abstractmethod
def real(self):
"""Retrieve the real component of this number.
This should subclass Real.
"""
raise NotImplementedError
@property
@abstractmethod
def imag(self):
"""Retrieve the imaginary component of this number.
This should subclass Real.
"""
raise NotImplementedError
@abstractmethod
def __add__(self, other):
"""self + other"""
raise NotImplementedError
@abstractmethod
def __radd__(self, other):
"""other + self"""
raise NotImplementedError
@abstractmethod
def __neg__(self):
"""-self"""
raise NotImplementedError
@abstractmethod
def __pos__(self):
"""+self"""
raise NotImplementedError
def __sub__(self, other):
"""self - other"""
return self + -other
def __rsub__(self, other):
"""other - self"""
return -self + other
@abstractmethod
def __mul__(self, other):
"""self * other"""
raise NotImplementedError
@abstractmethod
def __rmul__(self, other):
"""other * self"""
raise NotImplementedError
@abstractmethod
def __truediv__(self, other):
"""self / other: Should promote to float when necessary."""
raise NotImplementedError
@abstractmethod
def __rtruediv__(self, other):
"""other / self"""
raise NotImplementedError
@abstractmethod
def __pow__(self, exponent):
"""self**exponent; should promote to float or complex when necessary."""
raise NotImplementedError
@abstractmethod
def __rpow__(self, base):
"""base ** self"""
raise NotImplementedError
@abstractmethod
def __abs__(self):
"""Returns the Real distance from 0. Called for abs(self)."""
raise NotImplementedError
@abstractmethod
def conjugate(self):
"""(x+y*i).conjugate() returns (x-y*i)."""
raise NotImplementedError
@abstractmethod
def __eq__(self, other):
"""self == other"""
raise NotImplementedError
def __ne__(self, other):
"""self != other"""
# The default __ne__ doesn't negate __eq__ until 3.0.
return not (self == other)
Complex.register(complex)
class Real(Complex):
"""To Complex, Real adds the operations that work on real numbers.
In short, those are: a conversion to float, trunc(), divmod,
%, <, <=, >, and >=.
Real also provides defaults for the derived operations.
"""
__slots__ = ()
@abstractmethod
def __float__(self):
"""Any Real can be converted to a native float object.
Called for float(self)."""
raise NotImplementedError
@abstractmethod
def __trunc__(self):
"""trunc(self): Truncates self to an Integral.
Returns an Integral i such that:
* i>0 iff self>0;
* abs(i) <= abs(self);
* for any Integral j satisfying the first two conditions,
abs(i) >= abs(j) [i.e. i has "maximal" abs among those].
i.e. "truncate towards 0".
"""
raise NotImplementedError
@abstractmethod
def __floor__(self):
"""Finds the greatest Integral <= self."""
raise NotImplementedError
@abstractmethod
def __ceil__(self):
"""Finds the least Integral >= self."""
raise NotImplementedError
@abstractmethod
def __round__(self, ndigits=None):
"""Rounds self to ndigits decimal places, defaulting to 0.
If ndigits is omitted or None, returns an Integral, otherwise
returns a Real. Rounds half toward even.
"""
raise NotImplementedError
def __divmod__(self, other):
"""divmod(self, other): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (self // other, self % other)
def __rdivmod__(self, other):
"""divmod(other, self): The pair (self // other, self % other).
Sometimes this can be computed faster than the pair of
operations.
"""
return (other // self, other % self)
@abstractmethod
def __floordiv__(self, other):
"""self // other: The floor() of self/other."""
raise NotImplementedError
@abstractmethod
def __rfloordiv__(self, other):
"""other // self: The floor() of other/self."""
raise NotImplementedError
@abstractmethod
def __mod__(self, other):
"""self % other"""
raise NotImplementedError
@abstractmethod
def __rmod__(self, other):
"""other % self"""
raise NotImplementedError
@abstractmethod
def __lt__(self, other):
"""self < other
< on Reals defines a total ordering, except perhaps for NaN."""
raise NotImplementedError
@abstractmethod
def __le__(self, other):
"""self <= other"""
raise NotImplementedError
# Concrete implementations of Complex abstract methods.
def __complex__(self):
"""complex(self) == complex(float(self), 0)"""
return complex(float(self))
@property
def real(self):
"""Real numbers are their real component."""
return +self
@property
def imag(self):
"""Real numbers have no imaginary component."""
return 0
def conjugate(self):
"""Conjugate is a no-op for Reals."""
return +self
Real.register(float)
class Rational(Real):
""".numerator and .denominator should be in lowest terms."""
__slots__ = ()
@property
@abstractmethod
def numerator(self):
raise NotImplementedError
@property
@abstractmethod
def denominator(self):
raise NotImplementedError
# Concrete implementation of Real's conversion to float.
def __float__(self):
"""float(self) = self.numerator / self.denominator
It's important that this conversion use the integer's "true"
division rather than casting one side to float before dividing
so that ratios of huge integers convert without overflowing.
"""
return self.numerator / self.denominator
class Integral(Rational):
"""Integral adds a conversion to int and the bit-string operations."""
__slots__ = ()
@abstractmethod
def __int__(self):
"""int(self)"""
raise NotImplementedError
def __index__(self):
"""Called whenever an index is needed, such as in slicing"""
return int(self)
@abstractmethod
def __pow__(self, exponent, modulus=None):
"""self ** exponent % modulus, but maybe faster.
Accept the modulus argument if you want to support the
3-argument version of pow(). Raise a TypeError if exponent < 0
or any argument isn't Integral. Otherwise, just implement the
2-argument version described in Complex.
"""
raise NotImplementedError
@abstractmethod
def __lshift__(self, other):
"""self << other"""
raise NotImplementedError
@abstractmethod
def __rlshift__(self, other):
"""other << self"""
raise NotImplementedError
@abstractmethod
def __rshift__(self, other):
"""self >> other"""
raise NotImplementedError
@abstractmethod
def __rrshift__(self, other):
"""other >> self"""
raise NotImplementedError
@abstractmethod
def __and__(self, other):
"""self & other"""
raise NotImplementedError
@abstractmethod
def __rand__(self, other):
"""other & self"""
raise NotImplementedError
@abstractmethod
def __xor__(self, other):
"""self ^ other"""
raise NotImplementedError
@abstractmethod
def __rxor__(self, other):
"""other ^ self"""
raise NotImplementedError
@abstractmethod
def __or__(self, other):
"""self | other"""
raise NotImplementedError
@abstractmethod
def __ror__(self, other):
"""other | self"""
raise NotImplementedError
@abstractmethod
def __invert__(self):
"""~self"""
raise NotImplementedError
# Concrete implementations of Rational and Real abstract methods.
def __float__(self):
"""float(self) == float(int(self))"""
return float(int(self))
@property
def numerator(self):
"""Integers are their own numerators."""
return +self
@property
def denominator(self):
"""Integers have a denominator of 1."""
return 1
Integral.register(int)
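# Editor's note: a short illustrative check, not part of the original
# module, showing how the registrations above classify builtin types.
if __name__ == '__main__':
    assert isinstance(5, Integral) and isinstance(5, Rational)
    assert isinstance(2.5, Real) and not isinstance(2.5, Integral)
    assert isinstance(1j, Complex) and not isinstance(1j, Real)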
| gpl-3.0 |
YuMatsuzawa/HadoopEclipseProject | hadoop-0.20.2-cdh3u5/src/contrib/cloud/src/py/hadoop/cloud/providers/dummy.py | 16 | 2282 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from hadoop.cloud.cluster import Cluster
from hadoop.cloud.cluster import Instance
logger = logging.getLogger(__name__)
class DummyCluster(Cluster):
@staticmethod
def get_clusters_with_role(role, state="running"):
logger.info("get_clusters_with_role(%s, %s)", role, state)
return ["dummy-cluster"]
def __init__(self, name, config_dir):
super(DummyCluster, self).__init__(name, config_dir)
logger.info("__init__(%s, %s)", name, config_dir)
def get_provider_code(self):
return "dummy"
def authorize_role(self, role, from_port, to_port, cidr_ip):
logger.info("authorize_role(%s, %s, %s, %s)", role, from_port, to_port,
cidr_ip)
def get_instances_in_role(self, role, state_filter=None):
logger.info("get_instances_in_role(%s, %s)", role, state_filter)
return [Instance(1, '127.0.0.1', '127.0.0.1')]
def print_status(self, roles, state_filter="running"):
logger.info("print_status(%s, %s)", roles, state_filter)
def launch_instances(self, role, number, image_id, size_id,
instance_user_data, **kwargs):
logger.info("launch_instances(%s, %s, %s, %s, %s, %s)", role, number,
image_id, size_id, instance_user_data, str(kwargs))
return [1]
def wait_for_instances(self, instance_ids, timeout=600):
logger.info("wait_for_instances(%s, %s)", instance_ids, timeout)
def terminate(self):
logger.info("terminate")
def delete(self):
logger.info("delete")
| apache-2.0 |
lucalianas/openmicroscopy | components/tools/OmeroCpp/ext/gtest-1.7.0/test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.
# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)
# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
[sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)
# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)
# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'
# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'
# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'
# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'
# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')
# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')
# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')
# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')
# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'
# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
[COMMAND, LIST_TESTS_FLAG]).output
# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestX/1',
'SeqQ/ParamTest.TestY/0',
'SeqQ/ParamTest.TestY/1',
]
DISABLED_TESTS = [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
]
if SUPPORTS_DEATH_TESTS:
DEATH_TESTS = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
]
else:
DEATH_TESTS = []
# All the non-disabled tests.
ACTIVE_TESTS = [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS
param_tests_present = None
# Utilities.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
"""Sets the env variable to 'value'; unsets it when 'value' is None."""
if value is not None:
environ[env_var] = value
elif env_var in environ:
del environ[env_var]
def RunAndReturnOutput(args = None):
"""Runs the test program and returns its output."""
return gtest_test_utils.Subprocess([COMMAND] + (args or []),
env=environ).output
def RunAndExtractTestList(args = None):
"""Runs the test program and returns its exit code and a list of tests run."""
p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
tests_run = []
test_case = ''
test = ''
for line in p.output.split('\n'):
match = TEST_CASE_REGEX.match(line)
if match is not None:
test_case = match.group(1)
else:
match = TEST_REGEX.match(line)
if match is not None:
test = match.group(1)
tests_run.append(test_case + '.' + test)
return (tests_run, p.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
"""Runs the given function and arguments in a modified environment."""
try:
original_env = environ.copy()
environ.update(extra_env)
return function(*args, **kwargs)
finally:
environ.clear()
environ.update(original_env)
def RunWithSharding(total_shards, shard_index, command):
"""Runs a test program shard and returns exit code and a list of tests run."""
extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index),
TOTAL_SHARDS_ENV_VAR: str(total_shards)}
return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
"""Tests the env variable or the command line flag to filter tests."""
# Utilities.
def AssertSetEqual(self, lhs, rhs):
"""Asserts that two sets are equal."""
for elem in lhs:
self.assert_(elem in rhs, '%s in %s' % (elem, rhs))
for elem in rhs:
self.assert_(elem in lhs, '%s in %s' % (elem, lhs))
def AssertPartitionIsValid(self, set_var, list_of_sets):
"""Asserts that list_of_sets is a valid partition of set_var."""
full_partition = []
for slice_var in list_of_sets:
full_partition.extend(slice_var)
self.assertEqual(len(set_var), len(full_partition))
self.assertEqual(sets.Set(set_var), sets.Set(full_partition))
def AdjustForParameterizedTests(self, tests_to_run):
"""Adjust tests_to_run in case value parameterized tests are disabled."""
global param_tests_present
if not param_tests_present:
return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
else:
return tests_to_run
def RunAndVerify(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for a given filter."""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# First, tests using the environment variable.
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
tests_run = RunAndExtractTestList()[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, tests_to_run)
# pylint: enable-msg=C6403
# Next, tests using the command line flag.
if gtest_filter is None:
args = []
else:
args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
args=None, check_exit_0=False):
"""Checks that binary runs correct tests for the given filter and shard.
Runs all shards of gtest_filter_unittest_ with the given filter, and
verifies that the right set of tests were run. The union of tests run
on each shard should be identical to tests_to_run, without duplicates.
Args:
gtest_filter: A filter to apply to the tests.
total_shards: A total number of shards to split test run into.
tests_to_run: A set of tests expected to run.
args : Arguments to pass to the to the test binary.
check_exit_0: When set to a true value, make sure that all shards
return 0.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Windows removes empty variables from the environment when passing it
# to a new process. This means it is impossible to pass an empty filter
# into a process using the environment variable. However, we can still
# test the case when the variable is not supplied (i.e., gtest_filter is
# None).
# pylint: disable-msg=C6403
if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
SetEnvVar(FILTER_ENV_VAR, gtest_filter)
partition = []
for i in range(0, total_shards):
(tests_run, exit_code) = RunWithSharding(total_shards, i, args)
if check_exit_0:
self.assertEqual(0, exit_code)
partition.append(tests_run)
self.AssertPartitionIsValid(tests_to_run, partition)
SetEnvVar(FILTER_ENV_VAR, None)
# pylint: enable-msg=C6403
def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
"""Checks that the binary runs correct set of tests for the given filter.
Runs gtest_filter_unittest_ with the given filter, and enables
disabled tests. Verifies that the right set of tests were run.
Args:
gtest_filter: A filter to apply to the tests.
tests_to_run: A set of tests expected to run.
"""
tests_to_run = self.AdjustForParameterizedTests(tests_to_run)
# Construct the command line.
args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
if gtest_filter is not None:
args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))
tests_run = RunAndExtractTestList(args)[0]
self.AssertSetEqual(tests_run, tests_to_run)
def setUp(self):
"""Sets up test case.
Determines whether value-parameterized tests are enabled in the binary and
sets the flags accordingly.
"""
global param_tests_present
if param_tests_present is None:
param_tests_present = PARAM_TEST_REGEX.search(
RunAndReturnOutput()) is not None
def testDefaultBehavior(self):
"""Tests the behavior of not specifying the filter."""
self.RunAndVerify(None, ACTIVE_TESTS)
def testDefaultBehaviorWithShards(self):
"""Tests the behavior without the filter, with sharding enabled."""
self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)
def testEmptyFilter(self):
"""Tests an empty filter."""
self.RunAndVerify('', [])
self.RunAndVerifyWithSharding('', 1, [])
self.RunAndVerifyWithSharding('', 2, [])
def testBadFilter(self):
"""Tests a filter that matches nothing."""
self.RunAndVerify('BadFilter', [])
self.RunAndVerifyAllowingDisabled('BadFilter', [])
def testFullName(self):
"""Tests filtering by full name."""
self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])
def testUniversalFilters(self):
"""Tests filters that match everything."""
self.RunAndVerify('*', ACTIVE_TESTS)
self.RunAndVerify('*.*', ACTIVE_TESTS)
self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)
def testFilterByTestCase(self):
"""Tests filtering by test case name."""
self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])
BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
self.RunAndVerify('BazTest.*', BAZ_TESTS)
self.RunAndVerifyAllowingDisabled('BazTest.*',
BAZ_TESTS + ['BazTest.DISABLED_TestC'])
def testFilterByTest(self):
"""Tests filtering by test name."""
self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])
def testFilterDisabledTests(self):
"""Select only the disabled tests to run."""
self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
['DISABLED_FoobarTest.Test1'])
self.RunAndVerify('*DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)
self.RunAndVerify('*.DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
'BarTest.DISABLED_TestFour',
'BarTest.DISABLED_TestFive',
'BazTest.DISABLED_TestC',
'DISABLED_FoobarTest.DISABLED_Test2',
])
self.RunAndVerify('DISABLED_*', [])
self.RunAndVerifyAllowingDisabled('DISABLED_*', [
'DISABLED_FoobarTest.Test1',
'DISABLED_FoobarTest.DISABLED_Test2',
'DISABLED_FoobarbazTest.TestA',
])
def testWildcardInTestCaseName(self):
"""Tests using wildcard in the test case name."""
self.RunAndVerify('*a*.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)
def testWildcardInTestName(self):
"""Tests using wildcard in the test name."""
self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testFilterWithoutDot(self):
"""Tests a filter that has no '.' in it."""
self.RunAndVerify('*z*', [
'FooTest.Xyz',
'BazTest.TestOne',
'BazTest.TestA',
'BazTest.TestB',
])
def testTwoPatterns(self):
"""Tests filters that consist of two patterns."""
self.RunAndVerify('Foo*.*:*A*', [
'FooTest.Abc',
'FooTest.Xyz',
'BazTest.TestA',
])
# An empty pattern + a non-empty one
self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])
def testThreePatterns(self):
"""Tests filters that consist of three patterns."""
self.RunAndVerify('*oo*:*A*:*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
'BazTest.TestA',
])
# The 2nd pattern is empty.
self.RunAndVerify('*oo*::*One', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BazTest.TestOne',
])
# The last 2 patterns are empty.
self.RunAndVerify('*oo*::', [
'FooTest.Abc',
'FooTest.Xyz',
])
def testNegativeFilters(self):
self.RunAndVerify('*-BazTest.TestOne', [
'FooTest.Abc',
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
'BazTest.TestA',
'BazTest.TestB',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
'FooTest.Xyz',
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
self.RunAndVerify('BarTest.*-BarTest.TestOne', [
'BarTest.TestTwo',
'BarTest.TestThree',
])
# Tests without leading '*'.
self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
'BarTest.TestOne',
'BarTest.TestTwo',
'BarTest.TestThree',
] + DEATH_TESTS + PARAM_TESTS)
# Value parameterized tests.
self.RunAndVerify('*/*', PARAM_TESTS)
# Value parameterized tests filtering by the sequence name.
self.RunAndVerify('SeqP/*', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
])
# Value parameterized tests filtering by the test name.
self.RunAndVerify('*/0', [
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestY/0',
'SeqQ/ParamTest.TestX/0',
'SeqQ/ParamTest.TestY/0',
])
def testFlagOverridesEnvVar(self):
"""Tests that the filter flag overrides the filtering env. variable."""
SetEnvVar(FILTER_ENV_VAR, 'Foo*')
args = ['--%s=%s' % (FILTER_FLAG, '*One')]
tests_run = RunAndExtractTestList(args)[0]
SetEnvVar(FILTER_ENV_VAR, None)
self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])
def testShardStatusFileIsCreated(self):
"""Tests that the shard file is created if specified in the environment."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
finally:
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
def testShardStatusFileIsCreatedWithListTests(self):
"""Tests that the shard file is created with the "list_tests" flag."""
shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
'shard_status_file2')
self.assert_(not os.path.exists(shard_status_file))
extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
try:
output = InvokeWithModifiedEnv(extra_env,
RunAndReturnOutput,
[LIST_TESTS_FLAG])
finally:
# This assertion ensures that Google Test enumerated the tests as
# opposed to running them.
self.assert_('[==========]' not in output,
'Unexpected output during test enumeration.\n'
'Please ensure that LIST_TESTS_FLAG is assigned the\n'
'correct flag value for listing Google Test tests.')
self.assert_(os.path.exists(shard_status_file))
os.remove(shard_status_file)
if SUPPORTS_DEATH_TESTS:
def testShardingWorksWithDeathTests(self):
"""Tests integration with death tests and sharding."""
gtest_filter = 'HasDeathTest.*:SeqP/*'
expected_tests = [
'HasDeathTest.Test1',
'HasDeathTest.Test2',
'SeqP/ParamTest.TestX/0',
'SeqP/ParamTest.TestX/1',
'SeqP/ParamTest.TestY/0',
'SeqP/ParamTest.TestY/1',
]
for flag in ['--gtest_death_test_style=threadsafe',
'--gtest_death_test_style=fast']:
self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
check_exit_0=True, args=[flag])
self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
check_exit_0=True, args=[flag])
if __name__ == '__main__':
gtest_test_utils.Main()
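# Editor's note: illustrative shell invocations (not part of this script)
# of the filter syntax verified above; the binary name matches COMMAND.
#
#   GTEST_FILTER='FooTest.*' ./gtest_filter_unittest_        # env variable, by test case
#   ./gtest_filter_unittest_ --gtest_filter='*One:*A*'       # ':'-joined patterns
#   ./gtest_filter_unittest_ --gtest_filter='*-BazTest.*'    # '-' negative filter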
| gpl-2.0 |
adrianholovaty/django | tests/regressiontests/localflavor/pl/tests.py | 33 | 22496 | from django.contrib.localflavor.pl.forms import (PLProvinceSelect,
PLCountySelect, PLPostalCodeField, PLNIPField, PLPESELField, PLNationalIDCardNumberField, PLREGONField)
from django.test import SimpleTestCase
class PLLocalFlavorTests(SimpleTestCase):
def test_PLProvinceSelect(self):
f = PLProvinceSelect()
out = u'''<select name="voivodeships">
<option value="lower_silesia">Lower Silesia</option>
<option value="kuyavia-pomerania">Kuyavia-Pomerania</option>
<option value="lublin">Lublin</option>
<option value="lubusz">Lubusz</option>
<option value="lodz">Lodz</option>
<option value="lesser_poland">Lesser Poland</option>
<option value="masovia">Masovia</option>
<option value="opole">Opole</option>
<option value="subcarpatia">Subcarpatia</option>
<option value="podlasie">Podlasie</option>
<option value="pomerania" selected="selected">Pomerania</option>
<option value="silesia">Silesia</option>
<option value="swietokrzyskie">Swietokrzyskie</option>
<option value="warmia-masuria">Warmia-Masuria</option>
<option value="greater_poland">Greater Poland</option>
<option value="west_pomerania">West Pomerania</option>
</select>'''
self.assertHTMLEqual(f.render('voivodeships', 'pomerania'), out)
def test_PLCountrySelect(self):
f = PLCountySelect()
out = u'''<select name="administrativeunit">
<option value="wroclaw">Wroc\u0142aw</option>
<option value="jeleniagora">Jelenia G\xf3ra</option>
<option value="legnica">Legnica</option>
<option value="boleslawiecki">boles\u0142awiecki</option>
<option value="dzierzoniowski">dzier\u017coniowski</option>
<option value="glogowski">g\u0142ogowski</option>
<option value="gorowski">g\xf3rowski</option>
<option value="jaworski">jaworski</option>
<option value="jeleniogorski">jeleniog\xf3rski</option>
<option value="kamiennogorski">kamiennog\xf3rski</option>
<option value="klodzki">k\u0142odzki</option>
<option value="legnicki">legnicki</option>
<option value="lubanski">luba\u0144ski</option>
<option value="lubinski">lubi\u0144ski</option>
<option value="lwowecki">lw\xf3wecki</option>
<option value="milicki">milicki</option>
<option value="olesnicki">ole\u015bnicki</option>
<option value="olawski">o\u0142awski</option>
<option value="polkowicki">polkowicki</option>
<option value="strzelinski">strzeli\u0144ski</option>
<option value="sredzki">\u015bredzki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="trzebnicki">trzebnicki</option>
<option value="walbrzyski">wa\u0142brzyski</option>
<option value="wolowski">wo\u0142owski</option>
<option value="wroclawski">wroc\u0142awski</option>
<option value="zabkowicki">z\u0105bkowicki</option>
<option value="zgorzelecki">zgorzelecki</option>
<option value="zlotoryjski">z\u0142otoryjski</option>
<option value="bydgoszcz">Bydgoszcz</option>
<option value="torun">Toru\u0144</option>
<option value="wloclawek">W\u0142oc\u0142awek</option>
<option value="grudziadz">Grudzi\u0105dz</option>
<option value="aleksandrowski">aleksandrowski</option>
<option value="brodnicki">brodnicki</option>
<option value="bydgoski">bydgoski</option>
<option value="chelminski">che\u0142mi\u0144ski</option>
<option value="golubsko-dobrzynski">golubsko-dobrzy\u0144ski</option>
<option value="grudziadzki">grudzi\u0105dzki</option>
<option value="inowroclawski">inowroc\u0142awski</option>
<option value="lipnowski">lipnowski</option>
<option value="mogilenski">mogile\u0144ski</option>
<option value="nakielski">nakielski</option>
<option value="radziejowski">radziejowski</option>
<option value="rypinski">rypi\u0144ski</option>
<option value="sepolenski">s\u0119pole\u0144ski</option>
<option value="swiecki">\u015bwiecki</option>
<option value="torunski">toru\u0144ski</option>
<option value="tucholski">tucholski</option>
<option value="wabrzeski">w\u0105brzeski</option>
<option value="wloclawski">wroc\u0142awski</option>
<option value="zninski">\u017ani\u0144ski</option>
<option value="lublin">Lublin</option>
<option value="biala-podlaska">Bia\u0142a Podlaska</option>
<option value="chelm">Che\u0142m</option>
<option value="zamosc">Zamo\u015b\u0107</option>
<option value="bialski">bialski</option>
<option value="bilgorajski">bi\u0142gorajski</option>
<option value="chelmski">che\u0142mski</option>
<option value="hrubieszowski">hrubieszowski</option>
<option value="janowski">janowski</option>
<option value="krasnostawski">krasnostawski</option>
<option value="krasnicki">kra\u015bnicki</option>
<option value="lubartowski">lubartowski</option>
<option value="lubelski">lubelski</option>
<option value="leczynski">\u0142\u0119czy\u0144ski</option>
<option value="lukowski">\u0142ukowski</option>
<option value="opolski">opolski</option>
<option value="parczewski">parczewski</option>
<option value="pulawski">pu\u0142awski</option>
<option value="radzynski">radzy\u0144ski</option>
<option value="rycki">rycki</option>
<option value="swidnicki">\u015bwidnicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wlodawski">w\u0142odawski</option>
<option value="zamojski">zamojski</option>
<option value="gorzow-wielkopolski">Gorz\xf3w Wielkopolski</option>
<option value="zielona-gora">Zielona G\xf3ra</option>
<option value="gorzowski">gorzowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="miedzyrzecki">mi\u0119dzyrzecki</option>
<option value="nowosolski">nowosolski</option>
<option value="slubicki">s\u0142ubicki</option>
<option value="strzelecko-drezdenecki">strzelecko-drezdenecki</option>
<option value="sulecinski">sule\u0144ci\u0144ski</option>
<option value="swiebodzinski">\u015bwiebodzi\u0144ski</option>
<option value="wschowski">wschowski</option>
<option value="zielonogorski">zielonog\xf3rski</option>
<option value="zaganski">\u017caga\u0144ski</option>
<option value="zarski">\u017carski</option>
<option value="lodz">\u0141\xf3d\u017a</option>
<option value="piotrkow-trybunalski">Piotrk\xf3w Trybunalski</option>
<option value="skierniewice">Skierniewice</option>
<option value="belchatowski">be\u0142chatowski</option>
<option value="brzezinski">brzezi\u0144ski</option>
<option value="kutnowski">kutnowski</option>
<option value="laski">\u0142aski</option>
<option value="leczycki">\u0142\u0119czycki</option>
<option value="lowicki">\u0142owicki</option>
<option value="lodzki wschodni">\u0142\xf3dzki wschodni</option>
<option value="opoczynski">opoczy\u0144ski</option>
<option value="pabianicki">pabianicki</option>
<option value="pajeczanski">paj\u0119cza\u0144ski</option>
<option value="piotrkowski">piotrkowski</option>
<option value="poddebicki">podd\u0119bicki</option>
<option value="radomszczanski">radomszcza\u0144ski</option>
<option value="rawski">rawski</option>
<option value="sieradzki">sieradzki</option>
<option value="skierniewicki">skierniewicki</option>
<option value="tomaszowski">tomaszowski</option>
<option value="wielunski">wielu\u0144ski</option>
<option value="wieruszowski">wieruszowski</option>
<option value="zdunskowolski">zdu\u0144skowolski</option>
<option value="zgierski">zgierski</option>
<option value="krakow">Krak\xf3w</option>
<option value="tarnow">Tarn\xf3w</option>
<option value="nowy-sacz">Nowy S\u0105cz</option>
<option value="bochenski">boche\u0144ski</option>
<option value="brzeski">brzeski</option>
<option value="chrzanowski">chrzanowski</option>
<option value="dabrowski">d\u0105browski</option>
<option value="gorlicki">gorlicki</option>
<option value="krakowski">krakowski</option>
<option value="limanowski">limanowski</option>
<option value="miechowski">miechowski</option>
<option value="myslenicki">my\u015blenicki</option>
<option value="nowosadecki">nowos\u0105decki</option>
<option value="nowotarski">nowotarski</option>
<option value="olkuski">olkuski</option>
<option value="oswiecimski">o\u015bwi\u0119cimski</option>
<option value="proszowicki">proszowicki</option>
<option value="suski">suski</option>
<option value="tarnowski">tarnowski</option>
<option value="tatrzanski">tatrza\u0144ski</option>
<option value="wadowicki">wadowicki</option>
<option value="wielicki">wielicki</option>
<option value="warszawa">Warszawa</option>
<option value="ostroleka">Ostro\u0142\u0119ka</option>
<option value="plock">P\u0142ock</option>
<option value="radom">Radom</option>
<option value="siedlce">Siedlce</option>
<option value="bialobrzeski">bia\u0142obrzeski</option>
<option value="ciechanowski">ciechanowski</option>
<option value="garwolinski">garwoli\u0144ski</option>
<option value="gostyninski">gostyni\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="grojecki">gr\xf3jecki</option>
<option value="kozienicki">kozenicki</option>
<option value="legionowski">legionowski</option>
<option value="lipski">lipski</option>
<option value="losicki">\u0142osicki</option>
<option value="makowski">makowski</option>
<option value="minski">mi\u0144ski</option>
<option value="mlawski">m\u0142awski</option>
<option value="nowodworski">nowodworski</option>
<option value="ostrolecki">ostro\u0142\u0119cki</option>
<option value="ostrowski">ostrowski</option>
<option value="otwocki">otwocki</option>
<option value="piaseczynski">piaseczy\u0144ski</option>
<option value="plocki">p\u0142ocki</option>
<option value="plonski">p\u0142o\u0144ski</option>
<option value="pruszkowski">pruszkowski</option>
<option value="przasnyski">przasnyski</option>
<option value="przysuski">przysuski</option>
<option value="pultuski">pu\u0142tuski</option>
<option value="radomski">radomski</option>
<option value="siedlecki">siedlecki</option>
<option value="sierpecki">sierpecki</option>
<option value="sochaczewski">sochaczewski</option>
<option value="sokolowski">soko\u0142owski</option>
<option value="szydlowiecki">szyd\u0142owiecki</option>
<option value="warszawski-zachodni">warszawski zachodni</option>
<option value="wegrowski">w\u0119growski</option>
<option value="wolominski">wo\u0142omi\u0144ski</option>
<option value="wyszkowski">wyszkowski</option>
<option value="zwolenski">zwole\u0144ski</option>
<option value="zurominski">\u017curomi\u0144ski</option>
<option value="zyrardowski">\u017cyrardowski</option>
<option value="opole">Opole</option>
<option value="brzeski">brzeski</option>
<option value="glubczycki">g\u0142ubczyski</option>
<option value="kedzierzynsko-kozielski">k\u0119dzierzy\u0144ski-kozielski</option>
<option value="kluczborski">kluczborski</option>
<option value="krapkowicki">krapkowicki</option>
<option value="namyslowski">namys\u0142owski</option>
<option value="nyski">nyski</option>
<option value="oleski">oleski</option>
<option value="opolski">opolski</option>
<option value="prudnicki">prudnicki</option>
<option value="strzelecki">strzelecki</option>
<option value="rzeszow">Rzesz\xf3w</option>
<option value="krosno">Krosno</option>
<option value="przemysl">Przemy\u015bl</option>
<option value="tarnobrzeg">Tarnobrzeg</option>
<option value="bieszczadzki">bieszczadzki</option>
<option value="brzozowski">brzozowski</option>
<option value="debicki">d\u0119bicki</option>
<option value="jaroslawski">jaros\u0142awski</option>
<option value="jasielski">jasielski</option>
<option value="kolbuszowski">kolbuszowski</option>
<option value="krosnienski">kro\u015bnie\u0144ski</option>
<option value="leski">leski</option>
<option value="lezajski">le\u017cajski</option>
<option value="lubaczowski">lubaczowski</option>
<option value="lancucki">\u0142a\u0144cucki</option>
<option value="mielecki">mielecki</option>
<option value="nizanski">ni\u017ca\u0144ski</option>
<option value="przemyski">przemyski</option>
<option value="przeworski">przeworski</option>
<option value="ropczycko-sedziszowski">ropczycko-s\u0119dziszowski</option>
<option value="rzeszowski">rzeszowski</option>
<option value="sanocki">sanocki</option>
<option value="stalowowolski">stalowowolski</option>
<option value="strzyzowski">strzy\u017cowski</option>
<option value="tarnobrzeski">tarnobrzeski</option>
<option value="bialystok">Bia\u0142ystok</option>
<option value="lomza">\u0141om\u017ca</option>
<option value="suwalki">Suwa\u0142ki</option>
<option value="augustowski">augustowski</option>
<option value="bialostocki">bia\u0142ostocki</option>
<option value="bielski">bielski</option>
<option value="grajewski">grajewski</option>
<option value="hajnowski">hajnowski</option>
<option value="kolnenski">kolne\u0144ski</option>
<option value="\u0142omzynski">\u0142om\u017cy\u0144ski</option>
<option value="moniecki">moniecki</option>
<option value="sejnenski">sejne\u0144ski</option>
<option value="siemiatycki">siematycki</option>
<option value="sokolski">sok\xf3lski</option>
<option value="suwalski">suwalski</option>
<option value="wysokomazowiecki">wysokomazowiecki</option>
<option value="zambrowski">zambrowski</option>
<option value="gdansk">Gda\u0144sk</option>
<option value="gdynia">Gdynia</option>
<option value="slupsk">S\u0142upsk</option>
<option value="sopot">Sopot</option>
<option value="bytowski">bytowski</option>
<option value="chojnicki">chojnicki</option>
<option value="czluchowski">cz\u0142uchowski</option>
<option value="kartuski">kartuski</option>
<option value="koscierski">ko\u015bcierski</option>
<option value="kwidzynski">kwidzy\u0144ski</option>
<option value="leborski">l\u0119borski</option>
<option value="malborski">malborski</option>
<option value="nowodworski">nowodworski</option>
<option value="gdanski">gda\u0144ski</option>
<option value="pucki">pucki</option>
<option value="slupski">s\u0142upski</option>
<option value="starogardzki">starogardzki</option>
<option value="sztumski">sztumski</option>
<option value="tczewski">tczewski</option>
<option value="wejherowski">wejcherowski</option>
<option value="katowice" selected="selected">Katowice</option>
<option value="bielsko-biala">Bielsko-Bia\u0142a</option>
<option value="bytom">Bytom</option>
<option value="chorzow">Chorz\xf3w</option>
<option value="czestochowa">Cz\u0119stochowa</option>
<option value="dabrowa-gornicza">D\u0105browa G\xf3rnicza</option>
<option value="gliwice">Gliwice</option>
<option value="jastrzebie-zdroj">Jastrz\u0119bie Zdr\xf3j</option>
<option value="jaworzno">Jaworzno</option>
<option value="myslowice">Mys\u0142owice</option>
<option value="piekary-slaskie">Piekary \u015al\u0105skie</option>
<option value="ruda-slaska">Ruda \u015al\u0105ska</option>
<option value="rybnik">Rybnik</option>
<option value="siemianowice-slaskie">Siemianowice \u015al\u0105skie</option>
<option value="sosnowiec">Sosnowiec</option>
<option value="swietochlowice">\u015awi\u0119toch\u0142owice</option>
<option value="tychy">Tychy</option>
<option value="zabrze">Zabrze</option>
<option value="zory">\u017bory</option>
<option value="bedzinski">b\u0119dzi\u0144ski</option>
<option value="bielski">bielski</option>
<option value="bierunsko-ledzinski">bieru\u0144sko-l\u0119dzi\u0144ski</option>
<option value="cieszynski">cieszy\u0144ski</option>
<option value="czestochowski">cz\u0119stochowski</option>
<option value="gliwicki">gliwicki</option>
<option value="klobucki">k\u0142obucki</option>
<option value="lubliniecki">lubliniecki</option>
<option value="mikolowski">miko\u0142owski</option>
<option value="myszkowski">myszkowski</option>
<option value="pszczynski">pszczy\u0144ski</option>
<option value="raciborski">raciborski</option>
<option value="rybnicki">rybnicki</option>
<option value="tarnogorski">tarnog\xf3rski</option>
<option value="wodzislawski">wodzis\u0142awski</option>
<option value="zawiercianski">zawiercia\u0144ski</option>
<option value="zywiecki">\u017cywiecki</option>
<option value="kielce">Kielce</option>
<option value="buski">buski</option>
<option value="jedrzejowski">j\u0119drzejowski</option>
<option value="kazimierski">kazimierski</option>
<option value="kielecki">kielecki</option>
<option value="konecki">konecki</option>
<option value="opatowski">opatowski</option>
<option value="ostrowiecki">ostrowiecki</option>
<option value="pinczowski">pi\u0144czowski</option>
<option value="sandomierski">sandomierski</option>
<option value="skarzyski">skar\u017cyski</option>
<option value="starachowicki">starachowicki</option>
<option value="staszowski">staszowski</option>
<option value="wloszczowski">w\u0142oszczowski</option>
<option value="olsztyn">Olsztyn</option>
<option value="elblag">Elbl\u0105g</option>
<option value="bartoszycki">bartoszycki</option>
<option value="braniewski">braniewski</option>
<option value="dzialdowski">dzia\u0142dowski</option>
<option value="elblaski">elbl\u0105ski</option>
<option value="elcki">e\u0142cki</option>
<option value="gizycki">gi\u017cycki</option>
<option value="goldapski">go\u0142dapski</option>
<option value="ilawski">i\u0142awski</option>
<option value="ketrzynski">k\u0119trzy\u0144ski</option>
<option value="lidzbarski">lidzbarski</option>
<option value="mragowski">mr\u0105gowski</option>
<option value="nidzicki">nidzicki</option>
<option value="nowomiejski">nowomiejski</option>
<option value="olecki">olecki</option>
<option value="olsztynski">olszty\u0144ski</option>
<option value="ostrodzki">ostr\xf3dzki</option>
<option value="piski">piski</option>
<option value="szczycienski">szczycie\u0144ski</option>
<option value="wegorzewski">w\u0119gorzewski</option>
<option value="poznan">Pozna\u0144</option>
<option value="kalisz">Kalisz</option>
<option value="konin">Konin</option>
<option value="leszno">Leszno</option>
<option value="chodzieski">chodziejski</option>
<option value="czarnkowsko-trzcianecki">czarnkowsko-trzcianecki</option>
<option value="gnieznienski">gnie\u017anie\u0144ski</option>
<option value="gostynski">gosty\u0144ski</option>
<option value="grodziski">grodziski</option>
<option value="jarocinski">jaroci\u0144ski</option>
<option value="kaliski">kaliski</option>
<option value="kepinski">k\u0119pi\u0144ski</option>
<option value="kolski">kolski</option>
<option value="koninski">koni\u0144ski</option>
<option value="koscianski">ko\u015bcia\u0144ski</option>
<option value="krotoszynski">krotoszy\u0144ski</option>
<option value="leszczynski">leszczy\u0144ski</option>
<option value="miedzychodzki">mi\u0119dzychodzki</option>
<option value="nowotomyski">nowotomyski</option>
<option value="obornicki">obornicki</option>
<option value="ostrowski">ostrowski</option>
<option value="ostrzeszowski">ostrzeszowski</option>
<option value="pilski">pilski</option>
<option value="pleszewski">pleszewski</option>
<option value="poznanski">pozna\u0144ski</option>
<option value="rawicki">rawicki</option>
<option value="slupecki">s\u0142upecki</option>
<option value="szamotulski">szamotulski</option>
<option value="sredzki">\u015bredzki</option>
<option value="sremski">\u015bremski</option>
<option value="turecki">turecki</option>
<option value="wagrowiecki">w\u0105growiecki</option>
<option value="wolsztynski">wolszty\u0144ski</option>
<option value="wrzesinski">wrzesi\u0144ski</option>
<option value="zlotowski">z\u0142otowski</option>
<option value="bialogardzki">bia\u0142ogardzki</option>
<option value="choszczenski">choszcze\u0144ski</option>
<option value="drawski">drawski</option>
<option value="goleniowski">goleniowski</option>
<option value="gryficki">gryficki</option>
<option value="gryfinski">gryfi\u0144ski</option>
<option value="kamienski">kamie\u0144ski</option>
<option value="kolobrzeski">ko\u0142obrzeski</option>
<option value="koszalinski">koszali\u0144ski</option>
<option value="lobeski">\u0142obeski</option>
<option value="mysliborski">my\u015bliborski</option>
<option value="policki">policki</option>
<option value="pyrzycki">pyrzycki</option>
<option value="slawienski">s\u0142awie\u0144ski</option>
<option value="stargardzki">stargardzki</option>
<option value="szczecinecki">szczecinecki</option>
<option value="swidwinski">\u015bwidwi\u0144ski</option>
<option value="walecki">wa\u0142ecki</option>
</select>'''
self.assertHTMLEqual(f.render('administrativeunit', 'katowice'), out)
def test_PLPostalCodeField(self):
error_format = [u'Enter a postal code in the format XX-XXX.']
valid = {
'41-403': '41-403',
}
invalid = {
'43--434': error_format,
}
self.assertFieldOutput(PLPostalCodeField, valid, invalid)
def test_PLNIPField(self):
error_format = [u'Enter a tax number field (NIP) in the format XXX-XXX-XX-XX, XXX-XX-XX-XXX or XXXXXXXXXX.']
error_checksum = [u'Wrong checksum for the Tax Number (NIP).']
valid = {
'646-241-41-24': '6462414124',
'646-24-14-124': '6462414124',
'6462414124': '6462414124',
}
invalid = {
'43-343-234-323': error_format,
'64-62-414-124': error_format,
'646-241-41-23': error_checksum,
}
self.assertFieldOutput(PLNIPField, valid, invalid)
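        # Illustrative note (assumed algorithm, not part of the test): the NIP
        # checksum weights the first nine digits by (6, 5, 7, 2, 3, 4, 5, 6, 7);
        # the weighted sum modulo 11 must equal the tenth digit. For the valid
        # value '6462414124': 6*6 + 4*5 + 6*7 + 2*2 + 4*3 + 1*4 + 4*5 + 1*6 + 2*7
        # = 158, and 158 % 11 == 4, the final digit.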
def test_PLPESELField(self):
error_checksum = [u'Wrong checksum for the National Identification Number.']
error_format = [u'National Identification Number consists of 11 digits.']
valid = {
'80071610614': '80071610614',
}
invalid = {
'80071610610': error_checksum,
'80': error_format,
'800716106AA': error_format,
}
self.assertFieldOutput(PLPESELField, valid, invalid)
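        # Illustrative note (assumed algorithm, not part of the test): the PESEL
        # check digit weights the first ten digits by (1, 3, 7, 9, 1, 3, 7, 9,
        # 1, 3) and equals (10 - sum % 10) % 10. For the valid '80071610614'
        # the weighted sum is 106, giving (10 - 6) % 10 == 4, the final digit.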
def test_PLNationalIDCardNumberField(self):
error_checksum = [u'Wrong checksum for the National ID Card Number.']
error_format = [u'National ID Card Number consists of 3 letters and 6 digits.']
valid = {
'ABC123458': 'ABC123458',
'abc123458': 'ABC123458',
}
invalid = {
'ABC123457': error_checksum,
'abc123457': error_checksum,
'a12Aaaaaa': error_format,
'AA1234443': error_format,
}
self.assertFieldOutput(PLNationalIDCardNumberField, valid, invalid)
def test_PLREGONField(self):
error_checksum = [u'Wrong checksum for the National Business Register Number (REGON).']
error_format = [u'National Business Register Number (REGON) consists of 9 or 14 digits.']
valid = {
'12345678512347': '12345678512347',
'590096454': '590096454',
}
invalid = {
'123456784': error_checksum,
'12345678412342': error_checksum,
'590096453': error_checksum,
'590096': error_format,
}
self.assertFieldOutput(PLREGONField, valid, invalid)
| bsd-3-clause |
bobcyw/django | django/contrib/auth/backends.py | 468 | 6114 | from __future__ import unicode_literals
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
class ModelBackend(object):
"""
Authenticates against settings.AUTH_USER_MODEL.
"""
def authenticate(self, username=None, password=None, **kwargs):
UserModel = get_user_model()
if username is None:
username = kwargs.get(UserModel.USERNAME_FIELD)
try:
user = UserModel._default_manager.get_by_natural_key(username)
if user.check_password(password):
return user
except UserModel.DoesNotExist:
# Run the default password hasher once to reduce the timing
# difference between an existing and a non-existing user (#20760).
UserModel().set_password(password)
def _get_user_permissions(self, user_obj):
return user_obj.user_permissions.all()
def _get_group_permissions(self, user_obj):
user_groups_field = get_user_model()._meta.get_field('groups')
user_groups_query = 'group__%s' % user_groups_field.related_query_name()
return Permission.objects.filter(**{user_groups_query: user_obj})
def _get_permissions(self, user_obj, obj, from_name):
"""
Returns the permissions of `user_obj` from `from_name`. `from_name` can
be either "group" or "user" to return permissions from
`_get_group_permissions` or `_get_user_permissions` respectively.
"""
if not user_obj.is_active or user_obj.is_anonymous() or obj is not None:
return set()
perm_cache_name = '_%s_perm_cache' % from_name
if not hasattr(user_obj, perm_cache_name):
if user_obj.is_superuser:
perms = Permission.objects.all()
else:
perms = getattr(self, '_get_%s_permissions' % from_name)(user_obj)
perms = perms.values_list('content_type__app_label', 'codename').order_by()
setattr(user_obj, perm_cache_name, set("%s.%s" % (ct, name) for ct, name in perms))
return getattr(user_obj, perm_cache_name)
def get_user_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings the user `user_obj` has from their
`user_permissions`.
"""
return self._get_permissions(user_obj, obj, 'user')
def get_group_permissions(self, user_obj, obj=None):
"""
Returns a set of permission strings the user `user_obj` has from the
groups they belong.
"""
return self._get_permissions(user_obj, obj, 'group')
def get_all_permissions(self, user_obj, obj=None):
if not user_obj.is_active or user_obj.is_anonymous() or obj is not None:
return set()
if not hasattr(user_obj, '_perm_cache'):
user_obj._perm_cache = self.get_user_permissions(user_obj)
user_obj._perm_cache.update(self.get_group_permissions(user_obj))
return user_obj._perm_cache
def has_perm(self, user_obj, perm, obj=None):
if not user_obj.is_active:
return False
return perm in self.get_all_permissions(user_obj, obj)
def has_module_perms(self, user_obj, app_label):
"""
Returns True if user_obj has any permissions in the given app_label.
"""
if not user_obj.is_active:
return False
for perm in self.get_all_permissions(user_obj):
if perm[:perm.index('.')] == app_label:
return True
return False
def get_user(self, user_id):
UserModel = get_user_model()
try:
return UserModel._default_manager.get(pk=user_id)
except UserModel.DoesNotExist:
return None
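# Illustrative usage sketch (not part of this module; the username, password,
# and permission values are hypothetical). With this backend listed in
# AUTHENTICATION_BACKENDS, authentication and permission checks route through
# the methods above:
#
#     from django.contrib.auth import authenticate
#     user = authenticate(username='alice', password='s3cret')
#     if user is not None and user.has_perm('polls.change_poll'):
#         ...  # permission strings are '<app_label>.<codename>'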
class RemoteUserBackend(ModelBackend):
"""
This backend is to be used in conjunction with the ``RemoteUserMiddleware``
found in the middleware module of this package, and is used when the server
is handling authentication outside of Django.
By default, the ``authenticate`` method creates ``User`` objects for
usernames that don't already exist in the database. Subclasses can disable
this behavior by setting the ``create_unknown_user`` attribute to
``False``.
"""
# Create a User object if not already in the database?
create_unknown_user = True
def authenticate(self, remote_user):
"""
The username passed as ``remote_user`` is considered trusted. This
method simply returns the ``User`` object with the given username,
creating a new ``User`` object if ``create_unknown_user`` is ``True``.
Returns None if ``create_unknown_user`` is ``False`` and a ``User``
object with the given username is not found in the database.
"""
if not remote_user:
return
user = None
username = self.clean_username(remote_user)
UserModel = get_user_model()
# Note that this could be accomplished in one try-except clause, but
# instead we use get_or_create when creating unknown users since it has
# built-in safeguards for multiple threads.
if self.create_unknown_user:
user, created = UserModel._default_manager.get_or_create(**{
UserModel.USERNAME_FIELD: username
})
if created:
user = self.configure_user(user)
else:
try:
user = UserModel._default_manager.get_by_natural_key(username)
except UserModel.DoesNotExist:
pass
return user
def clean_username(self, username):
"""
Performs any cleaning on the "username" prior to using it to get or
create the user object. Returns the cleaned username.
By default, returns the username unchanged.
"""
return username
def configure_user(self, user):
"""
Configures a user after creation and returns the updated user.
By default, returns the user unmodified.
"""
return user
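# Illustrative sketch (assumption: a deployment where REMOTE_USER carries a
# 'user@REALM' style principal). A subclass can strip the realm and disable
# user auto-creation by overriding the hooks above:
#
#     class MyRemoteUserBackend(RemoteUserBackend):
#         create_unknown_user = False
#
#         def clean_username(self, username):
#             return username.partition('@')[0]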
| bsd-3-clause |
ucsb-cs/submit | submit/__init__.py | 1 | 5135 | from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from pyramid.config import Configurator
from pyramid.security import ALL_PERMISSIONS, Allow, Authenticated
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from sqlalchemy import engine_from_config
from .helpers import get_queue_func
from .models import configure_sql, create_schema, populate_database
from .security import get_user, group_finder
__version__ = '1.3.3'
class Root(object):
__acl__ = [(Allow, Authenticated, 'authenticated'),
(Allow, 'admin', ALL_PERMISSIONS)]
def __init__(self, request):
self.request = request
def add_routes(config):
# Application routes
config.add_route('home', '/')
config.add_route('robots', '/robots.txt')
config.add_route('build_file', '/build_file')
config.add_route('build_file_item', '/build_file/{build_file_id}')
config.add_route('class', '/class')
    config.add_route('class.admins', '/class/{class_id}/admins')
config.add_route('class_item', '/class/{class_id}')
config.add_route('execution_file', '/execution_file')
config.add_route('execution_file_item',
'/execution_file/{execution_file_id}')
config.add_route('file', '/file')
config.add_route('file_item', '/file/{sha1sum}/{filename}')
config.add_route('file_verifier', '/file_verifier')
config.add_route('file_verifier_item', '/file_verifier/{file_verifier_id}')
config.add_route('password_reset', '/password_reset')
config.add_route('password_reset_item', '/password_reset/{token}')
config.add_route('project', '/p')
config.add_route('project_export', '/p/{project_id}/export')
config.add_route('project_import', '/p/{project_id}/import')
config.add_route('project_group', '/p/{project_id}/group')
config.add_route('project_group_item',
'/p/{project_id}/group/{group_request_id}')
config.add_route('project_info', '/p/{project_id}/info')
config.add_route('project_item_download', '/p/{project_id}/download')
config.add_route('project_item_summary', '/p/{project_id}')
config.add_route('project_item_detailed', '/p/{project_id}/g/{group_id}')
config.add_route('project_item_detailed_user',
'/p/{project_id}/u/{username}')
config.add_route('project_scores', '/p/{project_id}/scores')
config.add_route('session', '/session')
config.add_route('submission', '/submission')
config.add_route('submission_item', '/submission/{submission_id}')
config.add_route('submission_item_gen', '/submission/{submission_id}/gen')
config.add_route('test_case', '/test_case')
config.add_route('test_case_item', '/test_case/{test_case_id}')
config.add_route('testable', '/testable')
config.add_route('testable_item', '/testable/{testable_id}')
config.add_route('user', '/user')
config.add_route('user_item', '/user/{username}')
config.add_route('zipfile_download', '/zipfile_download/{submission_id}')
# Form view routes
config.add_route('class_new', '/form/class')
config.add_route('class_edit', '/form/class/{class_id}')
config.add_route('project_new', '/form/class/{class_id}/project')
config.add_route('project_edit', '/form/project/{project_id}')
config.add_route('group_admin', '/form/project/{project_id}/group')
config.add_route('submission_new', '/form/project/{project_id}/submission')
config.add_route('user_new', '/form/user')
config.add_route('user_new_special', '/form/user_special')
config.add_route('user_join', '/form/user/join')
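# Illustrative sketch (hypothetical view, not part of this module): how a view
# callable might consume one of the routes registered above via the matchdict:
#
#     from pyramid.view import view_config
#
#     @view_config(route_name='project_info', permission='authenticated',
#                  renderer='json')
#     def project_info(request):
#         return {'project_id': request.matchdict['project_id']}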
def main(global_config, **settings):
""" This function returns a Pyramid WSGI application.
"""
# Initialize the database
engine = engine_from_config(settings, 'sqlalchemy.')
configure_sql(engine)
secure_cookies = settings.get('secure_cookies') != 'false'
if 'pyramid_debugtoolbar' in settings['pyramid.includes']:
create_schema(global_config['__file__'])
populate_database()
secure_cookies = False
# Configure the webapp
authen = AuthTktAuthenticationPolicy(secret=settings['auth_secret'],
callback=group_finder,
secure=secure_cookies,
include_ip=False, hashalg='sha512',
wild_domain=False, max_age=5000000)
author = ACLAuthorizationPolicy()
session_factory = UnencryptedCookieSessionFactoryConfig(
settings['cookie_secret'])
config = Configurator(settings=settings, authentication_policy=authen,
authorization_policy=author, root_factory=Root,
session_factory=session_factory)
config.add_static_view('static', 'static', cache_max_age=3600)
# Add attributes to request
config.add_request_method(get_user, 'user', reify=True)
config.add_request_method(get_queue_func, 'queue', reify=True)
add_routes(config)
config.scan()
return config.make_wsgi_app()
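# Illustrative sketch (hypothetical values): the PasteDeploy settings this
# entry point reads, e.g. in a development.ini:
#
#     [app:main]
#     use = egg:submit
#     sqlalchemy.url = sqlite:///%(here)s/submit.db
#     auth_secret = REPLACE_ME
#     cookie_secret = REPLACE_ME
#     pyramid.includes = pyramid_debugtoolbar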
| bsd-2-clause |
qrkourier/ansible | lib/ansible/plugins/connection/iocage.py | 44 | 2394 | # based on jail.py (c) 2013, Michael Scherer <[email protected]>
# (c) 2015, Toshio Kuratomi <[email protected]>
# (c) 2016, Stephan Lohse <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import subprocess
from ansible.plugins.connection.jail import Connection as Jail
from ansible.errors import AnsibleError
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class Connection(Jail):
''' Local iocage based connections '''
transport = 'iocage'
def __init__(self, play_context, new_stdin, *args, **kwargs):
self.ioc_jail = play_context.remote_addr
self.iocage_cmd = Jail._search_executable('iocage')
jail_uuid = self.get_jail_uuid()
kwargs[Jail.modified_jailname_key] = 'ioc-{}'.format(jail_uuid)
display.vvv(u"Jail {iocjail} has been translated to {rawjail}".format(
iocjail=self.ioc_jail, rawjail=kwargs[Jail.modified_jailname_key]),
host=kwargs[Jail.modified_jailname_key])
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
def get_jail_uuid(self):
p = subprocess.Popen([self.iocage_cmd, 'get', 'host_hostuuid', self.ioc_jail],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = p.communicate()
# otherwise p.returncode would not be set
p.wait()
if p.returncode != 0:
raise AnsibleError(u"iocage returned an error: {}".format(stdout))
return stdout.strip('\n')
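# Illustrative usage sketch (hypothetical jail name): with an inventory entry
# such as
#
#     web01 ansible_connection=iocage ansible_host=web01
#
# the remote_addr above is treated as the iocage jail tag and translated to
# the raw 'ioc-<uuid>' jail name before delegating to the jail plugin.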
| gpl-3.0 |
mansilladev/zulip | bots/zulip_git_config.py | 125 | 1688 | # Zulip, Inc's internal git plugin configuration.
# The plugin and example config are under api/integrations/
# Leaving all the instructions out of this file to avoid having to
# sync them as we update the comments.
ZULIP_USER = "[email protected]"
ZULIP_API_KEY = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
# commit_notice_destination() lets you customize where commit notices
# are sent to.
#
# It takes the following arguments:
# * repo = the name of the git repository
# * branch = the name of the branch that was pushed to
# * commit = the commit id
#
# Returns a dictionary encoding the stream and subject to send the
# notification to (or None to send no notification).
#
# The default code below will send every commit pushed to "master" to
# * stream "commits"
# * topic "master"
# And similarly for branch "test-post-receive" (for use when testing).
def commit_notice_destination(repo, branch, commit):
if branch in ["master", "prod", "test-post-receive"]:
return dict(stream = 'test' if 'test-' in branch else 'commits',
subject = u"%s" % (branch,))
# Return None for cases where you don't want a notice sent
return None
# Modify this function to change how commits are displayed; the most
# common customization is to include a link to the commit in your
# graphical repository viewer, e.g.
#
# return '!avatar(%s) [%s](https://example.com/commits/%s)\n' % (author, subject, commit_id)
def format_commit_message(author, subject, commit_id):
return '!avatar(%s) [%s](https://git.zulip.net/eng/zulip/commit/%s)\n' % (author, subject, commit_id)
ZULIP_API_PATH = "/home/zulip/zulip/api"
ZULIP_SITE = "https://zulip.com"
| apache-2.0 |
40223114/2015_g4 | static/Brython3.1.0-20150301-090019/Lib/unittest/test/test_functiontestcase.py | 791 | 5478 | import unittest
from .support import LoggingResult
class Test_FunctionTestCase(unittest.TestCase):
# "Return the number of tests represented by the this test object. For
# TestCase instances, this will always be 1"
def test_countTestCases(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.countTestCases(), 1)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if setUp() raises
# an exception.
def test_run_call_order__error_in_setUp(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
raise RuntimeError('raised by setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test raises
# an error (as opposed to a failure).
def test_run_call_order__error_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
raise RuntimeError('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addError', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if the test signals
# a failure (as opposed to an error).
def test_run_call_order__failure_in_test(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
self.fail('raised by test')
def tearDown():
events.append('tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown',
'addFailure', 'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "When a setUp() method is defined, the test runner will run that method
# prior to each test. Likewise, if a tearDown() method is defined, the
# test runner will invoke that method after each test. In the example,
# setUp() was used to create a fresh sequence for each test."
#
# Make sure the proper call order is maintained, even if tearDown() raises
# an exception.
def test_run_call_order__error_in_tearDown(self):
events = []
result = LoggingResult(events)
def setUp():
events.append('setUp')
def test():
events.append('test')
def tearDown():
events.append('tearDown')
raise RuntimeError('raised by tearDown')
expected = ['startTest', 'setUp', 'test', 'tearDown', 'addError',
'stopTest']
unittest.FunctionTestCase(test, setUp, tearDown).run(result)
self.assertEqual(events, expected)
# "Return a string identifying the specific test case."
#
# Because of the vague nature of the docs, I'm not going to lock this
# test down too much. Really all that can be asserted is that the id()
# will be a string (either 8-byte or unicode -- again, because the docs
# just say "string")
def test_id(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertIsInstance(test.id(), str)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__no_docstring(self):
test = unittest.FunctionTestCase(lambda: None)
self.assertEqual(test.shortDescription(), None)
# "Returns a one-line description of the test, or None if no description
# has been provided. The default implementation of this method returns
# the first line of the test method's docstring, if available, or None."
def test_shortDescription__singleline_docstring(self):
desc = "this tests foo"
test = unittest.FunctionTestCase(lambda: None, description=desc)
self.assertEqual(test.shortDescription(), "this tests foo")
| gpl-3.0 |
NedJunk/APlasticBag | driver.py | 1 | 1662 | import tweepy, yaml
class Bot(object):
"""docstring for Bot"""
def __init__(self):
super(Bot, self).__init__()
self.creds = None
self.api = self.load_api()
def load_api(self):
with open('credentials.yaml', 'r') as f:
            self.creds = yaml.safe_load(f)
auth = tweepy.OAuthHandler(self.creds['CONSUMER_KEY'], self.creds['CONSUMER_SECRET'])
auth.set_access_token(self.creds['ACCESS_TOKEN'], self.creds['ACCESS_TOKEN_SECRET'])
return tweepy.API(auth)
def tweet(self):
question = self.pop_question()
self.api.update_status(question)
def pop_question(self):
with open('questions.txt', 'r+') as f:
questions = f.readlines()
q = questions[0]
f.seek(0)
f.truncate()
f.writelines(questions[1:])
return q.strip()
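    # Illustrative note (assumed input format): questions.txt is expected to
    # hold one tweet-sized question per line; pop_question() consumes and
    # returns the first line, rewriting the file without it, e.g.:
    #
    #     Paper or plastic?
    #     What would a plastic bag say?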
def get_my_tweets(self):
return [status._json for status in tweepy.Cursor(self.api.user_timeline).items()]
def get_my_tweet(self, status_id):
for s in self.get_my_tweets():
if s['id'] == status_id:
print(s['id'])
# return s
def get_response_to(self, status_id):
for m in self.get_my_mentions():
if m['in_reply_to_status_id'] == status_id:
print(m['text'])
def get_my_mentions(self):
return [mention._json for mention in tweepy.Cursor(self.api.mentions_timeline).items()]
def test(self):
# print('++++++++++')
# print(self.api)
# print('++++++++++')
# for k, v in self.creds.items():
# print(k, v)
# self.tweet()
# print([tweet['id'] for tweet in self.get_my_tweets()])
# print(self.get_my_tweet(767443708417486848)['text'])
self.get_response_to(767442132546170881)
def main():
bot = Bot()
bot.test()
if __name__ == '__main__':
main() | mit |
kabakchey/django-annoying | tests/settings.py | 2 | 1372 | import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'tests',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tests.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
| bsd-3-clause |
Grumbel/rfactortools | minised.py | 1 | 5031 | #!/usr/bin/env python3
# Minimal sed replacement
# Copyright (C) 2014 Ingo Ruhnke <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import argparse
import fnmatch
import os
import re
import sys
def minised_on_lines(lines, pattern, replacement, ignore_case, only_replaced_lines=False):
result = []
flags = 0
if ignore_case:
flags |= re.I
rx = re.compile(pattern, flags)
for line in lines:
m = rx.search(line)
if m:
left = line[:m.span()[0]]
right = line[m.span()[1]:]
middle = line[m.span()[0]:m.span()[1]]
if replacement is not None:
expanded = m.expand(replacement)
print("- %s%s%s%s%s" % (left, "{{{", middle, "}}}", right))
print("+ %s%s%s%s%s" % (left, "{{{", expanded, "}}}", right))
print()
result.append(left + expanded + right)
else:
print("%s%s%s%s%s" % (left, "{{{", middle, "}}}", right))
result.append(line)
else:
if not only_replaced_lines:
result.append(line)
return result
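# Illustrative invocations (hypothetical file names):
#
#     ./minised.py -p 'Version=(\d+)' somefile.ini          # preview matches
#     ./minised.py -p 'Version=\d+' -r 'Version=2' -i somefile.ini
#     ./minised.py -R '*.ini' -p 'Version=\d+' -r 'Version=2' -i somedir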
def minised_on_file(filename, outfile, pattern, replace, ignore_case, dry_run):
if filename:
with open(filename, 'rt', encoding='latin-1') as fin:
lines = fin.read().splitlines()
else:
lines = sys.stdin.read().splitlines()
output = minised_on_lines(lines, pattern, replace, ignore_case)
if not dry_run:
if outfile:
with open(outfile, 'wt', newline='\r\n', encoding='latin-1', errors="replace") as fout:
for line in output:
fout.write(line)
fout.write("\n")
else:
for line in output:
print(line)
def main():
parser = argparse.ArgumentParser(description="A minimal sed-like tool")
parser.add_argument('FILE', action='store', nargs='?',
help="files to process")
parser.add_argument("-i", "--in-place", action='store_true', default=False,
help="modify files in place")
parser.add_argument("-o", "--output", metavar="FILE", type=str, default=None,
help="write output to FILE")
parser.add_argument("-n", "--dry-run", action='store_true', default=False,
help="only show modifications, without actually modifying anything")
parser.add_argument("-p", "--pattern", metavar="PAT", type=str, required=True,
help="the search pattern expression")
parser.add_argument("-r", "--replace", metavar="REPL", type=str,
help="the replacement expression, if not given just print the match")
parser.add_argument("-R", "--recursive", metavar="GLOB", type=str,
help="interprets the FILE argument as perform replacement in all files matching GLOB")
parser.add_argument("-I", "--ignore-case", action='store_true', default=False,
help="ignore case")
parser.add_argument("-v", "--verbose", action='store_true', default=False,
help="display the replacements are performed")
args = parser.parse_args()
if args.replace is None:
dry_run = True
else:
dry_run = args.dry_run
if args.recursive is not None:
if args.output:
raise Exception("can't use --output and recursive together")
for path, dirs, files in os.walk(args.FILE):
for fname in files:
filename = os.path.join(path, fname)
if args.in_place:
outfile = filename
else:
outfile = None
if fnmatch.fnmatch(fname.lower(), args.recursive.lower()):
print("%s:" % filename)
minised_on_file(filename, outfile, args.pattern, args.replace, args.ignore_case, dry_run)
else:
if args.output:
if args.recursive:
raise Exception("can't use --output and recursive together")
else:
outfile = args.output
elif args.in_place:
outfile = args.FILE
else:
outfile = None
minised_on_file(args.FILE, outfile, args.pattern, args.replace, args.ignore_case, dry_run)
if __name__ == "__main__":
main()
# EOF #
| gpl-3.0 |
Victorgichohi/ChromeWebLab | Orchestra/sw/hub/orchestra/messenger/websocketclient.py | 7 | 5137 | #
# websocketclient.py: WebSockets connection to Node.js server
#
# Copyright 2013 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.application import internet, service
from twisted.python import log
from twisted.internet.protocol import ReconnectingClientFactory
from autobahn.websocket import WebSocketClientFactory, WebSocketClientProtocol
from time import time
import json
import urllib2
from format import *
from config import config
sequencer = __import__(config['osc']['sequencer'])
clients = []
class MessengerClientProtocol(WebSocketClientProtocol):
"""
A Twisted Protocol models the connection to the server.
This class is used to receive and send messages to the
realtime (Node.js) server.
"""
def onOpen(self):
pass
def onConnect(self, connectionResponse):
global clients
clients.append(self)
def onClose(self, wasClean, code, reason):
global clients
try:
clients.remove(self)
except Exception, e:
pass # client closed before it fully connected?
def onMessage(self, message, binary):
"""
Server sent us a message: most likely related to note state.
"""
message_type = decode_message_type(message)
log.msg("Received message '%s'" % message_type)
# Layout: Parse through all instruments & notes at once
        if message_type == 'current_layout':
sequencer.set_instruments(decode_layout(message))
# CRUD for notes
        elif message_type in ('change_note', 'add_note', 'remove_note'):
instrument_id, note = decode_note(message)
# Notify sequencer of the change, get schedule note time
sequencer.update_instrument(instrument_id, message_type, note)
# just report current server time for all note messages
note_time = int(time() * 1000)
# Confirm note with server
confirmation_message = encode_note_confirmation(message_type, message, note_time)
self.sendMessage(confirmation_message)
# Error, typically in response to our last message
        elif message_type == 'error':
log.err("Server error: %s" % message[1:])
def send_loop_times(self, loop_times):
"""
Send next n loop start times to sync data to playhead
"""
log.msg("Sending loop_times starting with %d" % loop_times[0])
self.sendMessage(encode_loop_times(loop_times))
class ReconnectingWebSocketClientFactory(WebSocketClientFactory, ReconnectingClientFactory):
"""
A Twisted Factory monitors the connection and spins up Protocols
when it's alive.
This factory uses multiple inheritance to combine
Autobahn's WebSocket support with Twisted's reconnecting helpers.
For more information on ReconnectingClientFactory:
http://twistedmatrix.com/documents/current/core/howto/clients.html#auto4
"""
def startedConnecting(self, connector):
log.msg("Connecting to realtime server at %s..." % connector.getDestination().host)
def buildProtocol(self, addr):
log.msg("Connected to realtime server at %s." % addr.host)
# reset exponentially-increasing delay
self.resetDelay()
# build protocol as usual
return WebSocketClientFactory.buildProtocol(self, addr)
def clientConnectionLost(self, connector, reason):
log.err("Lost connection to realtime server at %s: %s" % (connector.getDestination().host, reason))
ReconnectingClientFactory.clientConnectionLost(self, connector, reason)
def clientConnectionFailed(self, connector, reason):
log.err("Failed to connect to realtime server at %s: %s" % (connector.getDestination().host, reason))
ReconnectingClientFactory.clientConnectionFailed(self, connector, reason)
def broadcast_loop_times(loop_times):
"""
Send loop times to all clients
"""
global clients
for client in clients:
client.send_loop_times(loop_times)
def service(host):
"""
Create and return Twisted service (w. assoc. factory w. assoc. protocol)
"""
web_socket_url = "ws://%s:8080/orchestra/connection/instrumentControlJoin" % host
factory = ReconnectingWebSocketClientFactory(web_socket_url)
factory.protocol = MessengerClientProtocol
return internet.TCPClient(factory.host, factory.port, factory)
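# Illustrative sketch (hypothetical host): wiring the client into a twistd
# application, as a .tac file might do:
#
#     from twisted.application.service import Application
#     application = Application('orchestra-messenger')
#     service('realtime.example.com').setServiceParent(application)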
| apache-2.0 |
nexusz99/boto | boto/directconnect/layer1.py | 148 | 23592 | # Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
import boto
from boto.connection import AWSQueryConnection
from boto.regioninfo import RegionInfo
from boto.exception import JSONResponseError
from boto.directconnect import exceptions
from boto.compat import json
class DirectConnectConnection(AWSQueryConnection):
"""
AWS Direct Connect makes it easy to establish a dedicated network
connection from your premises to Amazon Web Services (AWS). Using
AWS Direct Connect, you can establish private connectivity between
AWS and your data center, office, or colocation environment, which
in many cases can reduce your network costs, increase bandwidth
throughput, and provide a more consistent network experience than
Internet-based connections.
The AWS Direct Connect API Reference provides descriptions,
syntax, and usage examples for each of the actions and data types
for AWS Direct Connect. Use the following links to get started
using the AWS Direct Connect API Reference :
+ `Actions`_: An alphabetical list of all AWS Direct Connect
actions.
+ `Data Types`_: An alphabetical list of all AWS Direct Connect
data types.
+ `Common Query Parameters`_: Parameters that all Query actions
can use.
+ `Common Errors`_: Client and server errors that all actions can
return.
"""
APIVersion = "2012-10-25"
DefaultRegionName = "us-east-1"
DefaultRegionEndpoint = "directconnect.us-east-1.amazonaws.com"
ServiceName = "DirectConnect"
TargetPrefix = "OvertureService"
ResponseError = JSONResponseError
_faults = {
"DirectConnectClientException": exceptions.DirectConnectClientException,
"DirectConnectServerException": exceptions.DirectConnectServerException,
}
def __init__(self, **kwargs):
region = kwargs.pop('region', None)
if not region:
region = RegionInfo(self, self.DefaultRegionName,
self.DefaultRegionEndpoint)
if 'host' not in kwargs:
kwargs['host'] = region.endpoint
super(DirectConnectConnection, self).__init__(**kwargs)
self.region = region
def _required_auth_capability(self):
return ['hmac-v4']
def allocate_connection_on_interconnect(self, bandwidth, connection_name,
owner_account, interconnect_id,
vlan):
"""
Creates a hosted connection on an interconnect.
Allocates a VLAN number and a specified amount of bandwidth
for use by a hosted connection on the given interconnect.
:type bandwidth: string
:param bandwidth: Bandwidth of the connection.
Example: " 500Mbps "
Default: None
:type connection_name: string
:param connection_name: Name of the provisioned connection.
Example: " 500M Connection to AWS "
Default: None
:type owner_account: string
:param owner_account: Numeric account Id of the customer for whom the
connection will be provisioned.
Example: 123443215678
Default: None
:type interconnect_id: string
:param interconnect_id: ID of the interconnect on which the connection
will be provisioned.
Example: dxcon-456abc78
Default: None
:type vlan: integer
:param vlan: The dedicated VLAN provisioned to the connection.
Example: 101
Default: None
"""
params = {
'bandwidth': bandwidth,
'connectionName': connection_name,
'ownerAccount': owner_account,
'interconnectId': interconnect_id,
'vlan': vlan,
}
return self.make_request(action='AllocateConnectionOnInterconnect',
body=json.dumps(params))
def allocate_private_virtual_interface(self, connection_id,
owner_account,
new_private_virtual_interface_allocation):
"""
Provisions a private virtual interface to be owned by a
different customer.
The owner of a connection calls this function to provision a
private virtual interface which will be owned by another AWS
customer.
Virtual interfaces created using this function must be
confirmed by the virtual interface owner by calling
ConfirmPrivateVirtualInterface. Until this step has been
completed, the virtual interface will be in 'Confirming'
state, and will not be available for handling traffic.
:type connection_id: string
:param connection_id: The connection ID on which the private virtual
interface is provisioned.
Default: None
:type owner_account: string
:param owner_account: The AWS account that will own the new private
virtual interface.
Default: None
:type new_private_virtual_interface_allocation: dict
:param new_private_virtual_interface_allocation: Detailed information
for the private virtual interface to be provisioned.
Default: None
"""
params = {
'connectionId': connection_id,
'ownerAccount': owner_account,
'newPrivateVirtualInterfaceAllocation': new_private_virtual_interface_allocation,
}
return self.make_request(action='AllocatePrivateVirtualInterface',
body=json.dumps(params))
def allocate_public_virtual_interface(self, connection_id, owner_account,
new_public_virtual_interface_allocation):
"""
Provisions a public virtual interface to be owned by a
different customer.
The owner of a connection calls this function to provision a
public virtual interface which will be owned by another AWS
customer.
Virtual interfaces created using this function must be
confirmed by the virtual interface owner by calling
ConfirmPublicVirtualInterface. Until this step has been
completed, the virtual interface will be in 'Confirming'
state, and will not be available for handling traffic.
:type connection_id: string
:param connection_id: The connection ID on which the public virtual
interface is provisioned.
Default: None
:type owner_account: string
:param owner_account: The AWS account that will own the new public
virtual interface.
Default: None
:type new_public_virtual_interface_allocation: dict
:param new_public_virtual_interface_allocation: Detailed information
for the public virtual interface to be provisioned.
Default: None
"""
params = {
'connectionId': connection_id,
'ownerAccount': owner_account,
'newPublicVirtualInterfaceAllocation': new_public_virtual_interface_allocation,
}
return self.make_request(action='AllocatePublicVirtualInterface',
body=json.dumps(params))
def confirm_connection(self, connection_id):
"""
Confirm the creation of a hosted connection on an
interconnect.
Upon creation, the hosted connection is initially in the
'Ordering' state, and will remain in this state until the
owner calls ConfirmConnection to confirm creation of the
hosted connection.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {'connectionId': connection_id, }
return self.make_request(action='ConfirmConnection',
body=json.dumps(params))
def confirm_private_virtual_interface(self, virtual_interface_id,
virtual_gateway_id):
"""
Accept ownership of a private virtual interface created by
another customer.
After the virtual interface owner calls this function, the
virtual interface will be created and attached to the given
virtual private gateway, and will be available for handling
traffic.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
:type virtual_gateway_id: string
:param virtual_gateway_id: ID of the virtual private gateway that will
be attached to the virtual interface.
A virtual private gateway can be managed via the Amazon Virtual Private
Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
Default: None
"""
params = {
'virtualInterfaceId': virtual_interface_id,
'virtualGatewayId': virtual_gateway_id,
}
return self.make_request(action='ConfirmPrivateVirtualInterface',
body=json.dumps(params))
def confirm_public_virtual_interface(self, virtual_interface_id):
"""
Accept ownership of a public virtual interface created by
another customer.
After the virtual interface owner calls this function, the
specified virtual interface will be created and made available
for handling traffic.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {'virtualInterfaceId': virtual_interface_id, }
return self.make_request(action='ConfirmPublicVirtualInterface',
body=json.dumps(params))
def create_connection(self, location, bandwidth, connection_name):
"""
Creates a new connection between the customer network and a
specific AWS Direct Connect location.
A connection links your internal network to an AWS Direct
Connect location over a standard 1 gigabit or 10 gigabit
Ethernet fiber-optic cable. One end of the cable is connected
to your router, the other to an AWS Direct Connect router. An
AWS Direct Connect location provides access to Amazon Web
Services in the region it is associated with. You can
establish connections with AWS Direct Connect locations in
multiple regions, but a connection in one region does not
provide connectivity to other regions.
:type location: string
:param location: Where the connection is located.
Example: EqSV5
Default: None
:type bandwidth: string
:param bandwidth: Bandwidth of the connection.
Example: 1Gbps
Default: None
:type connection_name: string
:param connection_name: The name of the connection.
Example: " My Connection to AWS "
Default: None
"""
params = {
'location': location,
'bandwidth': bandwidth,
'connectionName': connection_name,
}
return self.make_request(action='CreateConnection',
body=json.dumps(params))
def create_interconnect(self, interconnect_name, bandwidth, location):
"""
        Creates a new interconnect between an AWS Direct Connect
partner's network and a specific AWS Direct Connect location.
An interconnect is a connection which is capable of hosting
other connections. The AWS Direct Connect partner can use an
interconnect to provide sub-1Gbps AWS Direct Connect service
to tier 2 customers who do not have their own connections.
Like a standard connection, an interconnect links the AWS
Direct Connect partner's network to an AWS Direct Connect
location over a standard 1 Gbps or 10 Gbps Ethernet fiber-
optic cable. One end is connected to the partner's router, the
other to an AWS Direct Connect router.
For each end customer, the AWS Direct Connect partner
provisions a connection on their interconnect by calling
AllocateConnectionOnInterconnect. The end customer can then
connect to AWS resources by creating a virtual interface on
their connection, using the VLAN assigned to them by the AWS
Direct Connect partner.
:type interconnect_name: string
:param interconnect_name: The name of the interconnect.
Example: " 1G Interconnect to AWS "
Default: None
:type bandwidth: string
:param bandwidth: The port bandwidth
Example: 1Gbps
Default: None
Available values: 1Gbps,10Gbps
:type location: string
:param location: Where the interconnect is located
Example: EqSV5
Default: None
"""
params = {
'interconnectName': interconnect_name,
'bandwidth': bandwidth,
'location': location,
}
return self.make_request(action='CreateInterconnect',
body=json.dumps(params))
def create_private_virtual_interface(self, connection_id,
new_private_virtual_interface):
"""
Creates a new private virtual interface. A virtual interface
is the VLAN that transports AWS Direct Connect traffic. A
private virtual interface supports sending traffic to a single
virtual private cloud (VPC).
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type new_private_virtual_interface: dict
:param new_private_virtual_interface: Detailed information for the
private virtual interface to be created.
Default: None
"""
params = {
'connectionId': connection_id,
'newPrivateVirtualInterface': new_private_virtual_interface,
}
return self.make_request(action='CreatePrivateVirtualInterface',
body=json.dumps(params))
def create_public_virtual_interface(self, connection_id,
new_public_virtual_interface):
"""
Creates a new public virtual interface. A virtual interface is
the VLAN that transports AWS Direct Connect traffic. A public
virtual interface supports sending traffic to public services
of AWS such as Amazon Simple Storage Service (Amazon S3).
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type new_public_virtual_interface: dict
:param new_public_virtual_interface: Detailed information for the
public virtual interface to be created.
Default: None
"""
params = {
'connectionId': connection_id,
'newPublicVirtualInterface': new_public_virtual_interface,
}
return self.make_request(action='CreatePublicVirtualInterface',
body=json.dumps(params))
def delete_connection(self, connection_id):
"""
Deletes the connection.
Deleting a connection only stops the AWS Direct Connect port
hour and data transfer charges. You need to cancel separately
with the providers any services or charges for cross-connects
or network circuits that connect you to the AWS Direct Connect
location.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {'connectionId': connection_id, }
return self.make_request(action='DeleteConnection',
body=json.dumps(params))
def delete_interconnect(self, interconnect_id):
"""
Deletes the specified interconnect.
:type interconnect_id: string
:param interconnect_id: The ID of the interconnect.
Example: dxcon-abc123
"""
params = {'interconnectId': interconnect_id, }
return self.make_request(action='DeleteInterconnect',
body=json.dumps(params))
def delete_virtual_interface(self, virtual_interface_id):
"""
Deletes a virtual interface.
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {'virtualInterfaceId': virtual_interface_id, }
return self.make_request(action='DeleteVirtualInterface',
body=json.dumps(params))
def describe_connections(self, connection_id=None):
"""
Displays all connections in this region.
If a connection ID is provided, the call returns only that
particular connection.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
"""
params = {}
if connection_id is not None:
params['connectionId'] = connection_id
return self.make_request(action='DescribeConnections',
body=json.dumps(params))
def describe_connections_on_interconnect(self, interconnect_id):
"""
Return a list of connections that have been provisioned on the
given interconnect.
:type interconnect_id: string
        :param interconnect_id: ID of the interconnect on which a list of
            connections is provisioned.
Example: dxcon-abc123
Default: None
"""
params = {'interconnectId': interconnect_id, }
return self.make_request(action='DescribeConnectionsOnInterconnect',
body=json.dumps(params))
def describe_interconnects(self, interconnect_id=None):
"""
Returns a list of interconnects owned by the AWS account.
If an interconnect ID is provided, it will only return this
particular interconnect.
:type interconnect_id: string
:param interconnect_id: The ID of the interconnect.
Example: dxcon-abc123
"""
params = {}
if interconnect_id is not None:
params['interconnectId'] = interconnect_id
return self.make_request(action='DescribeInterconnects',
body=json.dumps(params))
def describe_locations(self):
"""
Returns the list of AWS Direct Connect locations in the
current AWS region. These are the locations that may be
selected when calling CreateConnection or CreateInterconnect.
"""
params = {}
return self.make_request(action='DescribeLocations',
body=json.dumps(params))
def describe_virtual_gateways(self):
"""
Returns a list of virtual private gateways owned by the AWS
account.
You can create one or more AWS Direct Connect private virtual
interfaces linking to a virtual private gateway. A virtual
private gateway can be managed via Amazon Virtual Private
Cloud (VPC) console or the `EC2 CreateVpnGateway`_ action.
"""
params = {}
return self.make_request(action='DescribeVirtualGateways',
body=json.dumps(params))
def describe_virtual_interfaces(self, connection_id=None,
virtual_interface_id=None):
"""
Displays all virtual interfaces for an AWS account. Virtual
interfaces deleted fewer than 15 minutes before
DescribeVirtualInterfaces is called are also returned. If a
connection ID is included then only virtual interfaces
associated with this connection will be returned. If a virtual
interface ID is included then only a single virtual interface
will be returned.
A virtual interface (VLAN) transmits the traffic between the
AWS Direct Connect location and the customer.
:type connection_id: string
:param connection_id: ID of the connection.
Example: dxcon-fg5678gh
Default: None
:type virtual_interface_id: string
:param virtual_interface_id: ID of the virtual interface.
Example: dxvif-123dfg56
Default: None
"""
params = {}
if connection_id is not None:
params['connectionId'] = connection_id
if virtual_interface_id is not None:
params['virtualInterfaceId'] = virtual_interface_id
return self.make_request(action='DescribeVirtualInterfaces',
body=json.dumps(params))
def make_request(self, action, body):
headers = {
'X-Amz-Target': '%s.%s' % (self.TargetPrefix, action),
'Host': self.region.endpoint,
'Content-Type': 'application/x-amz-json-1.1',
'Content-Length': str(len(body)),
}
http_request = self.build_base_http_request(
method='POST', path='/', auth_path='/', params={},
headers=headers, data=body)
response = self._mexe(http_request, sender=None,
override_num_retries=10)
response_body = response.read().decode('utf-8')
boto.log.debug(response_body)
if response.status == 200:
if response_body:
return json.loads(response_body)
else:
json_body = json.loads(response_body)
fault_name = json_body.get('__type', None)
exception_class = self._faults.get(fault_name, self.ResponseError)
raise exception_class(response.status, response.reason,
body=json_body)
| mit |
andresailer/DIRAC | Core/Utilities/SiteSEMapping.py | 3 | 5364 | """ The SiteSEMapping module performs the necessary CS gymnastics to
resolve site and SE combinations. These manipulations are necessary
in several components.
Assumes CS structure of: /Resources/Sites/<GRIDNAME>/<SITENAME>
"""
__RCSID__ = "$Id$"
from DIRAC import gLogger, S_OK
from DIRAC.DataManagementSystem.Utilities.DMSHelpers import DMSHelpers, siteGridName
def getSEParameters(seName):
""" get all the SE parameters in a list
:param str seName: name of the Storage Element
:return: S_OK() with list of dict with parameters
"""
# This import is here to avoid circular imports
from DIRAC.Resources.Storage.StorageElement import StorageElement
se = StorageElement(seName, hideExceptions=True)
protocolsSet = set(se.localAccessProtocolList) | set(se.localWriteProtocolList)
seParametersList = []
for protocol in protocolsSet:
seParameters = se.getStorageParameters(protocol=protocol)
if seParameters['OK']:
seParametersList.append(seParameters['Value'])
else:
gLogger.verbose("No SE parameters obtained", "for SE %s and protocol %s" % (seName, protocol))
return S_OK(seParametersList)
def getSEHosts(seName):
""" Get StorageElement host names (can be more than one depending on the protocol)
:param str seName: name of the storage element
:return: S_OK() with list of hosts or S_ERROR
"""
seParameters = getSEParameters(seName)
if not seParameters['OK']:
gLogger.warn("Could not get SE parameters", "SE: %s" % seName)
return seParameters
return S_OK([parameters['Host'] for parameters in seParameters['Value']])
def getStorageElementsHosts(seNames=None):
""" Get StorageElement host names
:param list seNames: possible list of storage element names (if not provided, will use all)
:param list plugins: if provided, restrict to a certain list of plugins
:return: S_OK() with list of hosts or S_ERROR
"""
seHosts = []
if seNames is None:
seNames = DMSHelpers().getStorageElements()
for seName in seNames:
seHost = getSEHosts(seName)
if not seHost['OK']:
gLogger.warn("Could not get SE Host", "SE: %s" % seName)
continue
if seHost['Value']:
seHosts.extend(seHost['Value'])
return S_OK(list(set(seHosts)))
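# Illustrative sketch (not part of the original module): consuming the
# S_OK/S_ERROR structures returned above. The SE names are assumptions used
# purely for demonstration.
#   result = getStorageElementsHosts(['CERN-RAW', 'CERN-RDST'])
#   if result['OK']:
#     for host in result['Value']:
#       gLogger.notice('SE host: %s' % host)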
#############################################################################
def getSiteSEMapping(gridName='', withSiteLocalSEMapping=False):
""" Returns a dictionary of all sites and their localSEs as a list, e.g.
{'LCG.CERN.ch':['CERN-RAW','CERN-RDST',...]}
If gridName is specified, result is restricted to that Grid type.
"""
result = DMSHelpers().getSiteSEMapping()
if not result['OK']:
return result
if withSiteLocalSEMapping:
mapping = result['Value'][2]
else:
mapping = result['Value'][1]
if gridName:
mapping = dict((site, mapping[site]) for site in mapping if siteGridName(site) == gridName)
return S_OK(mapping)
#############################################################################
def getSESiteMapping(gridName='', withSiteLocalSEMapping=False):
""" Returns a dictionary of all SEs and their associated site(s), e.g.
      {'CERN-RAW': ['LCG.CERN.ch'], 'CERN-RDST': ['LCG.CERN.ch'], ...}
    Although an SE normally maps to a single site, it is possible across
    Grid types for an SE to have multiple site entries.
If gridName is specified, result is restricted to that Grid type.
Assumes CS structure of: /Resources/Sites/<GRIDNAME>/<SITENAME>
"""
storageElements = DMSHelpers().getStorageElements()
return S_OK(dict((se,
getSitesForSE(se, gridName=gridName,
withSiteLocalSEMapping=withSiteLocalSEMapping).get('Value', []))
for se in storageElements))
#############################################################################
def getSitesForSE(storageElement, gridName='', withSiteLocalSEMapping=False):
""" Given a DIRAC SE name this method returns a list of corresponding sites.
Optionally restrict to Grid specified by name.
"""
result = DMSHelpers().getSitesForSE(storageElement,
connectionLevel='DOWNLOAD' if withSiteLocalSEMapping else 'LOCAL')
if not result['OK'] or not gridName:
return result
return S_OK([site for site in result['Value'] if siteGridName(site) == gridName])
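# Illustrative sketch (not part of the original module): restricting the site
# lookup to one Grid flavour. The SE name 'CERN-RAW' and grid name 'LCG' are
# assumptions for demonstration only.
#   result = getSitesForSE('CERN-RAW', gridName='LCG')
#   if result['OK']:
#     gLogger.notice('Sites for CERN-RAW: %s' % ','.join(result['Value']))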
#############################################################################
def getSEsForSite(siteName, withSiteLocalSEMapping=False):
""" Given a DIRAC site name this method returns a list of corresponding SEs.
"""
result = DMSHelpers().getSEsForSite(siteName, connectionLevel='DOWNLOAD' if withSiteLocalSEMapping else 'LOCAL')
if not result['OK']:
return S_OK([])
return result
#############################################################################
def isSameSiteSE(se1, se2):
""" Check if the 2 SEs are at the same site
"""
dmsHelper = DMSHelpers()
site1 = dmsHelper.getLocalSiteForSE(se1).get('Value')
site2 = dmsHelper.getLocalSiteForSE(se2).get('Value')
return site1 and site2 and site1 == site2
#############################################################################
def getSEsForCountry(country):
""" Determines the associated SEs from the country code
"""
return DMSHelpers().getSEsAtCountry(country)
| gpl-3.0 |
lshain-android-source/external-chromium_org | chrome/test/mini_installer/registry_verifier.py | 23 | 2057 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import _winreg
def VerifyRegistryEntries(entries):
"""Verifies that the current registry matches the specified criteria.
Args:
entries: A dictionary whose keys are registry keys and values are
expectation dictionaries.
"""
for key, expectation in entries.iteritems():
VerifyRegistryEntry(key, expectation)
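# Illustrative sketch (not part of the original module): the expected shape of
# the |entries| argument. The key path and value below are assumptions chosen
# purely for demonstration; 'value' is currently unverified (see the TODO at
# the bottom of this file).
#   VerifyRegistryEntries({
#       'HKEY_CURRENT_USER\\Software\\Example': {
#           'exists': True,
#           'value': '1.0.0',
#       },
#   })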
def RootKeyConstant(key):
"""Converts a root registry key string into a _winreg.HKEY_* constant."""
if key == 'HKEY_CLASSES_ROOT':
return _winreg.HKEY_CLASSES_ROOT
if key == 'HKEY_CURRENT_USER':
return _winreg.HKEY_CURRENT_USER
if key == 'HKEY_LOCAL_MACHINE':
return _winreg.HKEY_LOCAL_MACHINE
if key == 'HKEY_USERS':
return _winreg.HKEY_USERS
raise KeyError("Unknown root registry key '%s'" % key)
def VerifyRegistryEntry(key, expectation):
"""Verifies a registry key according to the |expectation|.
The |expectation| specifies whether or not the registry key should exist
(under 'exists') and optionally specifies an expected 'value' for the key.
Args:
key: Name of the registry key.
expectation: A dictionary with the following keys and values:
'exists' a boolean indicating whether the registry entry should exist.
'value' (optional) a string representing the expected value for
the key.
"""
root_key, sub_key = key.split('\\', 1)
try:
# Query the Windows registry for the registry key. It will throw a
# WindowsError if the key doesn't exist.
_ = _winreg.OpenKey(RootKeyConstant(root_key), sub_key, 0, _winreg.KEY_READ)
except WindowsError:
# Key doesn't exist. See that it matches the expectation.
assert not expectation['exists'], 'Registry entry %s is missing' % key
return
# The key exists, see that it matches the expectation.
assert expectation['exists'], 'Registry entry %s exists' % key
# TODO(sukolsak): Verify the expected value.
| bsd-3-clause |
onitake/ansible | test/units/playbook/test_become.py | 42 | 3175 | # -*- coding: utf-8 -*-
# (c) 2018 Matt Martz <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import re
from ansible.errors import AnsibleParserError
from ansible.playbook.become import Become
from ansible.module_utils._text import to_native
import pytest
class InString(str):
def __eq__(self, other):
return self in other
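# Illustrative note (not part of the original tests): InString inverts the
# usual containment check, so an InString compares equal to any string that
# contains it, which allows loose assertions on long messages, e.g.:
#   InString('su_user') == 'Instead of su/su_user, use become/become_user'  # True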
@pytest.mark.parametrize("ds", [
{},
{'become': True},
{'become_user': 'root'},
{'sudo': True},
{'sudo_user': 'root'},
{'su': True},
{'su_user': 'root'}
])
def test_detect_privilege_escalation_conflict_valid(ds):
become = Become()
become._detect_privilege_escalation_conflict(ds)
@pytest.mark.parametrize("ds,message", [
({'become': True, 'sudo': True}, re.compile('"become".*"sudo"')),
({'become': True, 'su': True}, re.compile('"become".*"su"')),
({'sudo': True, 'su': True}, re.compile('"sudo".*"su"')),
({'become_user': 'root', 'sudo': True}, re.compile('"become".*"sudo"')),
({'sudo_user': 'root', 'su': True}, re.compile('"sudo".*"su"')),
])
def test_detect_privilege_escalation_conflict_invalid(ds, message):
become = Become()
with pytest.raises(AnsibleParserError) as excinfo:
become._detect_privilege_escalation_conflict(ds)
assert message.search(excinfo.value.message) is not None
def test_preprocess_data_become(mocker):
display_mock = mocker.patch('ansible.playbook.become.display')
become = Become()
ds = {}
assert become._preprocess_data_become(ds) == {}
display_mock.reset_mock()
ds = {'sudo': True}
out = become._preprocess_data_become(ds)
assert 'sudo' not in out
assert out.get('become_method') == 'sudo'
display_mock.deprecated.assert_called_once_with(
"Instead of sudo/sudo_user, use become/become_user and make sure become_method is 'sudo' (default)",
'2.9'
)
ds = {'sudo_user': 'root'}
out = become._preprocess_data_become(ds)
assert 'sudo_user' not in out
assert out.get('become_user') == 'root'
ds = {'sudo': True, 'sudo_user': 'root'}
out = become._preprocess_data_become(ds)
assert 'sudo' not in out
assert 'sudo_user' not in out
assert out.get('become_method') == 'sudo'
assert out.get('become_user') == 'root'
display_mock.reset_mock()
ds = {'su': True}
out = become._preprocess_data_become(ds)
assert 'su' not in out
assert out.get('become_method') == 'su'
display_mock.deprecated.assert_called_once_with(
"Instead of su/su_user, use become/become_user and set become_method to 'su' (default is sudo)",
'2.9'
)
display_mock.reset_mock()
ds = {'su_user': 'root'}
out = become._preprocess_data_become(ds)
assert 'su_user' not in out
assert out.get('become_user') == 'root'
ds = {'su': True, 'su_user': 'root'}
out = become._preprocess_data_become(ds)
assert 'su' not in out
assert 'su_user' not in out
assert out.get('become_method') == 'su'
assert out.get('become_user') == 'root'
| gpl-3.0 |
jocave/snapcraft | snapcraft/tests/test_sources.py | 2 | 20413 | # -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-
#
# Copyright (C) 2015, 2016 Canonical Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import http.server
import threading
import unittest.mock
import fixtures
from snapcraft.internal import sources
from snapcraft import tests
class FakeTarballHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
def do_GET(self):
data = 'Test fake compressed file'
self.send_response(200)
self.send_header('Content-Length', len(data))
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write(data.encode())
def log_message(self, *args):
# Overwritten so the test does not write to stderr.
pass
class TestTar(tests.TestCase):
@unittest.mock.patch('snapcraft.sources.Tar.provision')
def test_pull_tarball_must_download_to_sourcedir(self, mock_prov):
self.useFixture(fixtures.EnvironmentVariable(
'no_proxy', 'localhost,127.0.0.1'))
server = http.server.HTTPServer(
('127.0.0.1', 0), FakeTarballHTTPRequestHandler)
server_thread = threading.Thread(target=server.serve_forever)
self.addCleanup(server_thread.join)
self.addCleanup(server.server_close)
self.addCleanup(server.shutdown)
server_thread.start()
plugin_name = 'test_plugin'
dest_dir = os.path.join('parts', plugin_name, 'src')
os.makedirs(dest_dir)
tar_file_name = 'test.tar'
source = 'http://{}:{}/{file_name}'.format(
*server.server_address, file_name=tar_file_name)
tar_source = sources.Tar(source, dest_dir)
tar_source.pull()
mock_prov.assert_called_once_with(dest_dir)
with open(os.path.join(dest_dir, tar_file_name), 'r') as tar_file:
self.assertEqual('Test fake compressed file', tar_file.read())
class TestZip(tests.TestCase):
def setUp(self):
super().setUp()
self.useFixture(fixtures.EnvironmentVariable(
'no_proxy', 'localhost,127.0.0.1'))
self.server = http.server.HTTPServer(
('127.0.0.1', 0), FakeTarballHTTPRequestHandler)
server_thread = threading.Thread(target=self.server.serve_forever)
self.addCleanup(server_thread.join)
self.addCleanup(self.server.server_close)
self.addCleanup(self.server.shutdown)
server_thread.start()
@unittest.mock.patch('zipfile.ZipFile')
def test_pull_zipfile_must_download_and_extract(self, mock_zip):
dest_dir = 'src'
os.makedirs(dest_dir)
zip_file_name = 'test.zip'
source = 'http://{}:{}/{file_name}'.format(
*self.server.server_address, file_name=zip_file_name)
zip_source = sources.Zip(source, dest_dir)
zip_source.pull()
mock_zip.assert_called_once_with(
os.path.join(zip_source.source_dir, zip_file_name))
@unittest.mock.patch('zipfile.ZipFile')
def test_extract_and_keep_zipfile(self, mock_zip):
zip_file_name = 'test.zip'
source = 'http://{}:{}/{file_name}'.format(
*self.server.server_address, file_name=zip_file_name)
dest_dir = os.path.abspath(os.curdir)
zip_source = sources.Zip(source, dest_dir)
zip_source.download()
zip_source.provision(dst=dest_dir, keep_zip=True)
zip_download = os.path.join(zip_source.source_dir, zip_file_name)
mock_zip.assert_called_once_with(zip_download)
with open(zip_download, 'r') as zip_file:
self.assertEqual('Test fake compressed file', zip_file.read())
class SourceTestCase(tests.TestCase):
def setUp(self):
super().setUp()
patcher = unittest.mock.patch('subprocess.check_call')
self.mock_run = patcher.start()
self.mock_run.return_value = True
self.addCleanup(patcher.stop)
patcher = unittest.mock.patch('os.rmdir')
self.mock_rmdir = patcher.start()
self.addCleanup(patcher.stop)
patcher = unittest.mock.patch('os.path.exists')
self.mock_path_exists = patcher.start()
self.mock_path_exists.return_value = False
self.addCleanup(patcher.stop)
class TestBazaar(SourceTestCase):
def test_pull(self):
bzr = sources.Bazaar('lp:my-source', 'source_dir')
bzr.pull()
self.mock_rmdir.assert_called_once_with('source_dir')
self.mock_run.assert_called_once_with(
['bzr', 'branch', 'lp:my-source', 'source_dir'])
def test_pull_tag(self):
bzr = sources.Bazaar(
'lp:my-source', 'source_dir', source_tag='tag')
bzr.pull()
self.mock_run.assert_called_once_with(
['bzr', 'branch', '-r', 'tag:tag', 'lp:my-source',
'source_dir'])
def test_pull_existing_with_tag(self):
self.mock_path_exists.return_value = True
bzr = sources.Bazaar(
'lp:my-source', 'source_dir', source_tag='tag')
bzr.pull()
self.mock_run.assert_called_once_with(
['bzr', 'pull', '-r', 'tag:tag', 'lp:my-source', '-d',
'source_dir'])
def test_init_with_source_branch_raises_exception(self):
with self.assertRaises(
sources.IncompatibleOptionsError) as raised:
sources.Bazaar('lp:mysource', 'source_dir', source_branch='branch')
expected_message = 'can\'t specify a source-branch for a bzr source'
self.assertEqual(raised.exception.message, expected_message)
class TestGit(SourceTestCase):
def test_pull(self):
git = sources.Git('git://my-source', 'source_dir')
git.pull()
self.mock_run.assert_called_once_with(
['git', 'clone', '--depth', '1', '--recursive', 'git://my-source',
'source_dir'])
def test_pull_branch(self):
git = sources.Git('git://my-source', 'source_dir',
source_branch='my-branch')
git.pull()
self.mock_run.assert_called_once_with(
['git', 'clone', '--depth', '1', '--recursive', '--branch',
'my-branch', 'git://my-source', 'source_dir'])
def test_pull_tag(self):
git = sources.Git('git://my-source', 'source_dir', source_tag='tag')
git.pull()
self.mock_run.assert_called_once_with(
['git', 'clone', '--depth', '1', '--recursive', '--branch', 'tag',
'git://my-source', 'source_dir'])
def test_pull_existing(self):
self.mock_path_exists.return_value = True
git = sources.Git('git://my-source', 'source_dir')
git.pull()
self.mock_run.assert_has_calls([
unittest.mock.call(['git', '-C', 'source_dir', 'pull',
'--recurse-submodules=yes', 'git://my-source',
'HEAD']),
unittest.mock.call(['git', '-C', 'source_dir', 'submodule',
'update'])
])
def test_pull_existing_with_tag(self):
self.mock_path_exists.return_value = True
git = sources.Git('git://my-source', 'source_dir', source_tag='tag')
git.pull()
self.mock_run.assert_has_calls([
unittest.mock.call(['git', '-C', 'source_dir', 'pull',
'--recurse-submodules=yes', 'git://my-source',
'refs/tags/tag']),
unittest.mock.call(['git', '-C', 'source_dir', 'submodule',
'update'])
])
def test_pull_existing_with_branch(self):
self.mock_path_exists.return_value = True
git = sources.Git('git://my-source', 'source_dir',
source_branch='my-branch')
git.pull()
self.mock_run.assert_has_calls([
unittest.mock.call(['git', '-C', 'source_dir', 'pull',
'--recurse-submodules=yes', 'git://my-source',
'refs/heads/my-branch']),
unittest.mock.call(['git', '-C', 'source_dir', 'submodule',
'update'])
])
def test_init_with_source_branch_and_tag_raises_exception(self):
with self.assertRaises(sources.IncompatibleOptionsError) as raised:
sources.Git('git://mysource', 'source_dir',
source_tag='tag', source_branch='branch')
expected_message = \
'can\'t specify both source-tag and source-branch for a git source'
self.assertEqual(raised.exception.message, expected_message)
class TestMercurial(SourceTestCase):
def test_pull(self):
hg = sources.Mercurial('hg://my-source', 'source_dir')
hg.pull()
self.mock_run.assert_called_once_with(
['hg', 'clone', 'hg://my-source', 'source_dir'])
def test_pull_branch(self):
hg = sources.Mercurial('hg://my-source', 'source_dir',
source_branch='my-branch')
hg.pull()
self.mock_run.assert_called_once_with(
['hg', 'clone', '-u', 'my-branch', 'hg://my-source',
'source_dir'])
def test_pull_tag(self):
hg = sources.Mercurial('hg://my-source', 'source_dir',
source_tag='tag')
hg.pull()
self.mock_run.assert_called_once_with(
['hg', 'clone', '-u', 'tag', 'hg://my-source',
'source_dir'])
def test_pull_existing(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial('hg://my-source', 'source_dir')
hg.pull()
self.mock_run.assert_called_once_with(
['hg', 'pull', 'hg://my-source'])
def test_pull_existing_with_tag(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial('hg://my-source', 'source_dir',
source_tag='tag')
hg.pull()
self.mock_run.assert_called_once_with(
['hg', 'pull', '-r', 'tag', 'hg://my-source'])
def test_pull_existing_with_branch(self):
self.mock_path_exists.return_value = True
hg = sources.Mercurial('hg://my-source', 'source_dir',
source_branch='my-branch')
hg.pull()
self.mock_run.assert_called_once_with(
['hg', 'pull', '-b', 'my-branch', 'hg://my-source'])
def test_init_with_source_branch_and_tag_raises_exception(self):
with self.assertRaises(sources.IncompatibleOptionsError) as raised:
sources.Mercurial(
'hg://mysource', 'source_dir', source_tag='tag',
source_branch='branch')
expected_message = (
'can\'t specify both source-tag and source-branch for a mercurial '
'source')
self.assertEqual(raised.exception.message, expected_message)
class TestSubversion(SourceTestCase):
def test_pull_remote(self):
svn = sources.Subversion('svn://my-source', 'source_dir')
svn.pull()
self.mock_run.assert_called_once_with(
['svn', 'checkout', 'svn://my-source', 'source_dir'])
def test_pull_local_absolute_path(self):
svn = sources.Subversion(self.path, 'source_dir')
svn.pull()
self.mock_run.assert_called_once_with(
['svn', 'checkout', 'file://'+self.path, 'source_dir'])
def test_pull_local_relative_path(self):
os.mkdir("my-source")
svn = sources.Subversion('my-source', 'source_dir')
svn.pull()
self.mock_run.assert_called_once_with(
['svn', 'checkout',
'file://{}'.format(os.path.join(self.path, 'my-source')),
'source_dir'])
def test_pull_existing(self):
self.mock_path_exists.return_value = True
svn = sources.Subversion('svn://my-source', 'source_dir')
svn.pull()
self.mock_run.assert_called_once_with(
['svn', 'update'], cwd=svn.source_dir)
def test_init_with_source_tag_raises_exception(self):
with self.assertRaises(sources.IncompatibleOptionsError) as raised:
sources.Subversion(
'svn://mysource', 'source_dir', source_tag='tag')
expected_message = (
"Can't specify source-tag for a Subversion source")
self.assertEqual(raised.exception.message, expected_message)
def test_init_with_source_branch_raises_exception(self):
with self.assertRaises(sources.IncompatibleOptionsError) as raised:
sources.Subversion(
'svn://mysource', 'source_dir', source_branch='branch')
expected_message = (
"Can't specify source-branch for a Subversion source")
self.assertEqual(raised.exception.message, expected_message)
def test_init_with_source_branch_and_tag_raises_exception(self):
with self.assertRaises(sources.IncompatibleOptionsError) as raised:
sources.Subversion(
'svn://mysource', 'source_dir', source_tag='tag',
source_branch='branch')
expected_message = (
"Can't specify source-tag OR source-branch for a Subversion "
"source")
self.assertEqual(raised.exception.message, expected_message)
class TestLocal(tests.TestCase):
def test_pull_with_existing_empty_source_dir_creates_hardlinks(self):
os.makedirs(os.path.join('src', 'dir'))
open(os.path.join('src', 'dir', 'file'), 'w').close()
os.mkdir('destination')
local = sources.Local('src', 'destination')
local.pull()
# Verify that the directories are not symlinks, but the file is a
# hardlink.
self.assertFalse(os.path.islink('destination'))
self.assertFalse(os.path.islink(os.path.join('destination', 'dir')))
self.assertGreater(
os.stat(os.path.join('destination', 'dir', 'file')).st_nlink, 1)
def test_pull_with_existing_source_link_creates_symlink(self):
os.makedirs(os.path.join('src', 'dir'))
open(os.path.join('src', 'dir', 'file'), 'w').close()
# Note that this is a symlink now instead of a directory
os.symlink('dummy', 'destination')
local = sources.Local('src', 'destination')
local.pull()
self.assertFalse(os.path.islink('destination'))
self.assertFalse(os.path.islink(os.path.join('destination', 'dir')))
self.assertGreater(
os.stat(os.path.join('destination', 'dir', 'file')).st_nlink, 1)
def test_pull_with_existing_source_file_wipes_and_creates_hardlinks(self):
os.makedirs(os.path.join('src', 'dir'))
open(os.path.join('src', 'dir', 'file'), 'w').close()
# Note that this is a file now instead of a directory
open('destination', 'w').close()
local = sources.Local('src', 'destination')
local.pull()
self.assertFalse(os.path.isfile('destination'))
self.assertFalse(os.path.islink('destination'))
self.assertFalse(os.path.islink(os.path.join('destination', 'dir')))
self.assertGreater(
os.stat(os.path.join('destination', 'dir', 'file')).st_nlink, 1)
def test_pulling_twice_with_existing_source_dir_recreates_hardlinks(self):
os.makedirs(os.path.join('src', 'dir'))
open(os.path.join('src', 'dir', 'file'), 'w').close()
os.mkdir('destination')
local = sources.Local('src', 'destination')
local.pull()
local.pull()
# Verify that the directories are not symlinks, but the file is a
# hardlink.
self.assertFalse(os.path.islink('destination'))
self.assertFalse(os.path.islink(os.path.join('destination', 'dir')))
self.assertGreater(
os.stat(os.path.join('destination', 'dir', 'file')).st_nlink, 1)
def test_pull_ignores_snapcraft_specific_data(self):
# Make the snapcraft-specific directories
os.makedirs(os.path.join('src', 'parts'))
os.makedirs(os.path.join('src', 'stage'))
os.makedirs(os.path.join('src', 'prime'))
# Make the snapcraft.yaml (and hidden one) and a built snap
open(os.path.join('src', 'snapcraft.yaml'), 'w').close()
open(os.path.join('src', '.snapcraft.yaml'), 'w').close()
open(os.path.join('src', 'foo.snap'), 'w').close()
# Now make some real files
os.makedirs(os.path.join('src', 'dir'))
open(os.path.join('src', 'dir', 'file'), 'w').close()
os.mkdir('destination')
local = sources.Local('src', 'destination')
local.pull()
# Verify that the snapcraft-specific stuff got filtered out
self.assertFalse(os.path.exists(os.path.join('destination', 'parts')))
self.assertFalse(os.path.exists(os.path.join('destination', 'stage')))
self.assertFalse(os.path.exists(os.path.join('destination', 'prime')))
self.assertFalse(
os.path.exists(os.path.join('destination', 'snapcraft.yaml')))
self.assertFalse(
os.path.exists(os.path.join('destination', '.snapcraft.yaml')))
self.assertFalse(
os.path.exists(os.path.join('destination', 'foo.snap')))
# Verify that the real stuff made it in.
self.assertFalse(os.path.islink('destination'))
self.assertFalse(os.path.islink(os.path.join('destination', 'dir')))
self.assertGreater(
os.stat(os.path.join('destination', 'dir', 'file')).st_nlink, 1)
class TestUri(tests.TestCase):
def test_get_tar_source_from_uri(self):
test_sources = [
'https://golang.tar.gz',
'https://golang.tar.xz',
'https://golang.tar.bz2',
'https://golang.tar.tgz',
'https://golang.tar',
]
for source in test_sources:
with self.subTest(key=source):
self.assertEqual(
sources._get_source_type_from_uri(source), 'tar')
@unittest.mock.patch('snapcraft.sources.Git.pull')
def test_get_git_source_from_uri(self, mock_pull):
test_sources = [
'git://github.com:ubuntu-core/snapcraft.git',
'[email protected]:ubuntu-core/snapcraft.git',
'https://github.com:ubuntu-core/snapcraft.git',
]
for source in test_sources:
with self.subTest(key=source):
options = tests.MockOptions(source=source)
sources.get(
sourcedir='dummy',
builddir='dummy',
options=options)
mock_pull.assert_called_once_with()
mock_pull.reset_mock()
@unittest.mock.patch('snapcraft.sources.Bazaar.pull')
def test_get_bzr_source_from_uri(self, mock_pull):
test_sources = [
'lp:snapcraft_test_source',
'bzr:dummy-source'
]
for source in test_sources:
with self.subTest(key=source):
options = tests.MockOptions(source=source)
sources.get(
sourcedir='dummy',
builddir='dummy',
options=options)
mock_pull.assert_called_once_with()
mock_pull.reset_mock()
@unittest.mock.patch('snapcraft.sources.Subversion.pull')
def test_get_svn_source_from_uri(self, mock_pull):
test_sources = [
'svn://sylpheed.sraoss.jp/sylpheed/trunk'
]
for source in test_sources:
with self.subTest(key=source):
options = tests.MockOptions(source=source)
sources.get(
sourcedir='dummy',
builddir='dummy',
options=options)
mock_pull.assert_called_once_with()
mock_pull.reset_mock()
| gpl-3.0 |
strint/tensorflow | tensorflow/python/layers/base.py | 8 | 12144 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the base Layer class, from which all layers inherit.
This is a private class and its internal implementation is subject to changes
in the future.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
import re
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
import six
from tensorflow.python.framework import ops
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import variables as tf_variables
from tensorflow.python.ops import variable_scope as vs
class _Layer(object):
"""Base layer class.
WARNING: Do not subclass this layer unless you know what you are doing:
the API is subject to future changes.
This is the class from which all layers inherit, implementing common
infrastructure functionality.
A layer is a class implementing common neural networks operations, such
as convolution, batch norm, etc. These operations require managing variables,
losses, and updates, as well as applying TensorFlow ops to input tensors.
Properties:
trainable: Whether the layer should be trained (boolean).
name: The name of the layer (string).
dtype: Default dtype of the layer (dtypes.float32).
trainable_variables: List of trainable variables.
non_trainable_variables: List of non-trainable variables.
variables: List of all variables of this layer, trainable and non-trainable.
updates: List of update ops of this layer.
losses: List of losses added by this layer.
"""
def __init__(self, trainable=True, name=None,
dtype=dtypes.float32, **kwargs):
# We use a kwargs dict here because these kwargs only exist
# for compatibility reasons.
# The list of kwargs is subject to changes in the future.
# We do not want to commit to it or to expose the list to users at all.
# Note this is exactly as safe as defining kwargs in the function signature,
# the only difference being that the list of valid kwargs is defined
    # below rather than in the signature, and default values are defined
# in calls to kwargs.get().
allowed_kwargs = {
'_scope',
'_reuse',
}
for kwarg in kwargs:
if kwarg not in allowed_kwargs:
raise TypeError('Keyword argument not understood:', kwarg)
self._trainable = trainable
self._built = False
self._trainable_variables = []
self._non_trainable_variables = []
self._updates = []
self._losses = []
self._reuse = kwargs.get('_reuse')
self.dtype = dtype
# Determine base name (non-unique).
base_name = name
if not name:
base_name = _to_snake_case(self.__class__.__name__)
# Determine variable scope.
scope = kwargs.get('_scope')
if scope:
self._scope = next(vs.variable_scope(scope).gen)
else:
self._scope = next(vs.variable_scope(None, default_name=base_name).gen)
# Unique name is borrowed from scope to match variable names.
self.name = self._scope.name
def __setattr__(self, name, value):
if hasattr(self, name):
# Only allow private attributes to be set more than once, under the
# convention that private attributes should only be set from inside
# the class.
# All attributes meant to be set several times should be set to private.
if name[0] != '_':
raise AttributeError('Read-only property cannot be set: %s' % name)
super(_Layer, self).__setattr__(name, value)
@property
def trainable_variables(self):
return self._trainable_variables if self.trainable else []
@property
def non_trainable_variables(self):
return self._non_trainable_variables if self.trainable else self.variables
@property
def trainable_weights(self):
return self.trainable_variables
@property
def non_trainable_weights(self):
return self.non_trainable_variables
@property
def variables(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self._trainable_variables + self._non_trainable_variables
@property
def updates(self):
return self._updates
@property
def losses(self):
return self._losses
@property
def built(self):
return self._built
@property
def trainable(self):
return self._trainable
@property
def weights(self):
"""Returns the list of all layer variables/weights.
Returns:
A list of variables.
"""
return self.variables
def build(self, _):
"""Creates the variables of the layer.
"""
self._built = True
def call(self, inputs, **kwargs):
"""The logic of the layer lives here.
Arguments:
inputs: input tensor(s).
**kwargs: additional keyword arguments.
Returns:
Output tensor(s).
"""
raise NotImplementedError
def _compute_output_shape(self, input_shape):
"""Computes the output shape of the layer given the input shape.
Assumes that the layer will be built to match that input shape.
Args:
input_shape: A (possibly nested tuple of) `TensorShape`. It need not
be fully defined (e.g. the batch size may be unknown).
Returns:
A (possibly nested tuple of) `TensorShape`.
Raises:
TypeError: if `input_shape` is not a (possibly nested tuple of)
`TensorShape`.
      ValueError: if `input_shape` is incomplete or is incompatible with
        the layer.
"""
raise NotImplementedError
def _add_variable(self, name, shape, dtype=None,
initializer=None, regularizer=None, trainable=True,
variable_getter=vs.get_variable):
"""Adds a new variable to the layer.
Arguments:
name: variable name.
shape: variable shape.
dtype: The type of the variable. Defaults to `self.dtype`.
initializer: initializer instance (callable).
regularizer: regularizer instance (callable).
trainable: whether the variable should be part of the layer's
"trainable_variables" (e.g. variables, biases)
or "non_trainable_variables" (e.g. BatchNorm mean, stddev).
variable_getter: The getter to use for TensorFlow variables.
Returns:
The created variable.
"""
if dtype is None:
dtype = self.dtype
existing_variables = set(tf_variables.global_variables())
variable = variable_getter(name,
shape=shape,
initializer=initializer,
dtype=dtype,
trainable=trainable and self.trainable)
# TODO(sguada) fix name = variable.op.name
if variable in existing_variables:
return variable
if regularizer:
# To match the behavior of tf.get_variable(), we only
# apply regularization if the variable is newly created.
if isinstance(variable, tf_variables.PartitionedVariable):
for v in variable:
with ops.colocate_with(v.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(v)
if regularization is not None:
self._losses.append(regularization)
_add_elements_to_collection(
regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
else:
with ops.colocate_with(variable.op):
with ops.name_scope(name + '/Regularizer'):
regularization = regularizer(variable)
if regularization is not None:
self._losses.append(regularization)
_add_elements_to_collection(
regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
if trainable:
self._trainable_variables.append(variable)
else:
self._non_trainable_variables.append(variable)
return variable
def __call__(self, inputs, **kwargs):
"""Wraps `call`, applying pre- and post-processing steps.
Arguments:
inputs: input tensor(s).
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
# Define a custom getter to override tf.get_variable when creating layer
# variables. The current custom getter is nested by the variable scope.
def variable_getter(getter, name, shape, dtype=None, initializer=None,
regularizer=None, trainable=True, **kwargs):
return self._add_variable(
name, shape, initializer=initializer, regularizer=regularizer,
dtype=dtype, trainable=trainable,
variable_getter=functools.partial(getter, **kwargs))
# Build (if necessary) and call the layer, inside a variable scope.
with vs.variable_scope(self._scope,
reuse=True if self._built else self._reuse,
custom_getter=variable_getter) as scope:
with ops.name_scope(scope.original_name_scope):
if not self.built:
input_list = _to_list(inputs)
input_shapes = [x.get_shape() for x in input_list]
if len(input_shapes) == 1:
self.build(input_shapes[0])
else:
self.build(input_shapes)
self._built = True
outputs = self.call(inputs, **kwargs)
# Apply activity regularization.
# Note that it should be applied every time the layer creates a new
# output, since it is output-specific.
if hasattr(self, 'activity_regularizer') and self.activity_regularizer:
output_list = _to_list(outputs)
for output in output_list:
with ops.name_scope('ActivityRegularizer'):
activity_regularization = self.activity_regularizer(output)
self._losses.append(activity_regularization)
_add_elements_to_collection(
activity_regularization, ops.GraphKeys.REGULARIZATION_LOSSES)
# Update global default collections.
_add_elements_to_collection(self.updates, ops.GraphKeys.UPDATE_OPS)
return outputs
def apply(self, inputs, **kwargs):
"""Apply the layer on a input.
This simply wraps `self.__call__`.
Arguments:
inputs: Input tensor(s).
**kwargs: additional keyword arguments to be passed to `self.call`.
Returns:
Output tensor(s).
"""
return self.__call__(inputs, **kwargs)
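class _ExampleDense(_Layer):
  """Illustrative sketch only, not part of the original module.
  A minimal subclass showing the `build`/`call` contract. The layer name,
  the kernel shape and the use of the scope's default initializer are
  assumptions made purely for demonstration; the last input dimension must
  be statically known.
  """
  def __init__(self, units, **kwargs):
    super(_ExampleDense, self).__init__(**kwargs)
    self._units = units
  def build(self, input_shape):
    # vs.get_variable routes through the custom getter installed by
    # __call__, so the kernel is created via _add_variable and tracked in
    # trainable_variables automatically.
    self._kernel = vs.get_variable(
        'kernel', shape=[input_shape[-1].value, self._units],
        dtype=self.dtype)
    super(_ExampleDense, self).build(input_shape)
  def call(self, inputs, **kwargs):
    from tensorflow.python.ops import math_ops  # local import for the sketch
    return math_ops.matmul(inputs, self._kernel)
# Example wiring (shapes assumed): layer = _ExampleDense(units=10), then
# outputs = layer.apply(some_2d_tensor) builds the layer on first use.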
def _to_snake_case(name):
intermediate = re.sub('(.)([A-Z][a-z0-9]+)', r'\1_\2', name)
insecure = re.sub('([a-z])([A-Z])', r'\1_\2', intermediate).lower()
# If the class is private the name starts with "_" which is not secure
# for creating scopes. We prefix the name with "private" in this case.
if insecure[0] != '_':
return insecure
return 'private' + insecure
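# Illustrative examples (not part of the original module):
#   _to_snake_case('BatchNorm')    -> 'batch_norm'
#   _to_snake_case('MyDenseLayer') -> 'my_dense_layer'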
def _to_list(x):
"""This normalizes a list/tuple or single element into a list.
If a single element is passed, we return
a list of size 1 containing the element.
Arguments:
x: list or tuple or single element.
Returns:
A list.
"""
if isinstance(x, (list, tuple)):
return list(x)
return [x]
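# Illustrative examples (not part of the original module):
#   _to_list((1, 2)) -> [1, 2]
#   _to_list('abc')  -> ['abc']  # strings count as single elements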
def _add_elements_to_collection(elements, collections):
elements = _to_list(elements)
collections = _to_list(collections)
for name in collections:
collection = ops.get_collection_ref(name)
collection_set = set(collection)
for element in elements:
if element not in collection_set:
collection.append(element)
| apache-2.0 |
curiosityio/taiga-docker | taiga-back/taiga-back/tests/integration/test_watch_tasks.py | 2 | 5090 | # Copyright (C) 2014-2016 Andrey Antukh <[email protected]>
# Copyright (C) 2014-2016 Jesús Espino <[email protected]>
# Copyright (C) 2014-2016 David Barragán <[email protected]>
# Copyright (C) 2014-2016 Alejandro Alonso <[email protected]>
# Copyright (C) 2014-2016 Anler Hernández <[email protected]>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import json
from django.core.urlresolvers import reverse
from .. import factories as f
pytestmark = pytest.mark.django_db
def test_watch_task(client):
user = f.UserFactory.create()
task = f.create_task(owner=user, milestone=None)
f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
url = reverse("tasks-watch", args=(task.id,))
client.login(user)
response = client.post(url)
assert response.status_code == 200
def test_unwatch_task(client):
user = f.UserFactory.create()
task = f.create_task(owner=user, milestone=None)
f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
url = reverse("tasks-watch", args=(task.id,))
client.login(user)
response = client.post(url)
assert response.status_code == 200
def test_list_task_watchers(client):
user = f.UserFactory.create()
task = f.TaskFactory(owner=user)
f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
f.WatchedFactory.create(content_object=task, user=user)
url = reverse("task-watchers-list", args=(task.id,))
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data[0]['id'] == user.id
def test_get_task_watcher(client):
user = f.UserFactory.create()
task = f.TaskFactory(owner=user)
f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
watch = f.WatchedFactory.create(content_object=task, user=user)
url = reverse("task-watchers-detail", args=(task.id, watch.user.id))
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data['id'] == watch.user.id
def test_get_task_watchers(client):
user = f.UserFactory.create()
task = f.TaskFactory(owner=user)
f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
url = reverse("tasks-detail", args=(task.id,))
f.WatchedFactory.create(content_object=task, user=user)
client.login(user)
response = client.get(url)
assert response.status_code == 200
assert response.data['watchers'] == [user.id]
assert response.data['total_watchers'] == 1
def test_get_task_is_watcher(client):
user = f.UserFactory.create()
task = f.create_task(owner=user, milestone=None)
f.MembershipFactory.create(project=task.project, user=user, is_admin=True)
url_detail = reverse("tasks-detail", args=(task.id,))
url_watch = reverse("tasks-watch", args=(task.id,))
url_unwatch = reverse("tasks-unwatch", args=(task.id,))
client.login(user)
response = client.get(url_detail)
assert response.status_code == 200
assert response.data['watchers'] == []
assert response.data['is_watcher'] == False
response = client.post(url_watch)
assert response.status_code == 200
response = client.get(url_detail)
assert response.status_code == 200
assert response.data['watchers'] == [user.id]
assert response.data['is_watcher'] == True
response = client.post(url_unwatch)
assert response.status_code == 200
response = client.get(url_detail)
assert response.status_code == 200
assert response.data['watchers'] == []
assert response.data['is_watcher'] == False
def test_remove_task_watcher(client):
user = f.UserFactory.create()
project = f.ProjectFactory.create()
task = f.TaskFactory(project=project,
user_story=None,
status__project=project,
milestone__project=project)
task.add_watcher(user)
role = f.RoleFactory.create(project=project, permissions=['modify_task', 'view_tasks'])
f.MembershipFactory.create(project=project, user=user, role=role)
url = reverse("tasks-detail", args=(task.id,))
client.login(user)
data = {"version": task.version, "watchers": []}
response = client.json.patch(url, json.dumps(data))
assert response.status_code == 200
assert response.data['watchers'] == []
assert response.data['is_watcher'] == False
| mit |
ayosef/pynet_test | ANSIBLE/library/eos_routemap.py | 8 | 20166 | #!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_routemap
short_description: Manage EOS routemap resources
description:
- This module will manage routemap entries on EOS nodes
version_added: 1.2.0
category: Route Policy
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.4.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
name:
description:
- The name of the routemap to manage.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
action:
description:
- The action associated with the routemap name.
required: true
default: 'permit'
choices: ['permit','deny']
aliases: []
version_added: 1.2.0
seqno:
description:
- The sequence number of the rule that this entry corresponds to.
required: true
default: null
choices: []
aliases: []
version_added: 1.2.0
description:
description:
- The description for this routemap entry.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
match:
description:
- The list of match statements that define the routemap entry. The
match statements should be a comma separated list of match statements
without the word match at the beginning of the string. See the example
below for more information.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
set:
description:
- The list of set statements that define the routemap entry. The
set statements should be a comma separated list of set statements
without the word set at the beginning of the string. See the example
below for more information.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
continue:
description:
- The statement defines the next routemap clause to evaluate.
required: false
default: null
choices: []
aliases: []
version_added: 1.2.0
"""
EXAMPLES = """
- eos_routemap: name=rm1 action=permit seqno=10
description='this is a great routemap'
match='as 50,interface Ethernet2'
set='tag 100,weight 1000'
continue=20
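# Stateful removal of the same entry (values above are illustrative; the
# module's stateful resource support drives this through state=absent)
- eos_routemap: name=rm1 action=permit seqno=10 state=absent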
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosConnection(object):
__attributes__ = ['username', 'password', 'host', 'transport', 'port']
def __init__(self, **kwargs):
self.connection = kwargs['connection']
self.transport = kwargs.get('transport')
self.username = kwargs.get('username')
self.password = kwargs.get('password')
self.host = kwargs.get('host')
self.port = kwargs.get('port')
self.config = kwargs.get('config')
def connect(self):
if self.config is not None:
pyeapi.load_config(self.config)
config = dict()
if self.connection is not None:
config = pyeapi.config_for(self.connection)
if not config:
                msg = 'Connection name "{}" not found'.format(self.connection)
                raise ValueError(msg)
for key in self.__attributes__:
if getattr(self, key) is not None:
config[key] = getattr(self, key)
if 'transport' not in config:
raise ValueError('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
node = pyeapi.client.Node(connection, **config)
try:
node.enable('show version')
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
raise ValueError('unable to connect to {}'.format(node))
return node
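# Illustrative sketch (not part of the original module): using EosConnection
# outside of Ansible for ad-hoc debugging. The profile name 'veos01' is an
# assumption; it must exist in your eapi.conf for connect() to succeed.
#   conn = EosConnection(connection='veos01')
#   node = conn.connect()
#   print(node.enable('show version'))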
class EosAnsibleModule(AnsibleModule):
meta_args = {
'config': dict(),
'username': dict(),
'password': dict(),
'host': dict(),
'connection': dict(default=DEFAULT_CONNECTION),
'transport': dict(choices=TRANSPORTS),
'port': dict(),
'debug': dict(type='bool', default='false'),
'logging': dict(type='bool', default='true')
}
stateful_args = {
'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, autorefresh=False, *args, **kwargs):
kwargs['argument_spec'].update(self.meta_args)
self._stateful = stateful
if stateful:
kwargs['argument_spec'].update(self.stateful_args)
        ## In Ansible 2.0, AnsibleModule.__init__() sets self.params and
        ## then calls self.log() (through self._log_invocation()).
        ##
        ## However, self.log() (overridden in EosAnsibleModule) references
        ## self._logging, and self._logging references self.params.
        ##
        ## So self._logging is defined here, without the
        ## "or self.params['logging']" fallback, *before*
        ## AnsibleModule.__init__() runs, to avoid a
        ## reference-before-definition error.
        ##
        ## Verified against Ansible 1.9.4 and 2.0.0.2. The only caveat is
        ## that the first log message from AnsibleModule.__init__() is not
        ## subject to the value of self.params['logging'].
self._logging = kwargs.get('logging')
super(EosAnsibleModule, self).__init__(*args, **kwargs)
self.result = dict(changed=False, changes=dict())
self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
self._logging = kwargs.get('logging') or self.params['logging']
self.log('DEBUG flag is %s' % self._debug)
self.debug('pyeapi_version', self.check_pyeapi())
self.debug('stateful', self._stateful)
self.debug('params', self.params)
self._attributes = self.map_argument_spec()
self.validate()
self._autorefresh = autorefresh
        self._node = self.connect()
self._instance = None
self.desired_state = self.params['state'] if self._stateful else None
self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
except Exception as exc:
self.fail('instance[error]: %s' % exc.message)
self.log("called instance: %s" % self._instance)
return self._instance
@property
def attributes(self):
return self._attributes
@property
def node(self):
return self._node
def check_pyeapi(self):
if not PYEAPI_AVAILABLE:
self.fail('Unable to import pyeapi, is it installed?')
return pyeapi.__version__
def map_argument_spec(self):
"""map_argument_spec maps only the module argument spec to attrs
This method will map the argumentspec minus the meta_args to attrs
and return the attrs. This returns a dict object that includes only
the original argspec plus the stateful_args (if self._stateful=True)
Returns:
dict: Returns a dict object that includes the original
argument_spec plus stateful_args with values minus meta_args
"""
keys = set(self.params).difference(self.meta_args)
        attrs = dict((k, self.params[k]) for k in keys)
if 'CHECKMODE' in attrs:
del attrs['CHECKMODE']
return attrs
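    # For example (values hypothetical): with params
    # {'name': 'rm1', 'state': 'present', 'debug': False, ...}, the meta
    # args are stripped and map_argument_spec() returns
    # {'name': 'rm1', 'state': 'present'}.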
def validate(self):
for key, value in self.attributes.iteritems():
func = self.func('validate_%s' % key)
if func:
self.attributes[key] = func(value)
def create(self):
if not self.check_mode:
func = self.func('create')
if not func:
self.fail('Module must define "create" function')
return self.invoke(func, self)
def remove(self):
if not self.check_mode:
func = self.func('remove')
if not func:
                self.fail('Module must define "remove" function')
return self.invoke(func, self)
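    # flush() drives the stateful workflow: when the desired state is
    # 'present' (or the module is not stateful), a missing resource is
    # created and each differing attribute is applied via its set_<name>
    # handler before an optional module-level flush() hook runs; when the
    # desired state is 'absent', an existing resource is removed; any other
    # desired state is dispatched to a module-level function of that name.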
def flush(self, exit_after_flush=False):
self.exit_after_flush = exit_after_flush
if self.desired_state == 'present' or not self._stateful:
if self.instance.get('state') == 'absent':
changed = self.create()
self.result['changed'] = changed or True
self.refresh()
# After a create command, flush the running-config
# so we get the latest for any other attributes
self._node._running_config = None
changeset = self.attributes.viewitems() - self.instance.viewitems()
if self._debug:
self.debug('desired_state', self.attributes)
self.debug('current_state', self.instance)
changes = self.update(changeset)
if changes:
self.result['changes'] = changes
self.result['changed'] = True
self._attributes.update(changes)
flush = self.func('flush')
if flush:
self.invoke(flush, self)
elif self.desired_state == 'absent' and self._stateful:
if self.instance.get('state') == 'present':
changed = self.remove()
self.result['changed'] = changed or True
elif self._stateful:
if self.desired_state != self.instance.get('state'):
func = self.func(self.desired_state)
changed = self.invoke(func, self)
self.result['changed'] = changed or True
self.refresh()
        # Calling self.instance here triggers another 'show running-config
        # all', which is slow, so it is only done when debug is enabled;
        # it records the latest state of the object in the result.
if self._debug:
self.result['instance'] = self.instance
if self.exit_after_flush:
self.exit()
def update(self, changeset):
changes = dict()
for key, value in changeset:
if value is not None:
changes[key] = value
func = self.func('set_%s' % key)
if func and not self.check_mode:
try:
self.invoke(func, self)
except Exception as exc:
self.fail(exc.message)
return changes
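    # Illustrative changeset handling (values hypothetical): if the desired
    # attributes contain ('description', 'dc routes') while the current
    # instance reports ('description', ''), the pair lands in the changeset
    # and the module-level set_description() handler is invoked with the
    # module, unless running in check mode.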
def connect(self):
if self.params['config']:
pyeapi.load_config(self.params['config'])
config = dict()
if self.params['connection']:
config = pyeapi.config_for(self.params['connection'])
if not config:
msg = 'Connection name "%s" not found' % self.params['connection']
self.fail(msg)
if self.params['username']:
config['username'] = self.params['username']
if self.params['password']:
config['password'] = self.params['password']
if self.params['transport']:
config['transport'] = self.params['transport']
if self.params['port']:
config['port'] = self.params['port']
if self.params['host']:
config['host'] = self.params['host']
if 'transport' not in config:
self.fail('Connection must define a transport')
connection = pyeapi.client.make_connection(**config)
self.log('Creating connection with autorefresh=%s' % self._autorefresh)
node = pyeapi.client.Node(connection, autorefresh=self._autorefresh,
**config)
try:
resp = node.enable('show version')
self.debug('eos_version', resp[0]['result']['version'])
self.debug('eos_model', resp[0]['result']['modelName'])
except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
self.fail('unable to connect to %s' % node)
else:
self.log('Connected to node %s' % node)
self.debug('node', str(node))
return node
def config(self, commands):
self.result['changed'] = True
if not self.check_mode:
self.node.config(commands)
def api(self, module):
return self.node.api(module)
def func(self, name):
return globals().get(name)
def invoke(self, func, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as exc:
self.fail(exc.message)
def invoke_function(self, name, *args, **kwargs):
func = self.func(name)
if func:
return self.invoke(func, *args, **kwargs)
def fail(self, msg):
self.invoke_function('on_fail', self)
self.log('ERROR: %s' % msg, syslog.LOG_ERR)
self.fail_json(msg=msg)
def exit(self):
self.invoke_function('on_exit', self)
self.log('Module completed successfully')
self.exit_json(**self.result)
def refresh(self):
self._instance = None
def debug(self, key, value):
if self._debug:
if 'debug' not in self.result:
self.result['debug'] = dict()
self.result['debug'][key] = value
    def log(self, message, priority=None):
if self._logging:
syslog.openlog('ansible-eos')
priority = priority or DEFAULT_SYSLOG_PRIORITY
syslog.syslog(priority, str(message))
@classmethod
def add_state(cls, name):
cls.stateful_args['state']['choices'].append(name)
#<<EOS_COMMON_MODULE_END>>
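# The EosAnsibleModule framework above resolves handlers by name through
# EosAnsibleModule.func(), i.e. globals().get(name): instance() reads the
# current state, create()/remove() add or delete the resource, and each
# set_<attribute>() applies a single attribute from the changeset.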
def instance(module):
""" Returns an instance of Routemaps based on name, action and sequence
number.
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
_instance = dict(name=name, action=action, seqno=seqno, state='absent')
try:
result = module.api('routemaps').get(name)[action][seqno]
    except Exception:
result = None
if result:
_instance['state'] = 'present'
_instance['seqno'] = str(seqno)
_instance['set'] = ','.join(result['set'])
desc = result['description']
_instance['description'] = desc if desc else ''
_instance['match'] = ','.join(result['match'])
cont = result['continue']
_instance['continue'] = str(cont) if cont else ''
return _instance
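# Note that instance() returns seqno and continue as strings so that the
# changeset comparison in flush() against the (string) module parameters
# behaves as expected.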
def create(module):
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
module.log('Invoked create for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.api('routemaps').create(name, action, seqno)
def remove(module):
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
module.log('Invoked remove for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.api('routemaps').delete(name, action, seqno)
def set_description(module):
""" Configures the description for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
value = module.attributes['description']
module.log('Invoked set_description with %s for eos_routemap[%s %s %s]'
% (value, name, action, seqno))
if value == '':
module.node.api('routemaps').set_description(name, action, seqno,
disable=True)
else:
module.node.api('routemaps').set_description(name, action, seqno, value)
def set_continue(module):
""" Configures the continue value for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
try:
value = int(module.attributes['continue'])
    except (TypeError, ValueError):
value = None
module.log('Invoked set_continue for eos_routemap[%s %s %s]'
% (name, action, seqno))
if value is None:
module.node.api('routemaps').set_continue(name, action, seqno,
disable=True)
else:
module.node.api('routemaps').set_continue(name, action, seqno, value)
def set_match(module):
""" Configures the match statements for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
statements = module.attributes['match'].split(',')
module.log('Invoked set_match for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.node.api('routemaps').set_match_statements(name, action, seqno,
statements)
def set_set(module):
""" Configures the set statements for the routemap
"""
name = module.attributes['name']
action = module.attributes['action']
seqno = int(module.attributes['seqno'])
statements = module.attributes['set'].split(',')
module.log('Invoked set_set for eos_routemap[%s %s %s]'
% (name, action, seqno))
module.node.api('routemaps').set_set_statements(name, action, seqno,
statements)
def main():
""" The main module routine called when the module is run by Ansible
"""
argument_spec = dict(
name=dict(required=True),
action=dict(default='permit', choices=['permit', 'deny']),
seqno=dict(required=True),
description=dict(),
match=dict(),
set=dict()
)
argument_spec['continue'] = dict()
module = EosAnsibleModule(argument_spec=argument_spec,
supports_check_mode=True)
module.flush(True)
main() | apache-2.0 |
Dhivyap/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_certificate.py | 89 | 5460 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2017, Abhijeet Kasurde <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_certificate
short_description: Manage certificates in DigitalOcean.
description:
    - Create, retrieve and remove certificates in DigitalOcean.
author: "Abhijeet Kasurde (@Akasurde)"
version_added: "2.5"
options:
name:
description:
- The name of the certificate.
required: true
private_key:
description:
- A PEM-formatted private key content of SSL Certificate.
leaf_certificate:
description:
- A PEM-formatted public SSL Certificate.
certificate_chain:
description:
- The full PEM-formatted trust chain between the certificate authority's certificate and your domain's SSL certificate.
state:
description:
- Whether the certificate should be present or absent.
default: present
choices: ['present', 'absent']
extends_documentation_fragment: digital_ocean.documentation
notes:
  - The environment variables DO_API_KEY, DO_OAUTH_TOKEN and DO_API_TOKEN
    can be used. They all refer to the v2 token.
'''
EXAMPLES = '''
- name: create a certificate
digital_ocean_certificate:
name: production
state: present
private_key: "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkM8OI7pRpgyj1I\n-----END PRIVATE KEY-----"
leaf_certificate: "-----BEGIN CERTIFICATE-----\nMIIFDmg2Iaw==\n-----END CERTIFICATE-----"
oauth_token: b7d03a6947b217efb6f3ec3bd365652
- name: create a certificate using file lookup plugin
digital_ocean_certificate:
name: production
state: present
private_key: "{{ lookup('file', 'test.key') }}"
leaf_certificate: "{{ lookup('file', 'test.cert') }}"
oauth_token: "{{ oauth_token }}"
- name: create a certificate with trust chain
digital_ocean_certificate:
name: production
state: present
private_key: "{{ lookup('file', 'test.key') }}"
leaf_certificate: "{{ lookup('file', 'test.cert') }}"
certificate_chain: "{{ lookup('file', 'chain.cert') }}"
oauth_token: "{{ oauth_token }}"
- name: remove a certificate
digital_ocean_certificate:
name: production
state: absent
oauth_token: "{{ oauth_token }}"
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.digital_ocean import DigitalOceanHelper
from ansible.module_utils._text import to_native
def core(module):
state = module.params['state']
name = module.params['name']
rest = DigitalOceanHelper(module)
results = dict(changed=False)
response = rest.get('certificates')
status_code = response.status_code
resp_json = response.json
if status_code != 200:
module.fail_json(msg="Failed to retrieve certificates for DigitalOcean")
if state == 'present':
for cert in resp_json['certificates']:
if cert['name'] == name:
module.fail_json(msg="Certificate name %s already exists" % name)
# Certificate does not exist, let us create it
cert_data = dict(name=name,
private_key=module.params['private_key'],
leaf_certificate=module.params['leaf_certificate'])
if module.params['certificate_chain'] is not None:
cert_data.update(certificate_chain=module.params['certificate_chain'])
response = rest.post("certificates", data=cert_data)
status_code = response.status_code
if status_code == 500:
module.fail_json(msg="Failed to upload certificates as the certificates are malformed.")
resp_json = response.json
if status_code == 201:
results.update(changed=True, response=resp_json)
elif status_code == 422:
results.update(changed=False, response=resp_json)
elif state == 'absent':
cert_id_del = None
for cert in resp_json['certificates']:
if cert['name'] == name:
cert_id_del = cert['id']
if cert_id_del is not None:
url = "certificates/{0}".format(cert_id_del)
response = rest.delete(url)
if response.status_code == 204:
results.update(changed=True)
else:
results.update(changed=False)
else:
module.fail_json(msg="Failed to find certificate %s" % name)
module.exit_json(**results)
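# Response codes handled in core() (per the DigitalOcean v2 API): 201 marks
# a successful create, 422 is treated as no change, 500 is reported as
# malformed certificate data, and 204 confirms a successful delete.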
def main():
argument_spec = DigitalOceanHelper.digital_ocean_argument_spec()
argument_spec.update(
name=dict(type='str'),
leaf_certificate=dict(type='str'),
private_key=dict(type='str', no_log=True),
state=dict(choices=['present', 'absent'], default='present'),
certificate_chain=dict(type='str')
)
module = AnsibleModule(
argument_spec=argument_spec,
required_if=[('state', 'present', ['name', 'leaf_certificate', 'private_key']),
('state', 'absent', ['name'])
],
)
try:
core(module)
except Exception as e:
module.fail_json(msg=to_native(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
mancoast/CPythonPyc_test | cpython/276_test_zipfile.py | 36 | 61036 | # We can test part of the module without zlib.
try:
import zlib
except ImportError:
zlib = None
import os
import io
import sys
import time
import shutil
import struct
import zipfile
import unittest
from StringIO import StringIO
from tempfile import TemporaryFile
from random import randint, random
from unittest import skipUnless
from test.test_support import TESTFN, TESTFN_UNICODE, TESTFN_ENCODING, \
run_unittest, findfile, unlink
try:
TESTFN_UNICODE.encode(TESTFN_ENCODING)
except (UnicodeError, TypeError):
# Either the file system encoding is None, or the file name
# cannot be encoded in the file system encoding.
TESTFN_UNICODE = None
TESTFN2 = TESTFN + "2"
TESTFNDIR = TESTFN + "d"
FIXEDTEST_SIZE = 1000
SMALL_TEST_DATA = [('_ziptest1', '1q2w3e4r5t'),
('ziptest2dir/_ziptest2', 'qawsedrftg'),
('ziptest2dir/ziptest3dir/_ziptest3', 'azsxdcfvgb'),
('ziptest2dir/ziptest3dir/ziptest4dir/_ziptest3', '6y7u8i9o0p')]
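# Each tuple is (archive path, file contents); the nested paths exercise
# directory creation in the extract()/extractall() tests below.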
class TestsWithSourceFile(unittest.TestCase):
def setUp(self):
self.line_gen = ["Zipfile test line %d. random float: %f" % (i, random())
for i in xrange(FIXEDTEST_SIZE)]
self.data = '\n'.join(self.line_gen) + '\n'
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
def zip_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
zipfp.printdir()
finally:
sys.stdout = stdout
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEqual(len(lines), 4) # Number of files + header
self.assertIn('File Name', lines[0])
self.assertIn('Modified', lines[0])
self.assertIn('Size', lines[0])
fn, date, time_, size = lines[1].split()
self.assertEqual(fn, 'another.name')
self.assertTrue(time.strptime(date, '%Y-%m-%d'))
self.assertTrue(time.strptime(time_, '%H:%M:%S'))
self.assertEqual(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
# Check infolist
infos = zipfp.infolist()
names = [i.filename for i in infos]
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
for i in infos:
self.assertEqual(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another.name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEqual(info.filename, nm)
self.assertEqual(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
def test_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_test(f, zipfile.ZIP_STORED)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
self.assertEqual(''.join(zipdata1), self.data)
self.assertEqual(''.join(zipdata2), self.data)
def test_open_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_open_test(f, zipfile.ZIP_STORED)
def test_open_via_zip_info(self):
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("name", "foo")
zipfp.writestr("name", "bar")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
infos = zipfp.infolist()
data = ""
for info in infos:
with zipfp.open(info) as f:
data += f.read()
self.assertTrue(data == "foobar" or data == "barfoo")
data = ""
for info in infos:
data += zipfp.read(info)
self.assertTrue(data == "foobar" or data == "barfoo")
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
self.assertEqual(''.join(zipdata1), self.data)
def test_random_open_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_random_open_test(f, zipfile.ZIP_STORED)
    def test_universal_readaheads(self):
f = StringIO()
data = 'a\r\n' * 16 * 1024
with zipfile.ZipFile(f, 'w', zipfile.ZIP_STORED) as zipfp:
zipfp.writestr(TESTFN, data)
data2 = ''
with zipfile.ZipFile(f, 'r') as zipfp:
with zipfp.open(TESTFN, 'rU') as zipopen:
for line in zipopen:
data2 += line
self.assertEqual(data, data2.replace('\n', '\r\n'))
def zip_readline_read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
data = ''
while True:
read = zipopen.readline()
if not read:
break
data += read
read = zipopen.read(100)
if not read:
break
data += read
self.assertEqual(data, self.data)
def zip_readline_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zipopen:
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line + '\n')
def zip_readlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
with zipfp.open(TESTFN) as zo:
ziplines = zo.readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line + '\n')
def zip_iterlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for line, zipline in zip(self.line_gen, zipfp.open(TESTFN)):
self.assertEqual(zipline, line + '\n')
def test_readline_read_stored(self):
# Issue #7610: calls to readline() interleaved with calls to read().
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_readline_read_test(f, zipfile.ZIP_STORED)
def test_readline_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_readline_test(f, zipfile.ZIP_STORED)
def test_readlines_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_readlines_test(f, zipfile.ZIP_STORED)
def test_iterlines_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_iterlines_test(f, zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_open_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_open_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_random_open_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_random_open_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_readline_read_deflated(self):
# Issue #7610: calls to readline() interleaved with calls to read().
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_readline_read_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_readline_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_readline_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_readlines_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_readlines_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_iterlines_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_iterlines_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_low_compression(self):
"""Check for cases where compressed data is larger than original."""
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED) as zipfp:
zipfp.writestr("strfile", '12')
# Get an open object for strfile
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_DEFLATED) as zipfp:
with zipfp.open("strfile") as openobj:
self.assertEqual(openobj.read(1), '1')
self.assertEqual(openobj.read(1), '2')
def test_absolute_arcnames(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, "/absolute")
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
self.assertEqual(zipfp.namelist(), ["absolute"])
def test_append_to_zip_file(self):
"""Test appending to an existing zipfile."""
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
zipfp.writestr("strfile", self.data)
self.assertEqual(zipfp.namelist(), [TESTFN, "strfile"])
def test_append_to_non_zip_file(self):
"""Test appending to an existing file that is not a zipfile."""
        # NOTE: this test fails if len(data) < 22 because of the first
        # line "fpin.seek(-22, 2)" in _EndRecData
data = 'I am not a ZipFile!'*10
with open(TESTFN2, 'wb') as f:
f.write(data)
with zipfile.ZipFile(TESTFN2, "a", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'rb') as f:
f.seek(len(data))
with zipfile.ZipFile(f, "r") as zipfp:
self.assertEqual(zipfp.namelist(), [TESTFN])
def test_ignores_newline_at_end(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'a') as f:
f.write("\r\n\00\00\00")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsInstance(zipfp, zipfile.ZipFile)
def test_ignores_stuff_appended_past_comments(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
zipfp.comment = b"this is a comment"
zipfp.write(TESTFN, TESTFN)
with open(TESTFN2, 'a') as f:
f.write("abcdef\r\n")
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertIsInstance(zipfp, zipfile.ZipFile)
self.assertEqual(zipfp.comment, b"this is a comment")
def test_write_default_name(self):
"""Check that calling ZipFile.write without arcname specified
produces the expected result."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN)
self.assertEqual(zipfp.read(TESTFN), open(TESTFN).read())
@skipUnless(zlib, "requires zlib")
def test_per_file_compression(self):
"""Check that files within a Zip archive can have different
compression options."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
zipfp.write(TESTFN, 'storeme', zipfile.ZIP_STORED)
zipfp.write(TESTFN, 'deflateme', zipfile.ZIP_DEFLATED)
sinfo = zipfp.getinfo('storeme')
dinfo = zipfp.getinfo('deflateme')
self.assertEqual(sinfo.compress_type, zipfile.ZIP_STORED)
self.assertEqual(dinfo.compress_type, zipfile.ZIP_DEFLATED)
def test_write_to_readonly(self):
"""Check that trying to call write() on a readonly ZipFile object
raises a RuntimeError."""
with zipfile.ZipFile(TESTFN2, mode="w") as zipfp:
zipfp.writestr("somefile.txt", "bogus")
with zipfile.ZipFile(TESTFN2, mode="r") as zipfp:
self.assertRaises(RuntimeError, zipfp.write, TESTFN)
def test_extract(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
writtenfile = zipfp.extract(fpath)
# make sure it was written to the right place
correctfile = os.path.join(os.getcwd(), fpath)
correctfile = os.path.normpath(correctfile)
self.assertEqual(writtenfile, correctfile)
# make sure correct data is in correct file
self.assertEqual(fdata, open(writtenfile, "rb").read())
os.remove(writtenfile)
# remove the test file subdirectories
shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def test_extract_all(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
zipfp.extractall()
for fpath, fdata in SMALL_TEST_DATA:
outfile = os.path.join(os.getcwd(), fpath)
self.assertEqual(fdata, open(outfile, "rb").read())
os.remove(outfile)
# remove the test file subdirectories
shutil.rmtree(os.path.join(os.getcwd(), 'ziptest2dir'))
def check_file(self, filename, content):
self.assertTrue(os.path.isfile(filename))
with open(filename, 'rb') as f:
self.assertEqual(f.read(), content)
@skipUnless(TESTFN_UNICODE, "No Unicode filesystem semantics on this platform.")
def test_extract_unicode_filenames(self):
fnames = [u'foo.txt', os.path.basename(TESTFN_UNICODE)]
content = 'Test for unicode filename'
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED) as zipfp:
for fname in fnames:
zipfp.writestr(fname, content)
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
for fname in fnames:
writtenfile = zipfp.extract(fname)
# make sure it was written to the right place
correctfile = os.path.join(os.getcwd(), fname)
correctfile = os.path.normpath(correctfile)
self.assertEqual(writtenfile, correctfile)
self.check_file(writtenfile, content)
os.remove(writtenfile)
def test_extract_hackers_arcnames(self):
hacknames = [
('../foo/bar', 'foo/bar'),
('foo/../bar', 'foo/bar'),
('foo/../../bar', 'foo/bar'),
('foo/bar/..', 'foo/bar'),
('./../foo/bar', 'foo/bar'),
('/foo/bar', 'foo/bar'),
('/foo/../bar', 'foo/bar'),
('/foo/../../bar', 'foo/bar'),
]
if os.path.sep == '\\':
hacknames.extend([
(r'..\foo\bar', 'foo/bar'),
(r'..\/foo\/bar', 'foo/bar'),
(r'foo/\..\/bar', 'foo/bar'),
(r'foo\/../\bar', 'foo/bar'),
(r'C:foo/bar', 'foo/bar'),
(r'C:/foo/bar', 'foo/bar'),
(r'C://foo/bar', 'foo/bar'),
(r'C:\foo\bar', 'foo/bar'),
(r'//conky/mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\conky\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'///conky/mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\\conky\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'//conky//mountpoint/foo/bar', 'conky/mountpoint/foo/bar'),
(r'\\conky\\mountpoint\foo\bar', 'conky/mountpoint/foo/bar'),
(r'//?/C:/foo/bar', '_/C_/foo/bar'),
(r'\\?\C:\foo\bar', '_/C_/foo/bar'),
(r'C:/../C:/foo/bar', 'C_/foo/bar'),
(r'a:b\c<d>e|f"g?h*i', 'b/c_d_e_f_g_h_i'),
('../../foo../../ba..r', 'foo/ba..r'),
])
else: # Unix
hacknames.extend([
('//foo/bar', 'foo/bar'),
('../../foo../../ba..r', 'foo../ba..r'),
(r'foo/..\bar', r'foo/..\bar'),
])
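        # Each pair maps a hostile arcname to the sanitized relative path
        # extract() is expected to produce: leading slashes, drive letters
        # and '..' components are dropped, and (on Windows) characters that
        # are illegal in filenames are replaced with '_'.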
for arcname, fixedname in hacknames:
content = b'foobar' + arcname.encode()
with zipfile.ZipFile(TESTFN2, 'w', zipfile.ZIP_STORED) as zipfp:
zinfo = zipfile.ZipInfo()
# preserve backslashes
zinfo.filename = arcname
zinfo.external_attr = 0o600 << 16
zipfp.writestr(zinfo, content)
arcname = arcname.replace(os.sep, "/")
targetpath = os.path.join('target', 'subdir', 'subsub')
correctfile = os.path.join(targetpath, *fixedname.split('/'))
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
writtenfile = zipfp.extract(arcname, targetpath)
self.assertEqual(writtenfile, correctfile,
msg="extract %r" % arcname)
self.check_file(correctfile, content)
shutil.rmtree('target')
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
zipfp.extractall(targetpath)
self.check_file(correctfile, content)
shutil.rmtree('target')
correctfile = os.path.join(os.getcwd(), *fixedname.split('/'))
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
writtenfile = zipfp.extract(arcname)
self.assertEqual(writtenfile, correctfile,
msg="extract %r" % arcname)
self.check_file(correctfile, content)
shutil.rmtree(fixedname.split('/')[0])
with zipfile.ZipFile(TESTFN2, 'r') as zipfp:
zipfp.extractall()
self.check_file(correctfile, content)
shutil.rmtree(fixedname.split('/')[0])
os.remove(TESTFN2)
def test_writestr_compression(self):
zipfp = zipfile.ZipFile(TESTFN2, "w")
zipfp.writestr("a.txt", "hello world", compress_type=zipfile.ZIP_STORED)
if zlib:
zipfp.writestr("b.txt", "hello world", compress_type=zipfile.ZIP_DEFLATED)
info = zipfp.getinfo('a.txt')
self.assertEqual(info.compress_type, zipfile.ZIP_STORED)
if zlib:
info = zipfp.getinfo('b.txt')
self.assertEqual(info.compress_type, zipfile.ZIP_DEFLATED)
def zip_test_writestr_permissions(self, f, compression):
# Make sure that writestr creates files with mode 0600,
# when it is passed a name rather than a ZipInfo instance.
self.make_test_archive(f, compression)
with zipfile.ZipFile(f, "r") as zipfp:
zinfo = zipfp.getinfo('strfile')
self.assertEqual(zinfo.external_attr, 0600 << 16)
def test_writestr_permissions(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_test_writestr_permissions(f, zipfile.ZIP_STORED)
def test_close(self):
"""Check that the zipfile is closed after the 'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
self.assertTrue(zipfp.fp is not None, 'zipfp is not open')
self.assertTrue(zipfp.fp is None, 'zipfp is not closed')
with zipfile.ZipFile(TESTFN2, "r") as zipfp:
self.assertTrue(zipfp.fp is not None, 'zipfp is not open')
self.assertTrue(zipfp.fp is None, 'zipfp is not closed')
def test_close_on_exception(self):
"""Check that the zipfile is closed if an exception is raised in the
'with' block."""
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
for fpath, fdata in SMALL_TEST_DATA:
zipfp.writestr(fpath, fdata)
try:
with zipfile.ZipFile(TESTFN2, "r") as zipfp2:
raise zipfile.BadZipfile()
except zipfile.BadZipfile:
self.assertTrue(zipfp2.fp is None, 'zipfp is not closed')
def test_add_file_before_1980(self):
# Set atime and mtime to 1970-01-01
os.utime(TESTFN, (0, 0))
with zipfile.ZipFile(TESTFN2, "w") as zipfp:
self.assertRaises(ValueError, zipfp.write, TESTFN)
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class TestZip64InSmallFiles(unittest.TestCase):
# These tests test the ZIP64 functionality without using large files,
# see test_zipfile64 for proper tests.
def setUp(self):
self._limit = zipfile.ZIP64_LIMIT
zipfile.ZIP64_LIMIT = 5
line_gen = ("Test of zipfile line %d." % i
for i in range(0, FIXEDTEST_SIZE))
self.data = '\n'.join(line_gen)
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def large_file_exception_test(self, f, compression):
with zipfile.ZipFile(f, "w", compression) as zipfp:
self.assertRaises(zipfile.LargeZipFile,
zipfp.write, TESTFN, "another.name")
def large_file_exception_test2(self, f, compression):
with zipfile.ZipFile(f, "w", compression) as zipfp:
self.assertRaises(zipfile.LargeZipFile,
zipfp.writestr, "another.name", self.data)
def test_large_file_exception(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.large_file_exception_test(f, zipfile.ZIP_STORED)
self.large_file_exception_test2(f, zipfile.ZIP_STORED)
def zip_test(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression, allowZip64=True) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
zipfp.writestr("strfile", self.data)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
self.assertEqual(zipfp.read(TESTFN), self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
self.assertEqual(zipfp.read("strfile"), self.data)
# Print the ZIP directory
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
zipfp.printdir()
finally:
sys.stdout = stdout
directory = fp.getvalue()
lines = directory.splitlines()
self.assertEqual(len(lines), 4) # Number of files + header
self.assertIn('File Name', lines[0])
self.assertIn('Modified', lines[0])
self.assertIn('Size', lines[0])
fn, date, time_, size = lines[1].split()
self.assertEqual(fn, 'another.name')
self.assertTrue(time.strptime(date, '%Y-%m-%d'))
self.assertTrue(time.strptime(time_, '%H:%M:%S'))
self.assertEqual(size, str(len(self.data)))
# Check the namelist
names = zipfp.namelist()
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
# Check infolist
infos = zipfp.infolist()
names = [i.filename for i in infos]
self.assertEqual(len(names), 3)
self.assertIn(TESTFN, names)
self.assertIn("another.name", names)
self.assertIn("strfile", names)
for i in infos:
self.assertEqual(i.file_size, len(self.data))
# check getinfo
for nm in (TESTFN, "another.name", "strfile"):
info = zipfp.getinfo(nm)
self.assertEqual(info.filename, nm)
self.assertEqual(info.file_size, len(self.data))
# Check that testzip doesn't raise an exception
zipfp.testzip()
def test_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_test(f, zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_test(f, zipfile.ZIP_DEFLATED)
def test_absolute_arcnames(self):
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_STORED,
allowZip64=True) as zipfp:
zipfp.write(TESTFN, "/absolute")
with zipfile.ZipFile(TESTFN2, "r", zipfile.ZIP_STORED) as zipfp:
self.assertEqual(zipfp.namelist(), ["absolute"])
def tearDown(self):
zipfile.ZIP64_LIMIT = self._limit
unlink(TESTFN)
unlink(TESTFN2)
class PyZipFileTests(unittest.TestCase):
def test_write_pyfile(self):
with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp:
fn = __file__
if fn.endswith('.pyc') or fn.endswith('.pyo'):
fn = fn[:-1]
zipfp.writepy(fn)
bn = os.path.basename(fn)
self.assertNotIn(bn, zipfp.namelist())
self.assertTrue(bn + 'o' in zipfp.namelist() or
bn + 'c' in zipfp.namelist())
with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp:
fn = __file__
if fn.endswith(('.pyc', '.pyo')):
fn = fn[:-1]
zipfp.writepy(fn, "testpackage")
bn = "%s/%s" % ("testpackage", os.path.basename(fn))
self.assertNotIn(bn, zipfp.namelist())
self.assertTrue(bn + 'o' in zipfp.namelist() or
bn + 'c' in zipfp.namelist())
def test_write_python_package(self):
import email
packagedir = os.path.dirname(email.__file__)
with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp:
zipfp.writepy(packagedir)
# Check for a couple of modules at different levels of the
# hierarchy
names = zipfp.namelist()
self.assertTrue('email/__init__.pyo' in names or
'email/__init__.pyc' in names)
self.assertTrue('email/mime/text.pyo' in names or
'email/mime/text.pyc' in names)
def test_write_python_directory(self):
os.mkdir(TESTFN2)
try:
with open(os.path.join(TESTFN2, "mod1.py"), "w") as fp:
fp.write("print(42)\n")
with open(os.path.join(TESTFN2, "mod2.py"), "w") as fp:
fp.write("print(42 * 42)\n")
with open(os.path.join(TESTFN2, "mod2.txt"), "w") as fp:
fp.write("bla bla bla\n")
zipfp = zipfile.PyZipFile(TemporaryFile(), "w")
zipfp.writepy(TESTFN2)
names = zipfp.namelist()
self.assertTrue('mod1.pyc' in names or 'mod1.pyo' in names)
self.assertTrue('mod2.pyc' in names or 'mod2.pyo' in names)
self.assertNotIn('mod2.txt', names)
finally:
shutil.rmtree(TESTFN2)
def test_write_non_pyfile(self):
with zipfile.PyZipFile(TemporaryFile(), "w") as zipfp:
open(TESTFN, 'w').write('most definitely not a python file')
self.assertRaises(RuntimeError, zipfp.writepy, TESTFN)
os.remove(TESTFN)
class OtherTests(unittest.TestCase):
zips_with_bad_crc = {
zipfile.ZIP_STORED: (
b'PK\003\004\024\0\0\0\0\0 \213\212;:r'
b'\253\377\f\0\0\0\f\0\0\0\005\0\0\000af'
b'ilehello,AworldP'
b'K\001\002\024\003\024\0\0\0\0\0 \213\212;:'
b'r\253\377\f\0\0\0\f\0\0\0\005\0\0\0\0'
b'\0\0\0\0\0\0\0\200\001\0\0\0\000afi'
b'lePK\005\006\0\0\0\0\001\0\001\0003\000'
b'\0\0/\0\0\0\0\0'),
zipfile.ZIP_DEFLATED: (
b'PK\x03\x04\x14\x00\x00\x00\x08\x00n}\x0c=FA'
b'KE\x10\x00\x00\x00n\x00\x00\x00\x05\x00\x00\x00af'
b'ile\xcbH\xcd\xc9\xc9W(\xcf/\xcaI\xc9\xa0'
b'=\x13\x00PK\x01\x02\x14\x03\x14\x00\x00\x00\x08\x00n'
b'}\x0c=FAKE\x10\x00\x00\x00n\x00\x00\x00\x05'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x01\x00\x00\x00'
b'\x00afilePK\x05\x06\x00\x00\x00\x00\x01\x00'
b'\x01\x003\x00\x00\x003\x00\x00\x00\x00\x00'),
}
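    # Two tiny pre-built archives (one stored, one deflated) that carry a
    # deliberately wrong CRC-32 (note the 'FAKE' bytes standing in for the
    # CRC in the deflated entry); testzip() and read() must detect the
    # mismatch.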
def test_unicode_filenames(self):
with zipfile.ZipFile(TESTFN, "w") as zf:
zf.writestr(u"foo.txt", "Test for unicode filename")
zf.writestr(u"\xf6.txt", "Test for unicode filename")
self.assertIsInstance(zf.infolist()[0].filename, unicode)
with zipfile.ZipFile(TESTFN, "r") as zf:
self.assertEqual(zf.filelist[0].filename, "foo.txt")
self.assertEqual(zf.filelist[1].filename, u"\xf6.txt")
def test_create_non_existent_file_for_append(self):
if os.path.exists(TESTFN):
os.unlink(TESTFN)
filename = 'testfile.txt'
content = 'hello, world. this is some content.'
try:
with zipfile.ZipFile(TESTFN, 'a') as zf:
zf.writestr(filename, content)
except IOError:
self.fail('Could not append data to a non-existent zip file.')
self.assertTrue(os.path.exists(TESTFN))
with zipfile.ZipFile(TESTFN, 'r') as zf:
self.assertEqual(zf.read(filename), content)
def test_close_erroneous_file(self):
# This test checks that the ZipFile constructor closes the file object
# it opens if there's an error in the file. If it doesn't, the
# traceback holds a reference to the ZipFile object and, indirectly,
# the file object.
# On Windows, this causes the os.unlink() call to fail because the
# underlying file is still open. This is SF bug #412214.
#
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
try:
zf = zipfile.ZipFile(TESTFN)
except zipfile.BadZipfile:
pass
def test_is_zip_erroneous_file(self):
"""Check that is_zipfile() correctly identifies non-zip files."""
# - passing a filename
with open(TESTFN, "w") as fp:
fp.write("this is not a legal zip file\n")
chk = zipfile.is_zipfile(TESTFN)
self.assertFalse(chk)
# - passing a file object
with open(TESTFN, "rb") as fp:
chk = zipfile.is_zipfile(fp)
self.assertTrue(not chk)
# - passing a file-like object
fp = StringIO()
fp.write("this is not a legal zip file\n")
chk = zipfile.is_zipfile(fp)
self.assertTrue(not chk)
fp.seek(0, 0)
chk = zipfile.is_zipfile(fp)
self.assertTrue(not chk)
def test_damaged_zipfile(self):
"""Check that zipfiles with missing bytes at the end raise BadZipFile."""
# - Create a valid zip file
fp = io.BytesIO()
with zipfile.ZipFile(fp, mode="w") as zipf:
zipf.writestr("foo.txt", b"O, for a Muse of Fire!")
zipfiledata = fp.getvalue()
        # - Now create copies of it missing the last N bytes and make sure
        #   a BadZipfile exception is raised when we try to open it
for N in range(len(zipfiledata)):
fp = io.BytesIO(zipfiledata[:N])
self.assertRaises(zipfile.BadZipfile, zipfile.ZipFile, fp)
def test_is_zip_valid_file(self):
"""Check that is_zipfile() correctly identifies zip files."""
# - passing a filename
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
chk = zipfile.is_zipfile(TESTFN)
self.assertTrue(chk)
# - passing a file object
with open(TESTFN, "rb") as fp:
chk = zipfile.is_zipfile(fp)
self.assertTrue(chk)
fp.seek(0, 0)
zip_contents = fp.read()
# - passing a file-like object
fp = StringIO()
fp.write(zip_contents)
chk = zipfile.is_zipfile(fp)
self.assertTrue(chk)
fp.seek(0, 0)
chk = zipfile.is_zipfile(fp)
self.assertTrue(chk)
def test_non_existent_file_raises_IOError(self):
# make sure we don't raise an AttributeError when a partially-constructed
# ZipFile instance is finalized; this tests for regression on SF tracker
# bug #403871.
# The bug we're testing for caused an AttributeError to be raised
# when a ZipFile instance was created for a file that did not
# exist; the .fp member was not initialized but was needed by the
# __del__() method. Since the AttributeError is in the __del__(),
# it is ignored, but the user should be sufficiently annoyed by
# the message on the output that regression will be noticed
# quickly.
self.assertRaises(IOError, zipfile.ZipFile, TESTFN)
def test_empty_file_raises_BadZipFile(self):
with open(TESTFN, 'w') as f:
pass
self.assertRaises(zipfile.BadZipfile, zipfile.ZipFile, TESTFN)
with open(TESTFN, 'w') as fp:
fp.write("short file")
self.assertRaises(zipfile.BadZipfile, zipfile.ZipFile, TESTFN)
def test_closed_zip_raises_RuntimeError(self):
"""Verify that testzip() doesn't swallow inappropriate exceptions."""
data = StringIO()
with zipfile.ZipFile(data, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# This is correct; calling .read on a closed ZipFile should raise
# a RuntimeError, and so should calling .testzip. An earlier
# version of .testzip would swallow this exception (and any other)
# and report that the first file in the archive was corrupt.
self.assertRaises(RuntimeError, zipf.read, "foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt")
self.assertRaises(RuntimeError, zipf.testzip)
self.assertRaises(RuntimeError, zipf.writestr, "bogus.txt", "bogus")
open(TESTFN, 'w').write('zipfile test data')
self.assertRaises(RuntimeError, zipf.write, TESTFN)
def test_bad_constructor_mode(self):
"""Check that bad modes passed to ZipFile constructor are caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "q")
def test_bad_open_mode(self):
"""Check that bad modes passed to ZipFile.open are caught."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
# read the data to make sure the file is there
zipf.read("foo.txt")
self.assertRaises(RuntimeError, zipf.open, "foo.txt", "q")
def test_read0(self):
"""Check that calling read(0) on a ZipExtFile object returns an empty
string and doesn't advance file pointer."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
# read the data to make sure the file is there
with zipf.open("foo.txt") as f:
for i in xrange(FIXEDTEST_SIZE):
self.assertEqual(f.read(0), '')
self.assertEqual(f.read(), "O, for a Muse of Fire!")
def test_open_non_existent_item(self):
"""Check that attempting to call open() for an item that doesn't
exist in the archive raises a RuntimeError."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertRaises(KeyError, zipf.open, "foo.txt", "r")
def test_bad_compression_mode(self):
"""Check that bad compression methods passed to ZipFile.open are
caught."""
self.assertRaises(RuntimeError, zipfile.ZipFile, TESTFN, "w", -1)
def test_unsupported_compression(self):
# data is declared as shrunk, but actually deflated
data = (b'PK\x03\x04.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00'
b'\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00x\x03\x00PK\x01'
b'\x02.\x03.\x00\x00\x00\x01\x00\xe4C\xa1@\x00\x00\x00\x00\x02\x00\x00'
b'\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x80\x01\x00\x00\x00\x00xPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x00'
b'/\x00\x00\x00!\x00\x00\x00\x00\x00')
with zipfile.ZipFile(io.BytesIO(data), 'r') as zipf:
self.assertRaises(NotImplementedError, zipf.open, 'x')
def test_null_byte_in_filename(self):
"""Check that a filename containing a null byte is properly
terminated."""
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.writestr("foo.txt\x00qqq", "O, for a Muse of Fire!")
self.assertEqual(zipf.namelist(), ['foo.txt'])
def test_struct_sizes(self):
"""Check that ZIP internal structure sizes are calculated correctly."""
self.assertEqual(zipfile.sizeEndCentDir, 22)
self.assertEqual(zipfile.sizeCentralDir, 46)
self.assertEqual(zipfile.sizeEndCentDir64, 56)
self.assertEqual(zipfile.sizeEndCentDir64Locator, 20)
def test_comments(self):
"""Check that comments on the archive are handled properly."""
# check default comment is empty
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
self.assertEqual(zipf.comment, '')
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
self.assertEqual(zipf.comment, '')
# check a simple short comment
comment = 'Bravely taking to his feet, he beat a very brave retreat.'
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
self.assertEqual(zipf.comment, comment)
# check a comment of max length
comment2 = ''.join(['%d' % (i**3 % 10) for i in xrange((1 << 16)-1)])
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment2
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
self.assertEqual(zipf.comment, comment2)
# check a comment that is too long is truncated
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
zipf.comment = comment2 + 'oops'
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, mode="r") as zipf:
self.assertEqual(zipf.comment, comment2)
def test_change_comment_in_empty_archive(self):
with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
self.assertFalse(zipf.filelist)
zipf.comment = b"this is a comment"
with zipfile.ZipFile(TESTFN, "r") as zipf:
self.assertEqual(zipf.comment, b"this is a comment")
def test_change_comment_in_nonempty_archive(self):
with zipfile.ZipFile(TESTFN, "w", zipfile.ZIP_STORED) as zipf:
zipf.writestr("foo.txt", "O, for a Muse of Fire!")
with zipfile.ZipFile(TESTFN, "a", zipfile.ZIP_STORED) as zipf:
self.assertTrue(zipf.filelist)
zipf.comment = b"this is a comment"
with zipfile.ZipFile(TESTFN, "r") as zipf:
self.assertEqual(zipf.comment, b"this is a comment")
def check_testzip_with_bad_crc(self, compression):
"""Tests that files with bad CRCs return their name from testzip."""
zipdata = self.zips_with_bad_crc[compression]
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
# testzip returns the name of the first corrupt file, or None
self.assertEqual('afile', zipf.testzip())
def test_testzip_with_bad_crc_stored(self):
self.check_testzip_with_bad_crc(zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_testzip_with_bad_crc_deflated(self):
self.check_testzip_with_bad_crc(zipfile.ZIP_DEFLATED)
def check_read_with_bad_crc(self, compression):
"""Tests that files with bad CRCs raise a BadZipfile exception when read."""
zipdata = self.zips_with_bad_crc[compression]
# Using ZipFile.read()
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
self.assertRaises(zipfile.BadZipfile, zipf.read, 'afile')
# Using ZipExtFile.read()
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
with zipf.open('afile', 'r') as corrupt_file:
self.assertRaises(zipfile.BadZipfile, corrupt_file.read)
# Same with small reads (in order to exercise the buffering logic)
with zipfile.ZipFile(io.BytesIO(zipdata), mode="r") as zipf:
with zipf.open('afile', 'r') as corrupt_file:
corrupt_file.MIN_READ_SIZE = 2
with self.assertRaises(zipfile.BadZipfile):
while corrupt_file.read(2):
pass
def test_read_with_bad_crc_stored(self):
self.check_read_with_bad_crc(zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_read_with_bad_crc_deflated(self):
self.check_read_with_bad_crc(zipfile.ZIP_DEFLATED)
def check_read_return_size(self, compression):
# Issue #9837: ZipExtFile.read() shouldn't return more bytes
# than requested.
for test_size in (1, 4095, 4096, 4097, 16384):
file_size = test_size + 1
junk = b''.join(struct.pack('B', randint(0, 255))
for x in range(file_size))
with zipfile.ZipFile(io.BytesIO(), "w", compression) as zipf:
zipf.writestr('foo', junk)
with zipf.open('foo', 'r') as fp:
buf = fp.read(test_size)
self.assertEqual(len(buf), test_size)
def test_read_return_size_stored(self):
self.check_read_return_size(zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_read_return_size_deflated(self):
self.check_read_return_size(zipfile.ZIP_DEFLATED)
def test_empty_zipfile(self):
# Check that creating a file in 'w' or 'a' mode and closing without
# adding any files to the archives creates a valid empty ZIP file
with zipfile.ZipFile(TESTFN, mode="w") as zipf:
pass
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
except zipfile.BadZipfile:
self.fail("Unable to create empty ZIP file in 'w' mode")
with zipfile.ZipFile(TESTFN, mode="a") as zipf:
pass
try:
zipf = zipfile.ZipFile(TESTFN, mode="r")
        except zipfile.BadZipfile:
self.fail("Unable to create empty ZIP file in 'a' mode")
def test_open_empty_file(self):
# Issue 1710703: Check that opening a file with less than 22 bytes
# raises a BadZipfile exception (rather than the previously unhelpful
# IOError)
with open(TESTFN, 'w') as f:
pass
self.assertRaises(zipfile.BadZipfile, zipfile.ZipFile, TESTFN, 'r')
def test_create_zipinfo_before_1980(self):
self.assertRaises(ValueError,
zipfile.ZipInfo, 'seventies', (1979, 1, 1, 0, 0, 0))
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
class DecryptionTests(unittest.TestCase):
"""Check that ZIP decryption works. Since the library does not
support encryption at the moment, we use a pre-generated encrypted
ZIP file."""
data = (
'PK\x03\x04\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00\x1a\x00'
'\x00\x00\x08\x00\x00\x00test.txt\xfa\x10\xa0gly|\xfa-\xc5\xc0=\xf9y'
'\x18\xe0\xa8r\xb3Z}Lg\xbc\xae\xf9|\x9b\x19\xe4\x8b\xba\xbb)\x8c\xb0\xdbl'
'PK\x01\x02\x14\x00\x14\x00\x01\x00\x00\x00n\x92i.#y\xef?&\x00\x00\x00'
'\x1a\x00\x00\x00\x08\x00\x00\x00\x00\x00\x00\x00\x01\x00 \x00\xb6\x81'
'\x00\x00\x00\x00test.txtPK\x05\x06\x00\x00\x00\x00\x01\x00\x01\x006\x00'
'\x00\x00L\x00\x00\x00\x00\x00' )
data2 = (
'PK\x03\x04\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02'
'\x00\x00\x04\x00\x15\x00zeroUT\t\x00\x03\xd6\x8b\x92G\xda\x8b\x92GUx\x04'
'\x00\xe8\x03\xe8\x03\xc7<M\xb5a\xceX\xa3Y&\x8b{oE\xd7\x9d\x8c\x98\x02\xc0'
'PK\x07\x08xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00PK\x01\x02\x17\x03'
'\x14\x00\t\x00\x08\x00\xcf}38xu\xaa\xb2\x14\x00\x00\x00\x00\x02\x00\x00'
'\x04\x00\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\xa4\x81\x00\x00\x00\x00ze'
'roUT\x05\x00\x03\xd6\x8b\x92GUx\x00\x00PK\x05\x06\x00\x00\x00\x00\x01'
'\x00\x01\x00?\x00\x00\x00[\x00\x00\x00\x00\x00' )
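    # Pre-generated encrypted archives: 'data' contains 'test.txt'
    # (password 'python') and 'data2' contains 'zero' stored deflated
    # (password '12345'); see test_good_password below.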
plain = 'zipfile.py encryption test'
plain2 = '\x00'*512
def setUp(self):
with open(TESTFN, "wb") as fp:
fp.write(self.data)
self.zip = zipfile.ZipFile(TESTFN, "r")
with open(TESTFN2, "wb") as fp:
fp.write(self.data2)
self.zip2 = zipfile.ZipFile(TESTFN2, "r")
def tearDown(self):
self.zip.close()
os.unlink(TESTFN)
self.zip2.close()
os.unlink(TESTFN2)
def test_no_password(self):
        # Reading the encrypted file without a password
        # must raise a RuntimeError
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
def test_bad_password(self):
self.zip.setpassword("perl")
self.assertRaises(RuntimeError, self.zip.read, "test.txt")
self.zip2.setpassword("perl")
self.assertRaises(RuntimeError, self.zip2.read, "zero")
@skipUnless(zlib, "requires zlib")
def test_good_password(self):
self.zip.setpassword("python")
self.assertEqual(self.zip.read("test.txt"), self.plain)
self.zip2.setpassword("12345")
self.assertEqual(self.zip2.read("zero"), self.plain2)
class TestsWithRandomBinaryFiles(unittest.TestCase):
def setUp(self):
datacount = randint(16, 64)*1024 + randint(1, 1024)
self.data = ''.join(struct.pack('<f', random()*randint(-1000, 1000))
for i in xrange(datacount))
# Make a source file with some lines
with open(TESTFN, "wb") as fp:
fp.write(self.data)
def tearDown(self):
unlink(TESTFN)
unlink(TESTFN2)
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
zipfp.write(TESTFN, "another.name")
zipfp.write(TESTFN, TESTFN)
def zip_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
testdata = zipfp.read(TESTFN)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
self.assertEqual(zipfp.read("another.name"), self.data)
def test_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_test(f, zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_deflated(self):
for f in (TESTFN2, TemporaryFile(), io.BytesIO()):
self.zip_test(f, zipfile.ZIP_DEFLATED)
def zip_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(256)
if not read_data:
break
zipdata1.append(read_data)
zipdata2 = []
with zipfp.open("another.name") as zipopen2:
while True:
read_data = zipopen2.read(256)
if not read_data:
break
zipdata2.append(read_data)
testdata1 = ''.join(zipdata1)
self.assertEqual(len(testdata1), len(self.data))
self.assertEqual(testdata1, self.data)
testdata2 = ''.join(zipdata2)
self.assertEqual(len(testdata2), len(self.data))
self.assertEqual(testdata2, self.data)
def test_open_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_open_test(f, zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_open_deflated(self):
for f in (TESTFN2, TemporaryFile(), io.BytesIO()):
self.zip_open_test(f, zipfile.ZIP_DEFLATED)
def zip_random_open_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r", compression) as zipfp:
zipdata1 = []
with zipfp.open(TESTFN) as zipopen1:
while True:
read_data = zipopen1.read(randint(1, 1024))
if not read_data:
break
zipdata1.append(read_data)
testdata = ''.join(zipdata1)
self.assertEqual(len(testdata), len(self.data))
self.assertEqual(testdata, self.data)
def test_random_open_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.zip_random_open_test(f, zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_random_open_deflated(self):
for f in (TESTFN2, TemporaryFile(), io.BytesIO()):
self.zip_random_open_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
class TestsWithMultipleOpens(unittest.TestCase):
def setUp(self):
# Create the ZIP archive
with zipfile.ZipFile(TESTFN2, "w", zipfile.ZIP_DEFLATED) as zipfp:
zipfp.writestr('ones', '1'*FIXEDTEST_SIZE)
zipfp.writestr('twos', '2'*FIXEDTEST_SIZE)
def test_same_file(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
zopen1 = zipf.open('ones')
zopen2 = zipf.open('ones')
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, data2)
def test_different_file(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('twos') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, '1'*FIXEDTEST_SIZE)
self.assertEqual(data2, '2'*FIXEDTEST_SIZE)
def test_interleaved(self):
# Verify that (when the ZipFile is in control of creating file objects)
# multiple open() calls can be made without interfering with each other.
with zipfile.ZipFile(TESTFN2, mode="r") as zipf:
with zipf.open('ones') as zopen1, zipf.open('twos') as zopen2:
data1 = zopen1.read(500)
data2 = zopen2.read(500)
data1 += zopen1.read(500)
data2 += zopen2.read(500)
self.assertEqual(data1, '1'*FIXEDTEST_SIZE)
self.assertEqual(data2, '2'*FIXEDTEST_SIZE)
def tearDown(self):
unlink(TESTFN2)
class TestWithDirectory(unittest.TestCase):
def setUp(self):
os.mkdir(TESTFN2)
def test_extract_dir(self):
with zipfile.ZipFile(findfile("zipdir.zip")) as zipf:
zipf.extractall(TESTFN2)
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a")))
self.assertTrue(os.path.isdir(os.path.join(TESTFN2, "a", "b")))
self.assertTrue(os.path.exists(os.path.join(TESTFN2, "a", "b", "c")))
def test_bug_6050(self):
# Extraction should succeed if directories already exist
os.mkdir(os.path.join(TESTFN2, "a"))
self.test_extract_dir()
def test_store_dir(self):
os.mkdir(os.path.join(TESTFN2, "x"))
with zipfile.ZipFile(TESTFN, "w") as zipf:
    zipf.write(os.path.join(TESTFN2, "x"), "x")
    self.assertTrue(zipf.filelist[0].filename.endswith("x/"))
def tearDown(self):
shutil.rmtree(TESTFN2)
if os.path.exists(TESTFN):
unlink(TESTFN)
class UniversalNewlineTests(unittest.TestCase):
def setUp(self):
self.line_gen = ["Test of zipfile line %d." % i
for i in xrange(FIXEDTEST_SIZE)]
self.seps = ('\r', '\r\n', '\n')
self.arcdata, self.arcfiles = {}, {}
for n, s in enumerate(self.seps):
self.arcdata[s] = s.join(self.line_gen) + s
self.arcfiles[s] = '%s-%d' % (TESTFN, n)
with open(self.arcfiles[s], "wb") as fp:
    fp.write(self.arcdata[s])
def make_test_archive(self, f, compression):
# Create the ZIP archive
with zipfile.ZipFile(f, "w", compression) as zipfp:
for fn in self.arcfiles.values():
zipfp.write(fn, fn)
def read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with zipfp.open(fn, "rU") as fp:
zipdata = fp.read()
self.assertEqual(self.arcdata[sep], zipdata)
def readline_read_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
zipfp = zipfile.ZipFile(f, "r")
for sep, fn in self.arcfiles.items():
with zipfp.open(fn, "rU") as zipopen:
data = ''
while True:
read = zipopen.readline()
if not read:
break
data += read
read = zipopen.read(5)
if not read:
break
data += read
self.assertEqual(data, self.arcdata['\n'])
zipfp.close()
def readline_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with zipfp.open(fn, "rU") as zipopen:
for line in self.line_gen:
linedata = zipopen.readline()
self.assertEqual(linedata, line + '\n')
def readlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
with zipfp.open(fn, "rU") as fp:
ziplines = fp.readlines()
for line, zipline in zip(self.line_gen, ziplines):
self.assertEqual(zipline, line + '\n')
def iterlines_test(self, f, compression):
self.make_test_archive(f, compression)
# Read the ZIP archive
with zipfile.ZipFile(f, "r") as zipfp:
for sep, fn in self.arcfiles.items():
for line, zipline in zip(self.line_gen, zipfp.open(fn, "rU")):
self.assertEqual(zipline, line + '\n')
def test_read_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.read_test(f, zipfile.ZIP_STORED)
def test_readline_read_stored(self):
# Issue #7610: calls to readline() interleaved with calls to read().
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readline_read_test(f, zipfile.ZIP_STORED)
def test_readline_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readline_test(f, zipfile.ZIP_STORED)
def test_readlines_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readlines_test(f, zipfile.ZIP_STORED)
def test_iterlines_stored(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.iterlines_test(f, zipfile.ZIP_STORED)
@skipUnless(zlib, "requires zlib")
def test_read_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.read_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_readline_read_deflated(self):
# Issue #7610: calls to readline() interleaved with calls to read().
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readline_read_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_readline_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readline_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_readlines_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.readlines_test(f, zipfile.ZIP_DEFLATED)
@skipUnless(zlib, "requires zlib")
def test_iterlines_deflated(self):
for f in (TESTFN2, TemporaryFile(), StringIO()):
self.iterlines_test(f, zipfile.ZIP_DEFLATED)
def tearDown(self):
for sep, fn in self.arcfiles.items():
os.remove(fn)
unlink(TESTFN)
unlink(TESTFN2)
def test_main():
run_unittest(TestsWithSourceFile, TestZip64InSmallFiles, OtherTests,
PyZipFileTests, DecryptionTests, TestsWithMultipleOpens,
TestWithDirectory, UniversalNewlineTests,
TestsWithRandomBinaryFiles)
if __name__ == "__main__":
test_main()
| gpl-3.0 |
Wuteyan/VTK | Common/Testing/WindowsMangleList.py | 37 | 22481 | #!/usr/bin/env python
## /*=========================================================================
## Program: Visualization Toolkit
## Module: WindowsMangleList.py
## Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen
## All rights reserved.
## See Copyright.txt or http://www.kitware.com/Copyright.htm for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notice for more information.
## =========================================================================*/
## .NAME WindowsMangleList - List of method names mangled by windows.h.
## .SECTION Description
## WindowsMangleList is a script imported by HeaderTesting to get a regular
## expression matching method names that will be mangled by windows.h.
## See VTK/Utilities/Upgrading/README.WindowsMangling.txt for details
## on the windows.h mangling problems.
import re
# NOTE: The lists below are up-to-date for windows.h versions up to
# Visual Studio .NET 2003. Additional names may be added by future
# versions of windows.h.
#
# The set of macros defined by windows.h was obtained with the command
#
# gccxml --gccxml-compiler msvc71 -E win.cxx -dM
#
# where "win.cxx" contains the line "#include <windows.h>". Some
# filtering for names of the VTK naming convention was then applied.
# Names defined as macros by windows.h.
WindowsMangleNames = [
'AbnormalTermination',
'AbortSystemShutdown',
'AccessCheckAndAuditAlarm',
'AddAtom',
'AddFontResource',
'AddForm',
'AddJob',
'AddMonitor',
'AddPort',
'AddPrintProcessor',
'AddPrintProvidor',
'AddPrinter',
'AddPrinterConnection',
'AddPrinterDriver',
'AddPrinterDriverEx',
'AdvancedDocumentProperties',
'AnsiLower',
'AnsiLowerBuff',
'AnsiNext',
'AnsiPrev',
'AnsiToOem',
'AnsiToOemBuff',
'AnsiUpper',
'AnsiUpperBuff',
'AppendMenu',
'BackupEventLog',
'BeginUpdateResource',
'BuildCommDCB',
'BuildCommDCBAndTimeouts',
'CallMsgFilter',
'CallNamedPipe',
'CallWindowProc',
'CdChangerClassGuid',
'CdRomClassGuid',
'CertAddEncodedCertificateToSystemStore',
'CertGetNameString',
'CertNameToStr',
'CertOpenSystemStore',
'CertRDNValueToStr',
'CertStrToName',
'ChangeDisplaySettings',
'ChangeDisplaySettingsEx',
'ChangeMenu',
'ChangeServiceConfig',
'ChangeServiceConfig2',
'CharLower',
'CharLowerBuff',
'CharNext',
'CharPrev',
'CharToOem',
'CharToOemBuff',
'CharUpper',
'CharUpperBuff',
'ChooseColor',
'ChooseFont',
'ClearEventLog',
'CommConfigDialog',
'CommDlg_OpenSave_GetFilePath',
'CommDlg_OpenSave_GetFilePathA',
'CommDlg_OpenSave_GetFilePathW',
'CommDlg_OpenSave_GetFolderIDList',
'CommDlg_OpenSave_GetFolderPath',
'CommDlg_OpenSave_GetFolderPathA',
'CommDlg_OpenSave_GetFolderPathW',
'CommDlg_OpenSave_GetSpec',
'CommDlg_OpenSave_GetSpecA',
'CommDlg_OpenSave_GetSpecW',
'CommDlg_OpenSave_HideControl',
'CommDlg_OpenSave_SetControlText',
'CommDlg_OpenSave_SetDefExt',
'CompareString',
'ConfigurePort',
'CopyAcceleratorTable',
'CopyCursor',
'CopyEnhMetaFile',
'CopyFile',
'CopyMemory',
'CopyMetaFile',
'CreateAcceleratorTable',
'CreateColorSpace',
'CreateDC',
'CreateDesktop',
'CreateDialog',
'CreateDialogA',
'CreateDialogIndirect',
'CreateDialogIndirectA',
'CreateDialogIndirectParam',
'CreateDialogIndirectW',
'CreateDialogParam',
'CreateDialogW',
'CreateDirectory',
'CreateDirectoryEx',
'CreateEnhMetaFile',
'CreateEvent',
'CreateFile',
'CreateFileMapping',
'CreateFont',
'CreateFontIndirect',
'CreateIC',
'CreateMDIWindow',
'CreateMailslot',
'CreateMetaFile',
'CreateMutex',
'CreateNamedPipe',
'CreateProcess',
'CreateProcessAsUser',
'CreatePropertySheetPage',
'CreateScalableFontResource',
'CreateSemaphore',
'CreateService',
'CreateWindow',
'CreateWindowA',
'CreateWindowEx',
'CreateWindowStation',
'CreateWindowW',
'CryptAcquireContext',
'CryptBinaryToString',
'CryptEnumProviderTypes',
'CryptEnumProviders',
'CryptGetDefaultProvider',
'CryptRetrieveObjectByUrl',
'CryptSetProvider',
'CryptSetProviderEx',
'CryptSignHash',
'CryptStringToBinary',
'CryptVerifySignature',
'DceErrorInqText',
'DdeCreateStringHandle',
'DdeInitialize',
'DdeQueryString',
'DecryptFile',
'DefDlgProc',
'DefFrameProc',
'DefHookProc',
'DefMDIChildProc',
'DefWindowProc',
'DefineDosDevice',
'DefineHandleTable',
'DeleteFile',
'DeleteForm',
'DeleteMonitor',
'DeletePort',
'DeletePrintProcessor',
'DeletePrintProvidor',
'DeletePrinterConnection',
'DeletePrinterData',
'DeletePrinterDataEx',
'DeletePrinterDriver',
'DeletePrinterDriverEx',
'DeletePrinterKey',
'DeviceCapabilities',
'DialogBox',
'DialogBoxA',
'DialogBoxIndirect',
'DialogBoxIndirectA',
'DialogBoxIndirectParam',
'DialogBoxIndirectW',
'DialogBoxParam',
'DialogBoxW',
'DiskClassGuid',
'DispatchMessage',
'DlgDirList',
'DlgDirListComboBox',
'DlgDirSelectComboBoxEx',
'DlgDirSelectEx',
'DoEnvironmentSubst',
'DocumentProperties',
'DragQueryFile',
'DrawState',
'DrawText',
'DrawTextEx',
'EncryptFile',
'EndUpdateResource',
'EnumCalendarInfo',
'EnumCalendarInfoEx',
'EnumDateFormats',
'EnumDateFormatsEx',
'EnumDependentServices',
'EnumDesktops',
'EnumDisplayDevices',
'EnumDisplaySettings',
'EnumDisplaySettingsEx',
'EnumFontFamilies',
'EnumFontFamiliesEx',
'EnumFonts',
'EnumForms',
'EnumICMProfiles',
'EnumJobs',
'EnumLanguageGroupLocales',
'EnumMonitors',
'EnumPorts',
'EnumPrintProcessorDatatypes',
'EnumPrintProcessors',
'EnumPrinterData',
'EnumPrinterDataEx',
'EnumPrinterDrivers',
'EnumPrinterKey',
'EnumPrinters',
'EnumProps',
'EnumPropsEx',
'EnumResourceLanguages',
'EnumResourceNames',
'EnumResourceTypes',
'EnumServicesStatus',
'EnumServicesStatusEx',
'EnumSystemCodePages',
'EnumSystemLanguageGroups',
'EnumSystemLocales',
'EnumTaskWindows',
'EnumTimeFormats',
'EnumUILanguages',
'EnumWindowStations',
'ExitWindows',
'ExpandEnvironmentStrings',
'ExtTextOut',
'ExtractAssociatedIcon',
'ExtractAssociatedIconEx',
'ExtractIcon',
'ExtractIconEx',
'FatalAppExit',
'FileEncryptionStatus',
'FillConsoleOutputCharacter',
'FillMemory',
'FindAtom',
'FindExecutable',
'FindFirstChangeNotification',
'FindFirstFile',
'FindNextFile',
'FindResource',
'FindResourceEx',
'FindText',
'FindWindow',
'FindWindowEx',
'FloppyClassGuid',
'FoldString',
'FormatMessage',
'FreeEnvironmentStrings',
'FreeModule',
'FreeProcInstance',
'GetAltTabInfo',
'GetAtomName',
'GetBValue',
'GetBinaryType',
'GetCPInfoEx',
'GetCValue',
'GetCalendarInfo',
'GetCharABCWidths',
'GetCharABCWidthsFloat',
'GetCharWidth',
'GetCharWidth32',
'GetCharWidthFloat',
'GetCharacterPlacement',
'GetClassInfo',
'GetClassInfoEx',
'GetClassLong',
'GetClassLongPtr',
'GetClassLongPtrA',
'GetClassLongPtrW',
#'GetClassName', # Leave out GetClassName for now.
'GetClipboardFormatName',
'GetCommandLine',
'GetCompressedFileSize',
'GetComputerName',
'GetConsoleTitle',
'GetCurrencyFormat',
'GetCurrentDirectory',
'GetCurrentTime',
'GetDateFormat',
'GetDefaultCommConfig',
'GetDefaultPrinter',
'GetDiskFreeSpace',
'GetDiskFreeSpaceEx',
'GetDlgItemText',
'GetDriveType',
'GetEnhMetaFile',
'GetEnhMetaFileDescription',
'GetEnvironmentStringsA',
'GetEnvironmentVariable',
'GetExceptionCode',
'GetExceptionInformation',
'GetExpandedName',
'GetFileAttributes',
'GetFileAttributesEx',
'GetFileSecurity',
'GetFileTitle',
'GetFileVersionInfo',
'GetFileVersionInfoSize',
'GetFirmwareEnvironmentVariable',
'GetForm',
'GetFreeSpace',
'GetFullPathName',
'GetGValue',
'GetGeoInfo',
'GetGlyphOutline',
'GetICMProfile',
'GetJob',
'GetKValue',
'GetKerningPairs',
'GetKeyNameText',
'GetKeyboardLayoutName',
'GetLocaleInfo',
'GetLogColorSpace',
'GetLogicalDriveStrings',
'GetLongPathName',
'GetMValue',
'GetMenuItemInfo',
'GetMenuString',
'GetMessage',
'GetMetaFile',
'GetModuleFileName',
'GetModuleHandle',
'GetMonitorInfo',
'GetNamedPipeHandleState',
'GetNextWindow',
'GetNumberFormat',
'GetObject',
'GetOpenCardName',
'GetOpenFileName',
'GetOutlineTextMetrics',
'GetPrintProcessorDirectory',
'GetPrinter',
'GetPrinterData',
'GetPrinterDataEx',
'GetPrinterDriver',
'GetPrinterDriverDirectory',
'GetPrivateProfileInt',
'GetPrivateProfileSection',
'GetPrivateProfileSectionNames',
'GetPrivateProfileString',
'GetPrivateProfileStruct',
'GetProfileInt',
'GetProfileSection',
'GetProfileString',
'GetProp',
'GetRValue',
'GetSaveFileName',
'GetScode',
'GetServiceDisplayName',
'GetServiceKeyName',
'GetShortPathName',
'GetStartupInfo',
'GetStringTypeEx',
'GetSysModalWindow',
'GetSystemDirectory',
'GetSystemWindowsDirectory',
'GetTabbedTextExtent',
'GetTempFileName',
'GetTempPath',
'GetTextExtentExPoint',
'GetTextExtentPoint',
'GetTextExtentPoint32',
'GetTextFace',
'GetTextMetrics',
'GetTimeFormat',
'GetUrlPolicyPermissions',
'GetUserName',
'GetUserObjectInformation',
'GetVersionEx',
'GetVolumeInformation',
'GetWindowLong',
'GetWindowLongPtr',
'GetWindowLongPtrA',
'GetWindowLongPtrW',
'GetWindowModuleFileName',
'GetWindowTask',
'GetWindowText',
'GetWindowTextLength',
'GetWindowsDirectory',
'GetYValue',
'GlobalAddAtom',
'GlobalDiscard',
'GlobalFindAtom',
'GlobalGetAtomName',
'GlobalLRUNewest',
'GlobalLRUOldest',
'GrayString',
'HandleToLong',
'HandleToULong',
'HandleToUlong',
'HasOverlappedIoCompleted',
'ImmConfigureIME',
'ImmEnumRegisterWord',
'ImmEscape',
'ImmGetCandidateList',
'ImmGetCandidateListCount',
'ImmGetCompositionFont',
'ImmGetCompositionString',
'ImmGetConversionList',
'ImmGetDescription',
'ImmGetGuideLine',
'ImmGetIMEFileName',
'ImmGetImeMenuItems',
'ImmGetRegisterWordStyle',
'ImmInstallIME',
'ImmIsUIMessage',
'ImmRegisterWord',
'ImmSetCompositionFont',
'ImmSetCompositionString',
'ImmUnregisterWord',
'InitiateSystemShutdown',
'InitiateSystemShutdownEx',
'InsertMenu',
'InsertMenuItem',
'Int32x32To64',
'IntToPtr',
'InterlockedCompareExchangePointer',
'InterlockedExchangePointer',
'IsBadStringPtr',
'IsCharAlpha',
'IsCharAlphaNumeric',
'IsCharLower',
'IsCharUpper',
'IsContainerPartition',
'IsDialogMessage',
'IsEqualCLSID',
'IsEqualFMTID',
'IsEqualIID',
'IsFTPartition',
'IsHashValCompatible',
'IsLFNDrive',
'IsLoggingEnabled',
'IsRecognizedPartition',
'IsReparseTagMicrosoft',
'IsReparseTagNameSurrogate',
'IsValidDevmode',
'LimitEmsPages',
'LoadBitmap',
'LoadCursor',
'LoadCursorFromFile',
'LoadIcon',
'LoadImage',
'LoadKeyboardLayout',
'LoadLibrary',
'LoadLibraryEx',
'LoadMenu',
'LoadMenuIndirect',
'LoadString',
'LocalDiscard',
'LockSegment',
'LogonUser',
'LogonUserEx',
'LongToHandle',
'LongToPtr',
'LookupAccountName',
'LookupAccountSid',
'LookupPrivilegeDisplayName',
'LookupPrivilegeName',
'LookupPrivilegeValue',
'MakeProcInstance',
'MapVirtualKey',
'MapVirtualKeyEx',
'MaxNumberOfEEInfoParams',
'MediumChangerClassGuid',
'MessageBox',
'MessageBoxEx',
'MessageBoxIndirect',
'ModifyMenu',
'MoveFile',
'MoveFileEx',
'MoveMemory',
'MultinetGetConnectionPerformance',
'NdrFcLong',
'NdrFcShort',
'NdrFieldOffset',
'NdrFieldPad',
'NdrMarshCCtxtHdl',
'NdrMarshConfStringHdr',
'NdrMarshSCtxtHdl',
'NdrUnMarshCCtxtHdl',
'NdrUnMarshConfStringHdr',
'NdrUnMarshSCtxtHdl',
'ObjectCloseAuditAlarm',
'ObjectDeleteAuditAlarm',
'ObjectOpenAuditAlarm',
'ObjectPrivilegeAuditAlarm',
'OemToAnsi',
'OemToAnsiBuff',
'OemToChar',
'OemToCharBuff',
'OpenBackupEventLog',
'OpenDesktop',
'OpenEncryptedFileRaw',
'OpenEvent',
'OpenEventLog',
'OpenFileMapping',
'OpenMutex',
'OpenPrinter',
'OpenSCManager',
'OpenSemaphore',
'OpenService',
'OpenWindowStation',
'OutputDebugStr',
'OutputDebugString',
'PageSetupDlg',
'PartitionClassGuid',
'PeekConsoleInput',
'PeekMessage',
'PlaySound',
'PolyTextOut',
'PostAppMessage',
'PostAppMessageA',
'PostAppMessageW',
'PostMessage',
'PostThreadMessage',
'PrintDlg',
'PrintDlgEx',
'PrinterMessageBox',
'PrivateExtractIcons',
'PrivilegedServiceAuditAlarm',
'PropSheet_AddPage',
'PropSheet_Apply',
'PropSheet_CancelToClose',
'PropSheet_Changed',
'PropSheet_GetCurrentPageHwnd',
'PropSheet_GetResult',
'PropSheet_GetTabControl',
'PropSheet_HwndToIndex',
'PropSheet_IdToIndex',
'PropSheet_IndexToHwnd',
'PropSheet_IndexToId',
'PropSheet_IndexToPage',
'PropSheet_InsertPage',
'PropSheet_IsDialogMessage',
'PropSheet_PageToIndex',
'PropSheet_PressButton',
'PropSheet_QuerySiblings',
'PropSheet_RebootSystem',
'PropSheet_RecalcPageSizes',
'PropSheet_RemovePage',
'PropSheet_RestartWindows',
'PropSheet_SetCurSel',
'PropSheet_SetCurSelByID',
'PropSheet_SetFinishText',
'PropSheet_SetHeaderSubTitle',
'PropSheet_SetHeaderTitle',
'PropSheet_SetTitle',
'PropSheet_SetWizButtons',
'PropSheet_UnChanged',
'PropagateResult',
'PropertySheet',
'PtrToInt',
'PtrToLong',
'PtrToShort',
'PtrToUint',
'PtrToUlong',
'PtrToUshort',
'QueryDosDevice',
'QueryServiceConfig',
'QueryServiceConfig2',
'QueryServiceLockStatus',
'ReadConsole',
'ReadConsoleInput',
'ReadConsoleOutput',
'ReadConsoleOutputCharacter',
'ReadEventLog',
'RealGetWindowClass',
'RegConnectRegistry',
'RegCreateKey',
'RegCreateKeyEx',
'RegDeleteKey',
'RegDeleteValue',
'RegEnumKey',
'RegEnumKeyEx',
'RegEnumValue',
'RegLoadKey',
'RegOpenKey',
'RegOpenKeyEx',
'RegQueryInfoKey',
'RegQueryMultipleValues',
'RegQueryValue',
'RegQueryValueEx',
'RegReplaceKey',
'RegRestoreKey',
'RegSaveKey',
'RegSaveKeyEx',
'RegSetValue',
'RegSetValueEx',
'RegUnLoadKey',
'RegisterClass',
'RegisterClassEx',
'RegisterClipboardFormat',
'RegisterDeviceNotification',
'RegisterEventSource',
'RegisterServiceCtrlHandler',
'RegisterServiceCtrlHandlerEx',
'RegisterWindowMessage',
'RemoveDirectory',
'RemoveFontResource',
'RemoveProp',
'ReplaceText',
'ReportEvent',
'ResetDC',
'ResetPrinter',
'ResultFromScode',
'RpcAbnormalTermination',
'RpcAsyncGetCallHandle',
'RpcBindingFromStringBinding',
'RpcBindingInqAuthClient',
'RpcBindingInqAuthClientEx',
'RpcBindingInqAuthInfo',
'RpcBindingInqAuthInfoEx',
'RpcBindingSetAuthInfo',
'RpcBindingSetAuthInfoEx',
'RpcBindingToStringBinding',
'RpcEndExcept',
'RpcEndFinally',
'RpcEpRegister',
'RpcEpRegisterNoReplace',
'RpcExcept',
'RpcExceptionCode',
'RpcFinally',
'RpcMgmtEpEltInqNext',
'RpcMgmtInqServerPrincName',
'RpcNetworkInqProtseqs',
'RpcNetworkIsProtseqValid',
'RpcNsBindingExport',
'RpcNsBindingExportPnP',
'RpcNsBindingImportBegin',
'RpcNsBindingInqEntryName',
'RpcNsBindingLookupBegin',
'RpcNsBindingUnexport',
'RpcNsBindingUnexportPnP',
'RpcNsEntryExpandName',
'RpcNsEntryObjectInqBegin',
'RpcNsGroupDelete',
'RpcNsGroupMbrAdd',
'RpcNsGroupMbrInqBegin',
'RpcNsGroupMbrInqNext',
'RpcNsGroupMbrRemove',
'RpcNsMgmtBindingUnexport',
'RpcNsMgmtEntryCreate',
'RpcNsMgmtEntryDelete',
'RpcNsMgmtEntryInqIfIds',
'RpcNsProfileDelete',
'RpcNsProfileEltAdd',
'RpcNsProfileEltInqBegin',
'RpcNsProfileEltInqNext',
'RpcNsProfileEltRemove',
'RpcProtseqVectorFree',
'RpcServerInqCallAttributes',
'RpcServerInqDefaultPrincName',
'RpcServerRegisterAuthInfo',
'RpcServerUseProtseq',
'RpcServerUseProtseqEp',
'RpcServerUseProtseqEpEx',
'RpcServerUseProtseqEx',
'RpcServerUseProtseqIf',
'RpcServerUseProtseqIfEx',
'RpcStringBindingCompose',
'RpcStringBindingParse',
'RpcStringFree',
'RpcTryExcept',
'RpcTryFinally',
'RtlCopyMemory',
'RtlEqualMemory',
'RtlFillMemory',
'RtlMoveMemory',
'RtlZeroMemory',
'ScrollConsoleScreenBuffer',
'SearchPath',
'SecureZeroMemory',
'SendDlgItemMessage',
'SendMessage',
'SendMessageCallback',
'SendMessageTimeout',
'SendNotifyMessage',
'SetCalendarInfo',
'SetClassLong',
'SetClassLongPtr',
'SetClassLongPtrA',
'SetClassLongPtrW',
'SetComputerName',
'SetConsoleTitle',
'SetCurrentDirectory',
'SetCurrentTime', # Not actually mangled but matches GetCurrentTime
'SetDefaultCommConfig',
'SetDefaultPrinter',
'SetDlgItemText',
'SetEnvironmentStrings',
'SetEnvironmentVariable',
'SetFileAttributes',
'SetFileSecurity',
'SetFileShortName',
'SetFirmwareEnvironmentVariable',
'SetForm',
'SetICMProfile',
'SetJob',
'SetLocaleInfo',
'SetMenuItemInfo',
'SetPort',
'SetPrinter',
'SetPrinterData',
'SetPrinterDataEx',
'SetProp',
'SetSwapAreaSize',
'SetSysModalWindow',
'SetUrlPolicyPermissions',
'SetUserObjectInformation',
'SetVolumeLabel',
'SetWindowLong',
'SetWindowLongPtr',
'SetWindowLongPtrA',
'SetWindowLongPtrW',
'SetWindowText',
'SetWindowsHook',
'SetWindowsHookEx',
'ShellAbout',
'ShellExecute',
'ShellExecuteEx',
'ShellMessageBox',
'Shell_NotifyIcon',
'StartDoc',
'StartDocPrinter',
'StartService',
'StartServiceCtrlDispatcher',
'StoragePortClassGuid',
'SystemParametersInfo',
'TabbedTextOut',
'TapeClassGuid',
'TextOut',
'TranslateAccelerator',
'UintToPtr',
'UlongToHandle',
'UlongToPtr',
'UnlockResource',
'UnlockSegment',
'UnregisterClass',
'UpdateICMRegKey',
'UpdateResource',
'UuidFromString',
'UuidToString',
'VarBoolFromInt',
'VarBoolFromUint',
'VarBstrFromInt',
'VarBstrFromUint',
'VarCyFromInt',
'VarCyFromUint',
'VarDateFromInt',
'VarDateFromUint',
'VarDecFromInt',
'VarDecFromUint',
'VarI1FromInt',
'VarI1FromUint',
'VarI2FromInt',
'VarI2FromUint',
'VarI4FromI4',
'VarI4FromInt',
'VarI4FromUint',
'VarI8FromI8',
'VarI8FromInt',
'VarI8FromUint',
'VarIntFromBool',
'VarIntFromCy',
'VarIntFromDate',
'VarIntFromDec',
'VarIntFromDisp',
'VarIntFromI1',
'VarIntFromI2',
'VarIntFromI4',
'VarIntFromI8',
'VarIntFromR4',
'VarIntFromR8',
'VarIntFromStr',
'VarIntFromUI1',
'VarIntFromUI2',
'VarIntFromUI4',
'VarIntFromUI8',
'VarIntFromUint',
'VarR4FromInt',
'VarR4FromUint',
'VarR8FromInt',
'VarR8FromUint',
'VarUI1FromInt',
'VarUI1FromUint',
'VarUI2FromInt',
'VarUI2FromUint',
'VarUI4FromInt',
'VarUI4FromUI4',
'VarUI4FromUint',
'VarUI8FromUI8',
'VarUintFromBool',
'VarUintFromCy',
'VarUintFromDate',
'VarUintFromDec',
'VarUintFromDisp',
'VarUintFromI1',
'VarUintFromI2',
'VarUintFromI4',
'VarUintFromI8',
'VarUintFromInt',
'VarUintFromR4',
'VarUintFromR8',
'VarUintFromStr',
'VarUintFromUI1',
'VarUintFromUI2',
'VarUintFromUI4',
'VarUintFromUI8',
'VerFindFile',
'VerInstallFile',
'VerLanguageName',
'VerQueryValue',
'VerifyVersionInfo',
'VkKeyScan',
'VkKeyScanEx',
'VolumeClassGuid',
'WaitNamedPipe',
'WinExecError',
'WinHelp',
'WriteConsole',
'WriteConsoleInput',
'WriteConsoleOutput',
'WriteConsoleOutputCharacter',
'WriteOnceDiskClassGuid',
'WritePrivateProfileSection',
'WritePrivateProfileString',
'WritePrivateProfileStruct',
'WriteProfileSection',
'WriteProfileString',
'XcvData',
'Yield'
]
# Names for which Get<name> is a macro defined by windows.h.
WindowsMangleGets = [
'AltTabInfo',
'AtomName',
'BValue',
'BinaryType',
'CPInfoEx',
'CValue',
'CalendarInfo',
'CharABCWidths',
'CharABCWidthsFloat',
'CharWidth',
'CharWidth32',
'CharWidthFloat',
'CharacterPlacement',
'ClassInfo',
'ClassInfoEx',
'ClassLong',
'ClassLongPtr',
'ClassLongPtrA',
'ClassLongPtrW',
'ClassName', # GetClassName is excluded above, but the vtkGetMacro case still applies.
'ClipboardFormatName',
'CommandLine',
'CompressedFileSize',
'ComputerName',
'ConsoleTitle',
'CurrencyFormat',
'CurrentDirectory',
'CurrentTime',
'DateFormat',
'DefaultCommConfig',
'DefaultPrinter',
'DiskFreeSpace',
'DiskFreeSpaceEx',
'DlgItemText',
'DriveType',
'EnhMetaFile',
'EnhMetaFileDescription',
'EnvironmentStringsA',
'EnvironmentVariable',
'ExceptionCode',
'ExceptionInformation',
'ExpandedName',
'FileAttributes',
'FileAttributesEx',
'FileSecurity',
'FileTitle',
'FileVersionInfo',
'FileVersionInfoSize',
'FirmwareEnvironmentVariable',
'Form',
'FreeSpace',
'FullPathName',
'GValue',
'GeoInfo',
'GlyphOutline',
'ICMProfile',
'Job',
'KValue',
'KerningPairs',
'KeyNameText',
'KeyboardLayoutName',
'LocaleInfo',
'LogColorSpace',
'LogicalDriveStrings',
'LongPathName',
'MValue',
'MenuItemInfo',
'MenuString',
'Message',
'MetaFile',
'ModuleFileName',
'ModuleHandle',
'MonitorInfo',
'NamedPipeHandleState',
'NextWindow',
'NumberFormat',
'Object',
'OpenCardName',
'OpenFileName',
'OutlineTextMetrics',
'PrintProcessorDirectory',
'Printer',
'PrinterData',
'PrinterDataEx',
'PrinterDriver',
'PrinterDriverDirectory',
'PrivateProfileInt',
'PrivateProfileSection',
'PrivateProfileSectionNames',
'PrivateProfileString',
'PrivateProfileStruct',
'ProfileInt',
'ProfileSection',
'ProfileString',
'Prop',
'RValue',
'SaveFileName',
'Scode',
'ServiceDisplayName',
'ServiceKeyName',
'ShortPathName',
'StartupInfo',
'StringTypeEx',
'SysModalWindow',
'SystemDirectory',
'SystemWindowsDirectory',
'TabbedTextExtent',
'TempFileName',
'TempPath',
'TextExtentExPoint',
'TextExtentPoint',
'TextExtentPoint32',
'TextFace',
'TextMetrics',
'TimeFormat',
'UrlPolicyPermissions',
'UserName',
'UserObjectInformation',
'VersionEx',
'VolumeInformation',
'WindowLong',
'WindowLongPtr',
'WindowLongPtrA',
'WindowLongPtrW',
'WindowModuleFileName',
'WindowTask',
'WindowText',
'WindowTextLength',
'WindowsDirectory',
'YValue'
]
# Names for which Set<name> is a macro defined by windows.h.
WindowsMangleSets = [
'CalendarInfo',
'ClassLong',
'ClassLongPtr',
'ClassLongPtrA',
'ClassLongPtrW',
'ComputerName',
'ConsoleTitle',
'CurrentDirectory',
'CurrentTime', # Not actually mangled but matches GetCurrentTime
'DefaultCommConfig',
'DefaultPrinter',
'DlgItemText',
'EnvironmentStrings',
'EnvironmentVariable',
'FileAttributes',
'FileSecurity',
'FileShortName',
'FirmwareEnvironmentVariable',
'Form',
'ICMProfile',
'Job',
'LocaleInfo',
'MenuItemInfo',
'Port',
'Printer',
'PrinterData',
'PrinterDataEx',
'Prop',
'SwapAreaSize',
'SysModalWindow',
'UrlPolicyPermissions',
'UserObjectInformation',
'VolumeLabel',
'WindowLong',
'WindowLongPtr',
'WindowLongPtrA',
'WindowLongPtrW',
'WindowText',
'WindowsHook',
'WindowsHookEx'
]
#-----------------------------------------------------------------------------
# Construct a regular expression matching lines declaring methods with
# these names.
# Beginning of line.
WindowsMangle = '^.*('
#-----------------------------------------
# Check explicit method names.
WindowsMangle = WindowsMangle + r'[^A-Za-z0-9_]('
WindowsMangle = WindowsMangle + '|'.join(WindowsMangleNames)
WindowsMangle = WindowsMangle + r')\s*\('
#-----------------------------------------
WindowsMangle = WindowsMangle + '|'
#-----------------------------------------
# Check names generated by GetMacro calls.
WindowsMangle = WindowsMangle + r'vtkGet.*Macro\s*\(\s*('
WindowsMangle = WindowsMangle + '|'.join(WindowsMangleGets)
WindowsMangle = WindowsMangle + r')\s*,'
#-----------------------------------------
WindowsMangle = WindowsMangle + '|'
#-----------------------------------------
# Check names generated by SetMacro calls.
WindowsMangle = WindowsMangle + r'vtkSet.*Macro\s*\(\s*('
WindowsMangle = WindowsMangle + '|'.join(WindowsMangleSets)
WindowsMangle = WindowsMangle + r')\s*,'
#-----------------------------------------
# End of line.
WindowsMangle = WindowsMangle + ').*$'
#-----------------------------------------------------------------------------
# Compile the regular expression.
WindowsMangleRegEx = re.compile(WindowsMangle)
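#-----------------------------------------------------------------------------
# Illustrative helper (a sketch; not used by HeaderTesting itself): report
# whether a declaration line would be flagged as at risk of windows.h
# mangling. The example line in the docstring is hypothetical.
def _example_is_mangled(line):
    """E.g. _example_is_mangled('virtual int GetObject(int i);') -> True."""
    return WindowsMangleRegEx.match(line) is not None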
| bsd-3-clause |
bearbin/box-server | requests/packages/urllib3/filepost.py | 240 | 3047 | # urllib3/filepost.py
# Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import codecs
import mimetypes
from uuid import uuid4
from io import BytesIO
from .packages import six
from .packages.six import b
writer = codecs.lookup('utf-8')[3]
def choose_boundary():
"""
Our embarrassingly-simple replacement for mimetools.choose_boundary.
"""
return uuid4().hex
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
def iter_fields(fields):
"""
Iterate over fields.
Supports list of (k, v) tuples and dicts.
"""
if isinstance(fields, dict):
return ((k, v) for k, v in six.iteritems(fields))
return ((k, v) for k, v in fields)
def encode_multipart_formdata(fields, boundary=None):
"""
Encode a dictionary of ``fields`` using the multipart/form-data MIME format.
:param fields:
Dictionary of fields or list of (key, value) or (key, value, MIME type)
field tuples. The key is treated as the field name, and the value as
the body of the form-data bytes. If the value is a tuple of two
elements, then the first element is treated as the filename of the
form-data section and a suitable MIME type is guessed based on the
filename. If the value is a tuple of three elements, then the third
element is treated as an explicit MIME type of the form-data section.
Field names and filenames must be unicode.
:param boundary:
If not specified, then a random boundary will be generated using
:func:`mimetools.choose_boundary`.
"""
body = BytesIO()
if boundary is None:
boundary = choose_boundary()
for fieldname, value in iter_fields(fields):
body.write(b('--%s\r\n' % (boundary)))
if isinstance(value, tuple):
if len(value) == 3:
filename, data, content_type = value
else:
filename, data = value
content_type = get_content_type(filename)
writer(body).write('Content-Disposition: form-data; name="%s"; '
'filename="%s"\r\n' % (fieldname, filename))
body.write(b('Content-Type: %s\r\n\r\n' %
(content_type,)))
else:
data = value
writer(body).write('Content-Disposition: form-data; name="%s"\r\n'
% (fieldname))
body.write(b'\r\n')
if isinstance(data, int):
data = str(data) # Backwards compatibility
if isinstance(data, six.text_type):
writer(body).write(data)
else:
body.write(data)
body.write(b'\r\n')
body.write(b('--%s--\r\n' % (boundary)))
content_type = b('multipart/form-data; boundary=%s' % boundary)
return body.getvalue(), content_type
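# An illustrative sketch (not part of urllib3's public API); the field
# names and file content below are made up:
def _example_encode():
    body, content_type = encode_multipart_formdata({
        'token': 'abc123',                 # plain form field
        'upload': ('notes.txt', 'hello'),  # (filename, data): type is guessed
    })
    return body, content_type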
| mit |
sounay/flaminggo-test | onadata/apps/viewer/migrations/0002_auto__add_field_datadictionary_xls.py | 13 | 8621 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'DataDictionary.xls'
db.add_column('odk_viewer_datadictionary', 'xls', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True), keep_default=False)
def backwards(self, orm):
# Deleting field 'DataDictionary.xls'
db.delete_column('odk_viewer_datadictionary', 'xls')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.instance': {
'Meta': {'object_name': 'Instance'},
'date': ('django.db.models.fields.DateField', [], {'null': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "u'submitted_via_web'", 'max_length': '20'}),
'survey_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['odk_logger.SurveyType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['auth.User']"}),
'xform': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'surveys'", 'null': 'True', 'to': "orm['odk_logger.XForm']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_logger.surveytype': {
'Meta': {'object_name': 'SurveyType'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'odk_logger.xform': {
'Meta': {'ordering': "('id_string',)", 'unique_together': "(('user', 'id_string'),)", 'object_name': 'XForm'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'downloadable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'id_string': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'xforms'", 'null': 'True', 'to': "orm['auth.User']"}),
'xml': ('django.db.models.fields.TextField', [], {})
},
'odk_viewer.columnrename': {
'Meta': {'object_name': 'ColumnRename'},
'column_name': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'xpath': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'})
},
'odk_viewer.datadictionary': {
'Meta': {'object_name': 'DataDictionary'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'json': ('django.db.models.fields.TextField', [], {}),
'xform': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'data_dictionary'", 'unique': 'True', 'to': "orm['odk_logger.XForm']"}),
'xls': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True'})
},
'odk_viewer.instancemodification': {
'Meta': {'object_name': 'InstanceModification'},
'action': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'modifications'", 'to': "orm['odk_logger.Instance']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True'}),
'xpath': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'odk_viewer.parsedinstance': {
'Meta': {'object_name': 'ParsedInstance'},
'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'parsed_instance'", 'unique': 'True', 'to': "orm['odk_logger.Instance']"}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'lng': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['viewer']
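# For illustration only: with a standard South setup this migration would
# typically be applied with "python manage.py migrate viewer"; the exact
# invocation depends on the project configuration.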
| bsd-2-clause |
MalmoUniversity-DA366A/calvin-base | calvin/actorstore/systemactors/std/Join.py | 4 | 1454 | # -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, ActionResult, condition
class Join(Actor):
"""
Join two streams of tokens
Inputs:
token_1 : first token stream
token_2 : second token stream
Outputs:
token : resulting token stream
"""
def init(self):
pass
@condition(['token_1'], ['token'])
def port_one(self, input):
return ActionResult(production=(input, ))
@condition(['token_2'], ['token'])
def port_two(self, input):
return ActionResult(production=(input, ))
action_priority = (port_one, port_two)
test_args = []
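# Each case below presumably feeds the 'in' queues to a fresh actor instance
# and compares the merged 'out' stream; when both ports hold tokens,
# port_one fires first per action_priority above.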
test_set = [
{
'in': {'token_1': [1, 2], 'token_2': ['a', 'b']},
'out': {'token': [1, 2, 'a', 'b']}
},
{
'in': {'token_2': [2]},
'out': {'token': [2]}
}
]
| apache-2.0 |
NikNitro/Python-iBeacon-Scan | sympy/simplify/tests/test_hyperexpand.py | 2 | 38226 | from random import randrange
from sympy.simplify.hyperexpand import (ShiftA, ShiftB, UnShiftA, UnShiftB,
MeijerShiftA, MeijerShiftB, MeijerShiftC, MeijerShiftD,
MeijerUnShiftA, MeijerUnShiftB, MeijerUnShiftC,
MeijerUnShiftD,
ReduceOrder, reduce_order, apply_operators,
devise_plan, make_derivative_operator, Formula,
hyperexpand, Hyper_Function, G_Function,
reduce_order_meijer,
build_hypergeometric_formula)
from sympy import hyper, I, S, meijerg, Piecewise, Tuple
from sympy.abc import z, a, b, c
from sympy.utilities.pytest import XFAIL, raises, slow
from sympy.utilities.randtest import verify_numerically as tn
from sympy.core.compatibility import range
from sympy import (cos, sin, log, exp, asin, lowergamma, atanh, besseli,
gamma, sqrt, pi, erf, exp_polar, Rational)
def test_branch_bug():
assert hyperexpand(hyper((-S(1)/3, S(1)/2), (S(2)/3, S(3)/2), -z)) == \
-z**S('1/3')*lowergamma(exp_polar(I*pi)/3, z)/5 \
+ sqrt(pi)*erf(sqrt(z))/(5*sqrt(z))
assert hyperexpand(meijerg([S(7)/6, 1], [], [S(2)/3], [S(1)/6, 0], z)) == \
2*z**S('2/3')*(2*sqrt(pi)*erf(sqrt(z))/sqrt(z) - 2*lowergamma(
S(2)/3, z)/z**S('2/3'))*gamma(S(2)/3)/gamma(S(5)/3)
def test_hyperexpand():
# Luke, Y. L. (1969), The Special Functions and Their Approximations,
# Volume 1, section 6.2
assert hyperexpand(hyper([], [], z)) == exp(z)
assert hyperexpand(hyper([1, 1], [2], -z)*z) == log(1 + z)
assert hyperexpand(hyper([], [S.Half], -z**2/4)) == cos(z)
assert hyperexpand(z*hyper([], [S('3/2')], -z**2/4)) == sin(z)
assert hyperexpand(hyper([S('1/2'), S('1/2')], [S('3/2')], z**2)*z) \
== asin(z)
def can_do(ap, bq, numerical=True, div=1, lowerplane=False):
from sympy import exp_polar, exp
r = hyperexpand(hyper(ap, bq, z))
if r.has(hyper):
return False
if not numerical:
return True
repl = {}
randsyms = r.free_symbols - {z}
while randsyms:
# Only randomly generated parameters are checked.
for n, a in enumerate(randsyms):
repl[a] = randcplx(n)/div
if not any([b.is_Integer and b <= 0 for b in Tuple(*bq).subs(repl)]):
break
[a, b, c, d] = [2, -1, 3, 1]
if lowerplane:
[a, b, c, d] = [2, -2, 3, -1]
return tn(
hyper(ap, bq, z).subs(repl),
r.replace(exp_polar, exp).subs(repl),
z, a=a, b=b, c=c, d=d)
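# For example, can_do([S(1)/2], [S(9)/2]) checks that hyperexpand() returns a
# hyper-free closed form and that it agrees numerically with the original
# function at randomly chosen parameter values.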
def test_roach():
# Kelly B. Roach. Meijer G Function Representations.
# Section "Gallery"
assert can_do([S(1)/2], [S(9)/2])
assert can_do([], [1, S(5)/2, 4])
assert can_do([-S.Half, 1, 2], [3, 4])
assert can_do([S(1)/3], [-S(2)/3, -S(1)/2, S(1)/2, 1])
assert can_do([-S(3)/2, -S(1)/2], [-S(5)/2, 1])
assert can_do([-S(3)/2, ], [-S(1)/2, S(1)/2]) # shine-integral
assert can_do([-S(3)/2, -S(1)/2], [2]) # elliptic integrals
@XFAIL
def test_roach_fail():
assert can_do([-S(1)/2, 1], [S(1)/4, S(1)/2, S(3)/4]) # PFDD
assert can_do([S(3)/2], [S(5)/2, 5]) # struve function
assert can_do([-S(1)/2, S(1)/2, 1], [S(3)/2, S(5)/2]) # polylog, pfdd
assert can_do([1, 2, 3], [S(1)/2, 4]) # XXX ?
assert can_do([S(1)/2], [-S(1)/3, -S(1)/2, -S(2)/3]) # PFDD ?
# For the long table tests, see end of file
def test_polynomial():
from sympy import oo
assert hyperexpand(hyper([], [-1], z)) == oo
assert hyperexpand(hyper([-2], [-1], z)) == oo
assert hyperexpand(hyper([0, 0], [-1], z)) == 1
assert can_do([-5, -2, randcplx(), randcplx()], [-10, randcplx()])
assert hyperexpand(hyper((-1, 1), (-2,), z)) == 1 + z/2
def test_hyperexpand_bases():
assert hyperexpand(hyper([2], [a], z)) == \
a + z**(-a + 1)*(-a**2 + 3*a + z*(a - 1) - 2)*exp(z)* \
lowergamma(a - 1, z) - 1
# TODO [a+1, a-S.Half], [2*a]
assert hyperexpand(hyper([1, 2], [3], z)) == -2/z - 2*log(-z + 1)/z**2
assert hyperexpand(hyper([S.Half, 2], [S(3)/2], z)) == \
-1/(2*z - 2) + atanh(sqrt(z))/sqrt(z)/2
assert hyperexpand(hyper([S(1)/2, S(1)/2], [S(5)/2], z)) == \
(-3*z + 3)/4/(z*sqrt(-z + 1)) \
+ (6*z - 3)*asin(sqrt(z))/(4*z**(S(3)/2))
assert hyperexpand(hyper([1, 2], [S(3)/2], z)) == -1/(2*z - 2) \
- asin(sqrt(z))/(sqrt(z)*(2*z - 2)*sqrt(-z + 1))
assert hyperexpand(hyper([-S.Half - 1, 1, 2], [S.Half, 3], z)) == \
sqrt(z)*(6*z/7 - S(6)/5)*atanh(sqrt(z)) \
+ (-30*z**2 + 32*z - 6)/35/z - 6*log(-z + 1)/(35*z**2)
assert hyperexpand(hyper([1 + S.Half, 1, 1], [2, 2], z)) == \
-4*log(sqrt(-z + 1)/2 + S(1)/2)/z
# TODO hyperexpand(hyper([a], [2*a + 1], z))
# TODO [S.Half, a], [S(3)/2, a+1]
assert hyperexpand(hyper([2], [b, 1], z)) == \
z**(-b/2 + S(1)/2)*besseli(b - 1, 2*sqrt(z))*gamma(b) \
+ z**(-b/2 + 1)*besseli(b, 2*sqrt(z))*gamma(b)
# TODO [a], [a - S.Half, 2*a]
def test_hyperexpand_parametric():
assert hyperexpand(hyper([a, S(1)/2 + a], [S(1)/2], z)) \
== (1 + sqrt(z))**(-2*a)/2 + (1 - sqrt(z))**(-2*a)/2
assert hyperexpand(hyper([a, -S(1)/2 + a], [2*a], z)) \
== 2**(2*a - 1)*((-z + 1)**(S(1)/2) + 1)**(-2*a + 1)
def test_shifted_sum():
from sympy import simplify
assert simplify(hyperexpand(z**4*hyper([2], [3, S('3/2')], -z**2))) \
== z*sin(2*z) + (-z**2 + S.Half)*cos(2*z) - S.Half
def _randrat():
""" Steer clear of integers. """
return S(randrange(25) + 10)/50
def randcplx(offset=-1):
""" Polys is not good with real coefficients. """
return _randrat() + I*_randrat() + I*(1 + offset)
@slow
def test_formulae():
from sympy.simplify.hyperexpand import FormulaCollection
formulae = FormulaCollection().formulae
for formula in formulae:
h = formula.func(formula.z)
rep = {}
for n, sym in enumerate(formula.symbols):
rep[sym] = randcplx(n)
# NOTE hyperexpand returns truly branched functions. We know we are
# on the main sheet, but numerical evaluation can still go wrong
# (e.g. if exp_polar cannot be evalf'd).
# Just replace all exp_polar by exp, this usually works.
# first test if the closed-form is actually correct
h = h.subs(rep)
closed_form = formula.closed_form.subs(rep).rewrite('nonrepsmall')
z = formula.z
assert tn(h, closed_form.replace(exp_polar, exp), z)
# now test the computed matrix
cl = (formula.C * formula.B)[0].subs(rep).rewrite('nonrepsmall')
assert tn(closed_form.replace(
exp_polar, exp), cl.replace(exp_polar, exp), z)
deriv1 = z*formula.B.applyfunc(lambda t: t.rewrite(
'nonrepsmall')).diff(z)
deriv2 = formula.M * formula.B
for d1, d2 in zip(deriv1, deriv2):
assert tn(d1.subs(rep).replace(exp_polar, exp),
d2.subs(rep).rewrite('nonrepsmall').replace(exp_polar, exp), z)
def test_meijerg_formulae():
from sympy.simplify.hyperexpand import MeijerFormulaCollection
formulae = MeijerFormulaCollection().formulae
for sig in formulae:
for formula in formulae[sig]:
g = meijerg(formula.func.an, formula.func.ap,
formula.func.bm, formula.func.bq,
formula.z)
rep = {}
for sym in formula.symbols:
rep[sym] = randcplx()
# first test if the closed-form is actually correct
g = g.subs(rep)
closed_form = formula.closed_form.subs(rep)
z = formula.z
assert tn(g, closed_form, z)
# now test the computed matrix
cl = (formula.C * formula.B)[0].subs(rep)
assert tn(closed_form, cl, z)
deriv1 = z*formula.B.diff(z)
deriv2 = formula.M * formula.B
for d1, d2 in zip(deriv1, deriv2):
assert tn(d1.subs(rep), d2.subs(rep), z)
def op(f):
return z*f.diff(z)
def test_plan():
assert devise_plan(Hyper_Function([0], ()),
Hyper_Function([0], ()), z) == []
with raises(ValueError):
devise_plan(Hyper_Function([1], ()), Hyper_Function((), ()), z)
with raises(ValueError):
devise_plan(Hyper_Function([2], [1]), Hyper_Function([2], [2]), z)
with raises(ValueError):
devise_plan(Hyper_Function([2], []), Hyper_Function([S("1/2")], []), z)
# We cannot use pi/(10000 + n) because polys is insanely slow.
a1, a2, b1 = (randcplx(n) for n in range(3))
b1 += 2*I
h = hyper([a1, a2], [b1], z)
h2 = hyper((a1 + 1, a2), [b1], z)
assert tn(apply_operators(h,
devise_plan(Hyper_Function((a1 + 1, a2), [b1]),
Hyper_Function((a1, a2), [b1]), z), op),
h2, z)
h2 = hyper((a1 + 1, a2 - 1), [b1], z)
assert tn(apply_operators(h,
devise_plan(Hyper_Function((a1 + 1, a2 - 1), [b1]),
Hyper_Function((a1, a2), [b1]), z), op),
h2, z)
def test_plan_derivatives():
a1, a2, a3 = 1, 2, S('1/2')
b1, b2 = 3, S('5/2')
h = Hyper_Function((a1, a2, a3), (b1, b2))
h2 = Hyper_Function((a1 + 1, a2 + 1, a3 + 2), (b1 + 1, b2 + 1))
ops = devise_plan(h2, h, z)
f = Formula(h, z, h(z), [])
deriv = make_derivative_operator(f.M, z)
assert tn((apply_operators(f.C, ops, deriv)*f.B)[0], h2(z), z)
h2 = Hyper_Function((a1, a2 - 1, a3 - 2), (b1 - 1, b2 - 1))
ops = devise_plan(h2, h, z)
assert tn((apply_operators(f.C, ops, deriv)*f.B)[0], h2(z), z)
def test_reduction_operators():
a1, a2, b1 = (randcplx(n) for n in range(3))
h = hyper([a1], [b1], z)
assert ReduceOrder(2, 0) is None
assert ReduceOrder(2, -1) is None
assert ReduceOrder(1, S('1/2')) is None
h2 = hyper((a1, a2), (b1, a2), z)
assert tn(ReduceOrder(a2, a2).apply(h, op), h2, z)
h2 = hyper((a1, a2 + 1), (b1, a2), z)
assert tn(ReduceOrder(a2 + 1, a2).apply(h, op), h2, z)
h2 = hyper((a2 + 4, a1), (b1, a2), z)
assert tn(ReduceOrder(a2 + 4, a2).apply(h, op), h2, z)
# test several step order reduction
ap = (a2 + 4, a1, b1 + 1)
bq = (a2, b1, b1)
func, ops = reduce_order(Hyper_Function(ap, bq))
assert func.ap == (a1,)
assert func.bq == (b1,)
assert tn(apply_operators(h, ops, op), hyper(ap, bq, z), z)
def test_shift_operators():
a1, a2, b1, b2, b3 = (randcplx(n) for n in range(5))
h = hyper((a1, a2), (b1, b2, b3), z)
raises(ValueError, lambda: ShiftA(0))
raises(ValueError, lambda: ShiftB(1))
assert tn(ShiftA(a1).apply(h, op), hyper((a1 + 1, a2), (b1, b2, b3), z), z)
assert tn(ShiftA(a2).apply(h, op), hyper((a1, a2 + 1), (b1, b2, b3), z), z)
assert tn(ShiftB(b1).apply(h, op), hyper((a1, a2), (b1 - 1, b2, b3), z), z)
assert tn(ShiftB(b2).apply(h, op), hyper((a1, a2), (b1, b2 - 1, b3), z), z)
assert tn(ShiftB(b3).apply(h, op), hyper((a1, a2), (b1, b2, b3 - 1), z), z)
def test_ushift_operators():
a1, a2, b1, b2, b3 = (randcplx(n) for n in range(5))
h = hyper((a1, a2), (b1, b2, b3), z)
raises(ValueError, lambda: UnShiftA((1,), (), 0, z))
raises(ValueError, lambda: UnShiftB((), (-1,), 0, z))
raises(ValueError, lambda: UnShiftA((1,), (0, -1, 1), 0, z))
raises(ValueError, lambda: UnShiftB((0, 1), (1,), 0, z))
s = UnShiftA((a1, a2), (b1, b2, b3), 0, z)
assert tn(s.apply(h, op), hyper((a1 - 1, a2), (b1, b2, b3), z), z)
s = UnShiftA((a1, a2), (b1, b2, b3), 1, z)
assert tn(s.apply(h, op), hyper((a1, a2 - 1), (b1, b2, b3), z), z)
s = UnShiftB((a1, a2), (b1, b2, b3), 0, z)
assert tn(s.apply(h, op), hyper((a1, a2), (b1 + 1, b2, b3), z), z)
s = UnShiftB((a1, a2), (b1, b2, b3), 1, z)
assert tn(s.apply(h, op), hyper((a1, a2), (b1, b2 + 1, b3), z), z)
s = UnShiftB((a1, a2), (b1, b2, b3), 2, z)
assert tn(s.apply(h, op), hyper((a1, a2), (b1, b2, b3 + 1), z), z)
def can_do_meijer(a1, a2, b1, b2, numeric=True):
"""
This helper function tries to hyperexpand() the meijer g-function
corresponding to the parameters a1, a2, b1, b2.
It returns False if this expansion still contains g-functions.
If numeric is True, it also tests the so-obtained formula numerically
(at random values) and returns False if the test fails.
Else it returns True.
"""
from sympy import unpolarify, expand
r = hyperexpand(meijerg(a1, a2, b1, b2, z))
if r.has(meijerg):
return False
# NOTE hyperexpand() returns a truly branched function, whereas numerical
# evaluation only works on the main branch. Since we are evaluating on
# the main branch, this should not be a problem, but expressions like
# exp_polar(I*pi/2*x)**a are evaluated incorrectly. We thus have to get
# rid of them. The expand heuristically does this...
r = unpolarify(expand(r, force=True, power_base=True, power_exp=False,
mul=False, log=False, multinomial=False, basic=False))
if not numeric:
return True
repl = {}
for n, a in enumerate(meijerg(a1, a2, b1, b2, z).free_symbols - {z}):
repl[a] = randcplx(n)
return tn(meijerg(a1, a2, b1, b2, z).subs(repl), r.subs(repl), z)
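# E.g. can_do_meijer([], [a], [a - 1, a - S.Half], []) is True when the
# G-function expands to a meijerg-free expression that also verifies
# numerically at random parameter values.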
@slow
def test_meijerg_expand():
from sympy import combsimp, simplify
# from mpmath docs
assert hyperexpand(meijerg([[], []], [[0], []], -z)) == exp(z)
assert hyperexpand(meijerg([[1, 1], []], [[1], [0]], z)) == \
log(z + 1)
assert hyperexpand(meijerg([[1, 1], []], [[1], [1]], z)) == \
z/(z + 1)
assert hyperexpand(meijerg([[], []], [[S(1)/2], [0]], (z/2)**2)) \
== sin(z)/sqrt(pi)
assert hyperexpand(meijerg([[], []], [[0], [S(1)/2]], (z/2)**2)) \
== cos(z)/sqrt(pi)
assert can_do_meijer([], [a], [a - 1, a - S.Half], [])
assert can_do_meijer([], [], [a/2], [-a/2], False) # branches...
assert can_do_meijer([a], [b], [a], [b, a - 1])
# wikipedia
assert hyperexpand(meijerg([1], [], [], [0], z)) == \
Piecewise((0, abs(z) < 1), (1, abs(1/z) < 1),
(meijerg([1], [], [], [0], z), True))
assert hyperexpand(meijerg([], [1], [0], [], z)) == \
Piecewise((1, abs(z) < 1), (0, abs(1/z) < 1),
(meijerg([], [1], [0], [], z), True))
# The Special Functions and their Approximations
assert can_do_meijer([], [], [a + b/2], [a, a - b/2, a + S.Half])
assert can_do_meijer(
[], [], [a], [b], False) # branches only agree for small z
assert can_do_meijer([], [S.Half], [a], [-a])
assert can_do_meijer([], [], [a, b], [])
assert can_do_meijer([], [], [a, b], [])
assert can_do_meijer([], [], [a, a + S.Half], [b, b + S.Half])
assert can_do_meijer([], [], [a, -a], [0, S.Half], False) # ditto
assert can_do_meijer([], [], [a, a + S.Half, b, b + S.Half], [])
assert can_do_meijer([S.Half], [], [0], [a, -a])
assert can_do_meijer([S.Half], [], [a], [0, -a], False) # ditto
assert can_do_meijer([], [a - S.Half], [a, b], [a - S.Half], False)
assert can_do_meijer([], [a + S.Half], [a + b, a - b, a], [], False)
assert can_do_meijer([a + S.Half], [], [b, 2*a - b, a], [], False)
# This for example is actually zero.
assert can_do_meijer([], [], [], [a, b])
# Testing a bug:
assert hyperexpand(meijerg([0, 2], [], [], [-1, 1], z)) == \
Piecewise((0, abs(z) < 1),
(z/2 - 1/(2*z), abs(1/z) < 1),
(meijerg([0, 2], [], [], [-1, 1], z), True))
# Test that the simplest possible answer is returned:
assert combsimp(simplify(hyperexpand(
meijerg([1], [1 - a], [-a/2, -a/2 + S(1)/2], [], 1/z)))) == \
-2*sqrt(pi)*(sqrt(z + 1) + 1)**a/a
# Test that hyper is returned
assert hyperexpand(meijerg([1], [], [a], [0, 0], z)) == hyper(
(a,), (a + 1, a + 1), z*exp_polar(I*pi))*z**a*gamma(a)/gamma(a + 1)**2
# Test place option
f = meijerg(((0, 1), ()), ((S(1)/2,), (0,)), z**2)
assert hyperexpand(f) == sqrt(pi)/sqrt(1 + z**(-2))
assert hyperexpand(f, place=0) == sqrt(pi)*z/sqrt(z**2 + 1)
def test_meijerg_lookup():
from sympy import uppergamma, Si, Ci
assert hyperexpand(meijerg([a], [], [b, a], [], z)) == \
z**b*exp(z)*gamma(-a + b + 1)*uppergamma(a - b, z)
assert hyperexpand(meijerg([0], [], [0, 0], [], z)) == \
exp(z)*uppergamma(0, z)
assert can_do_meijer([a], [], [b, a + 1], [])
assert can_do_meijer([a], [], [b + 2, a], [])
assert can_do_meijer([a], [], [b - 2, a], [])
assert hyperexpand(meijerg([a], [], [a, a, a - S(1)/2], [], z)) == \
-sqrt(pi)*z**(a - S(1)/2)*(2*cos(2*sqrt(z))*(Si(2*sqrt(z)) - pi/2)
- 2*sin(2*sqrt(z))*Ci(2*sqrt(z))) == \
hyperexpand(meijerg([a], [], [a, a - S(1)/2, a], [], z)) == \
hyperexpand(meijerg([a], [], [a - S(1)/2, a, a], [], z))
assert can_do_meijer([a - 1], [], [a + 2, a - S(3)/2, a + 1], [])
@XFAIL
def test_meijerg_expand_fail():
# These basically test hyper([], [1/2 - a, 1/2 + 1, 1/2], z),
# which is *very* messy. But since the meijer g actually yields a
# sum of bessel functions, things can sometimes be simplified a lot and
# are then put into tables...
assert can_do_meijer([], [], [a + S.Half], [a, a - b/2, a + b/2])
assert can_do_meijer([], [], [0, S.Half], [a, -a])
assert can_do_meijer([], [], [3*a - S.Half, a, -a - S.Half], [a - S.Half])
assert can_do_meijer([], [], [0, a - S.Half, -a - S.Half], [S.Half])
assert can_do_meijer([], [], [a, b + S(1)/2, b], [2*b - a])
assert can_do_meijer([], [], [a, b + S(1)/2, b, 2*b - a])
assert can_do_meijer([S.Half], [], [-a, a], [0])
@slow
def test_meijerg():
# carefully set up the parameters.
# NOTE: this used to fail sometimes. I believe it is fixed, but if you
# hit an inexplicable test failure here, please let me know the seed.
a1, a2 = (randcplx(n) - 5*I - n*I for n in range(2))
b1, b2 = (randcplx(n) + 5*I + n*I for n in range(2))
b3, b4, b5, a3, a4, a5 = (randcplx() for n in range(6))
g = meijerg([a1], [a3, a4], [b1], [b3, b4], z)
assert ReduceOrder.meijer_minus(3, 4) is None
assert ReduceOrder.meijer_plus(4, 3) is None
g2 = meijerg([a1, a2], [a3, a4], [b1], [b3, b4, a2], z)
assert tn(ReduceOrder.meijer_plus(a2, a2).apply(g, op), g2, z)
g2 = meijerg([a1, a2], [a3, a4], [b1], [b3, b4, a2 + 1], z)
assert tn(ReduceOrder.meijer_plus(a2, a2 + 1).apply(g, op), g2, z)
g2 = meijerg([a1, a2 - 1], [a3, a4], [b1], [b3, b4, a2 + 2], z)
assert tn(ReduceOrder.meijer_plus(a2 - 1, a2 + 2).apply(g, op), g2, z)
g2 = meijerg([a1], [a3, a4, b2 - 1], [b1, b2 + 2], [b3, b4], z)
assert tn(ReduceOrder.meijer_minus(
b2 + 2, b2 - 1).apply(g, op), g2, z, tol=1e-6)
# test several-step reduction
an = [a1, a2]
bq = [b3, b4, a2 + 1]
ap = [a3, a4, b2 - 1]
bm = [b1, b2 + 1]
niq, ops = reduce_order_meijer(G_Function(an, ap, bm, bq))
assert niq.an == (a1,)
assert set(niq.ap) == {a3, a4}
assert niq.bm == (b1,)
assert set(niq.bq) == {b3, b4}
assert tn(apply_operators(g, ops, op), meijerg(an, ap, bm, bq, z), z)
def test_meijerg_shift_operators():
# carefully set up the parameters. XXX this still fails sometimes
a1, a2, a3, a4, a5, b1, b2, b3, b4, b5 = (randcplx(n) for n in range(10))
g = meijerg([a1], [a3, a4], [b1], [b3, b4], z)
assert tn(MeijerShiftA(b1).apply(g, op),
meijerg([a1], [a3, a4], [b1 + 1], [b3, b4], z), z)
assert tn(MeijerShiftB(a1).apply(g, op),
meijerg([a1 - 1], [a3, a4], [b1], [b3, b4], z), z)
assert tn(MeijerShiftC(b3).apply(g, op),
meijerg([a1], [a3, a4], [b1], [b3 + 1, b4], z), z)
assert tn(MeijerShiftD(a3).apply(g, op),
meijerg([a1], [a3 - 1, a4], [b1], [b3, b4], z), z)
s = MeijerUnShiftA([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3, a4], [b1 - 1], [b3, b4], z), z)
s = MeijerUnShiftC([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3, a4], [b1], [b3 - 1, b4], z), z)
s = MeijerUnShiftB([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1 + 1], [a3, a4], [b1], [b3, b4], z), z)
s = MeijerUnShiftD([a1], [a3, a4], [b1], [b3, b4], 0, z)
assert tn(
s.apply(g, op), meijerg([a1], [a3 + 1, a4], [b1], [b3, b4], z), z)
@slow
def test_meijerg_confluence():
def t(m, a, b):
from sympy import sympify, Piecewise
a, b = sympify([a, b])
m_ = m
m = hyperexpand(m)
if not m == Piecewise((a, abs(z) < 1), (b, abs(1/z) < 1), (m_, True)):
return False
if not (m.args[0].args[0] == a and m.args[1].args[0] == b):
return False
z0 = randcplx()/10
if abs(m.subs(z, z0).n() - a.subs(z, z0).n()).n() > 1e-10:
return False
if abs(m.subs(z, 1/z0).n() - b.subs(z, 1/z0).n()).n() > 1e-10:
return False
return True
assert t(meijerg([], [1, 1], [0, 0], [], z), -log(z), 0)
assert t(meijerg(
[], [3, 1], [0, 0], [], z), -z**2/4 + z - log(z)/2 - S(3)/4, 0)
assert t(meijerg([], [3, 1], [-1, 0], [], z),
z**2/12 - z/2 + log(z)/2 + S(1)/4 + 1/(6*z), 0)
assert t(meijerg([], [1, 1, 1, 1], [0, 0, 0, 0], [], z), -log(z)**3/6, 0)
assert t(meijerg([1, 1], [], [], [0, 0], z), 0, -log(1/z))
assert t(meijerg([1, 1], [2, 2], [1, 1], [0, 0], z),
-z*log(z) + 2*z, -log(1/z) + 2)
assert t(meijerg([S(1)/2], [1, 1], [0, 0], [S(3)/2], z), log(z)/2 - 1, 0)
def u(an, ap, bm, bq):
m = meijerg(an, ap, bm, bq, z)
m2 = hyperexpand(m, allow_hyper=True)
if m2.has(meijerg) and not (m2.is_Piecewise and len(m2.args) == 3):
return False
return tn(m, m2, z)
assert u([], [1], [0, 0], [])
assert u([1, 1], [], [], [0])
assert u([1, 1], [2, 2, 5], [1, 1, 6], [0, 0])
assert u([1, 1], [2, 2, 5], [1, 1, 6], [0])
def test_meijerg_with_Floats():
# see issue #10681
from sympy import RR
f = meijerg(((3.0, 1), ()), ((S(3)/2,), (0,)), z)
a = -2.3632718012073
g = a*z**(S(3)/2)*hyper((-0.5, S(3)/2), (S(5)/2,), z*exp_polar(I*pi))
assert RR.almosteq((hyperexpand(f)/g).n(), 1.0, 1e-12)
def test_lerchphi():
from sympy import combsimp, exp_polar, polylog, log, lerchphi
assert hyperexpand(hyper([1, a], [a + 1], z)/a) == lerchphi(z, 1, a)
assert hyperexpand(
hyper([1, a, a], [a + 1, a + 1], z)/a**2) == lerchphi(z, 2, a)
assert hyperexpand(hyper([1, a, a, a], [a + 1, a + 1, a + 1], z)/a**3) == \
lerchphi(z, 3, a)
assert hyperexpand(hyper([1] + [a]*10, [a + 1]*10, z)/a**10) == \
lerchphi(z, 10, a)
assert combsimp(hyperexpand(meijerg([0, 1 - a], [], [0],
[-a], exp_polar(-I*pi)*z))) == lerchphi(z, 1, a)
assert combsimp(hyperexpand(meijerg([0, 1 - a, 1 - a], [], [0],
[-a, -a], exp_polar(-I*pi)*z))) == lerchphi(z, 2, a)
assert combsimp(hyperexpand(meijerg([0, 1 - a, 1 - a, 1 - a], [], [0],
[-a, -a, -a], exp_polar(-I*pi)*z))) == lerchphi(z, 3, a)
assert hyperexpand(z*hyper([1, 1], [2], z)) == -log(1 - z)
assert hyperexpand(z*hyper([1, 1, 1], [2, 2], z)) == polylog(2, z)
assert hyperexpand(z*hyper([1, 1, 1, 1], [2, 2, 2], z)) == polylog(3, z)
assert hyperexpand(hyper([1, a, 1 + S(1)/2], [a + 1, S(1)/2], z)) == \
-2*a/(z - 1) + (-2*a**2 + a)*lerchphi(z, 1, a)
# Now numerical tests. These make sure reductions etc are carried out
# correctly
# a rational function (polylog at negative integer order)
assert can_do([2, 2, 2], [1, 1])
# NOTE these contain log(1-x) etc ... better make sure we have |z| < 1
# reduction of order for polylog
assert can_do([1, 1, 1, b + 5], [2, 2, b], div=10)
# reduction of order for lerchphi
# XXX lerchphi in mpmath is flaky
assert can_do(
[1, a, a, a, b + 5], [a + 1, a + 1, a + 1, b], numerical=False)
# test a bug
from sympy import Abs
assert hyperexpand(hyper([S(1)/2, S(1)/2, S(1)/2, 1],
[S(3)/2, S(3)/2, S(3)/2], S(1)/4)) == \
Abs(-polylog(3, exp_polar(I*pi)/2) + polylog(3, S(1)/2))
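# Background note (hedged): the hyper -> polylog reductions above follow
# the identity polylog(s, z) == z*lerchphi(z, s, 1); in particular
# z*hyper([1]*(s + 1), [2]*s, z) collapses to polylog(s, z), which is the
# pattern the [1, 1, ...], [2, 2, ...] assertions exercise.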
def test_partial_simp():
# First test that hypergeometric function formulae work.
a, b, c, d, e = (randcplx() for _ in range(5))
for func in [Hyper_Function([a, b, c], [d, e]),
Hyper_Function([], [a, b, c, d, e])]:
f = build_hypergeometric_formula(func)
z = f.z
assert f.closed_form == func(z)
deriv1 = f.B.diff(z)*z
deriv2 = f.M*f.B
for func1, func2 in zip(deriv1, deriv2):
assert tn(func1, func2, z)
# Now test that formulae are partially simplified.
from sympy.abc import a, b, z
assert hyperexpand(hyper([3, a], [1, b], z)) == \
(-a*b/2 + a*z/2 + 2*a)*hyper([a + 1], [b], z) \
+ (a*b/2 - 2*a + 1)*hyper([a], [b], z)
assert tn(
hyperexpand(hyper([3, d], [1, e], z)), hyper([3, d], [1, e], z), z)
assert hyperexpand(hyper([3], [1, a, b], z)) == \
hyper((), (a, b), z) \
+ z*hyper((), (a + 1, b), z)/(2*a) \
- z*(b - 4)*hyper((), (a + 1, b + 1), z)/(2*a*b)
assert tn(
hyperexpand(hyper([3], [1, d, e], z)), hyper([3], [1, d, e], z), z)
def test_hyperexpand_special():
assert hyperexpand(hyper([a, b], [c], 1)) == \
gamma(c)*gamma(c - a - b)/gamma(c - a)/gamma(c - b)
assert hyperexpand(hyper([a, b], [1 + a - b], -1)) == \
gamma(1 + a/2)*gamma(1 + a - b)/gamma(1 + a)/gamma(1 + a/2 - b)
assert hyperexpand(hyper([a, b], [1 + b - a], -1)) == \
gamma(1 + b/2)*gamma(1 + b - a)/gamma(1 + b)/gamma(1 + b/2 - a)
assert hyperexpand(meijerg([1 - z - a/2], [1 - z + a/2], [b/2], [-b/2], 1)) == \
gamma(1 - 2*z)*gamma(z + a/2 + b/2)/gamma(1 - z + a/2 - b/2) \
/gamma(1 - z - a/2 + b/2)/gamma(1 - z + a/2 + b/2)
assert hyperexpand(hyper([a], [b], 0)) == 1
assert hyper([a], [b], 0) != 0
def test_Mod1_behavior():
from sympy import Symbol, simplify, lowergamma
n = Symbol('n', integer=True)
# Note: this should not hang.
assert simplify(hyperexpand(meijerg([1], [], [n + 1], [0], z))) == \
lowergamma(n + 1, z)
@slow
def test_prudnikov_misc():
assert can_do([1, (3 + I)/2, (3 - I)/2], [S(3)/2, 2])
assert can_do([S.Half, a - 1], [S(3)/2, a + 1], lowerplane=True)
assert can_do([], [b + 1])
assert can_do([a], [a - 1, b + 1])
assert can_do([a], [a - S.Half, 2*a])
assert can_do([a], [a - S.Half, 2*a + 1])
assert can_do([a], [a - S.Half, 2*a - 1])
assert can_do([a], [a + S.Half, 2*a])
assert can_do([a], [a + S.Half, 2*a + 1])
assert can_do([a], [a + S.Half, 2*a - 1])
assert can_do([S.Half], [b, 2 - b])
assert can_do([S.Half], [b, 3 - b])
assert can_do([1], [2, b])
assert can_do([a, a + S.Half], [2*a, b, 2*a - b + 1])
assert can_do([a, a + S.Half], [S.Half, 2*a, 2*a + S.Half])
assert can_do([a], [a + 1], lowerplane=True) # lowergamma
@slow
def test_prudnikov_1():
# A. P. Prudnikov, Yu. A. Brychkov and O. I. Marichev (1990).
# Integrals and Series: More Special Functions, Vol. 3.
# Gordon and Breach Science Publishers
# 7.3.1
assert can_do([a, -a], [S.Half])
assert can_do([a, 1 - a], [S.Half])
assert can_do([a, 1 - a], [S(3)/2])
assert can_do([a, 2 - a], [S.Half])
assert can_do([a, 2 - a], [S(3)/2])
assert can_do([a, a + S(1)/2], [2*a - 1])
assert can_do([a, a + S(1)/2], [2*a])
assert can_do([a, a + S(1)/2], [2*a + 1])
assert can_do([a, a + S(1)/2], [S(1)/2])
assert can_do([a, a + S(1)/2], [S(3)/2])
assert can_do([a, a/2 + 1], [a/2])
assert can_do([1, b], [2])
assert can_do([1, b], [b + 1], numerical=False) # Lerch Phi
# NOTE: branches are complicated for |z| > 1
assert can_do([a], [2*a])
assert can_do([a], [2*a + 1])
assert can_do([a], [2*a - 1])
@slow
def test_prudnikov_2():
h = S.Half
assert can_do([-h, -h], [h])
assert can_do([-h, h], [3*h])
assert can_do([-h, h], [5*h])
assert can_do([-h, h], [7*h])
assert can_do([-h, 1], [h])
for p in [-h, h]:
for n in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:
for m in [-h, h, 3*h, 5*h, 7*h]:
assert can_do([p, n], [m])
for n in [1, 2, 3, 4]:
for m in [1, 2, 3, 4]:
assert can_do([p, n], [m])
@slow
def test_prudnikov_3():
h = S.Half
assert can_do([S(1)/4, S(3)/4], [h])
assert can_do([S(1)/4, S(3)/4], [3*h])
assert can_do([S(1)/3, S(2)/3], [3*h])
assert can_do([S(3)/4, S(5)/4], [h])
assert can_do([S(3)/4, S(5)/4], [3*h])
for p in [1, 2, 3, 4]:
for n in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4, 9*h]:
for m in [1, 3*h, 2, 5*h, 3, 7*h, 4]:
assert can_do([p, m], [n])
@slow
def test_prudnikov_4():
h = S.Half
for p in [3*h, 5*h, 7*h]:
for n in [-h, h, 3*h, 5*h, 7*h]:
for m in [3*h, 2, 5*h, 3, 7*h, 4]:
assert can_do([p, m], [n])
for n in [1, 2, 3, 4]:
for m in [2, 3, 4]:
assert can_do([p, m], [n])
@slow
def test_prudnikov_5():
h = S.Half
for p in [1, 2, 3]:
for q in range(p, 4):
for r in [1, 2, 3]:
for s in range(r, 4):
assert can_do([-h, p, q], [r, s])
for p in [h, 1, 3*h, 2, 5*h, 3]:
for q in [h, 3*h, 5*h]:
for r in [h, 3*h, 5*h]:
for s in [h, 3*h, 5*h]:
if s <= q and s <= r:
assert can_do([-h, p, q], [r, s])
for p in [h, 1, 3*h, 2, 5*h, 3]:
for q in [1, 2, 3]:
for r in [h, 3*h, 5*h]:
for s in [1, 2, 3]:
assert can_do([-h, p, q], [r, s])
@slow
def test_prudnikov_6():
h = S.Half
for m in [3*h, 5*h]:
for n in [1, 2, 3]:
for q in [h, 1, 2]:
for p in [1, 2, 3]:
assert can_do([h, q, p], [m, n])
for q in [1, 2, 3]:
for p in [3*h, 5*h]:
assert can_do([h, q, p], [m, n])
for q in [1, 2]:
for p in [1, 2, 3]:
for m in [1, 2, 3]:
for n in [1, 2, 3]:
assert can_do([h, q, p], [m, n])
assert can_do([h, h, 5*h], [3*h, 3*h])
assert can_do([h, 1, 5*h], [3*h, 3*h])
assert can_do([h, 2, 2], [1, 3])
# pages 435 to 457 contain more PFDD and stuff like this
@slow
def test_prudnikov_7():
assert can_do([3], [6])
h = S.Half
for n in [h, 3*h, 5*h, 7*h]:
assert can_do([-h], [n])
for m in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]: # HERE
for n in [-h, h, 3*h, 5*h, 7*h, 1, 2, 3, 4]:
assert can_do([m], [n])
@slow
def test_prudnikov_8():
h = S.Half
# 7.12.2
for a in [1, 2, 3]:
for b in [1, 2, 3]:
for c in range(1, a + 1):
for d in [h, 1, 3*h, 2, 5*h, 3]:
assert can_do([a, b], [c, d])
for b in [3*h, 5*h]:
for c in [h, 1, 3*h, 2, 5*h, 3]:
for d in [1, 2, 3]:
assert can_do([a, b], [c, d])
for a in [-h, h, 3*h, 5*h]:
for b in [1, 2, 3]:
for c in [h, 1, 3*h, 2, 5*h, 3]:
for d in [1, 2, 3]:
assert can_do([a, b], [c, d])
for b in [h, 3*h, 5*h]:
for c in [h, 3*h, 5*h, 3]:
for d in [h, 1, 3*h, 2, 5*h, 3]:
if c <= b:
assert can_do([a, b], [c, d])
def test_prudnikov_9():
# 7.13.1 [we have a general formula ... so this is a bit pointless]
for i in range(9):
assert can_do([], [(S(i) + 1)/2])
for i in range(5):
assert can_do([], [-(2*S(i) + 1)/2])
@slow
def test_prudnikov_10():
# 7.14.2
h = S.Half
for p in [-h, h, 1, 3*h, 2, 5*h, 3, 7*h, 4]:
for m in [1, 2, 3, 4]:
for n in range(m, 5):
assert can_do([p], [m, n])
for p in [1, 2, 3, 4]:
for n in [h, 3*h, 5*h, 7*h]:
for m in [1, 2, 3, 4]:
assert can_do([p], [n, m])
for p in [3*h, 5*h, 7*h]:
for m in [h, 1, 2, 5*h, 3, 7*h, 4]:
assert can_do([p], [h, m])
assert can_do([p], [3*h, m])
for m in [h, 1, 2, 5*h, 3, 7*h, 4]:
assert can_do([7*h], [5*h, m])
assert can_do([-S(1)/2], [S(1)/2, S(1)/2]) # hyperbolic sine integral Shi
def test_prudnikov_11():
# 7.15
assert can_do([a, a + S.Half], [2*a, b, 2*a - b])
assert can_do([a, a + S.Half], [S(3)/2, 2*a, 2*a - S(1)/2])
assert can_do([S(1)/4, S(3)/4], [S(1)/2, S(1)/2, 1])
assert can_do([S(5)/4, S(3)/4], [S(3)/2, S(1)/2, 2])
assert can_do([S(5)/4, S(3)/4], [S(3)/2, S(3)/2, 1])
assert can_do([S(5)/4, S(7)/4], [S(3)/2, S(5)/2, 2])
assert can_do([1, 1], [S(3)/2, 2, 2]) # cosh-integral chi
@slow
def test_prudnikov_12():
# 7.16
assert can_do(
[], [a, a + S.Half, 2*a], False) # branches only agree for some z!
assert can_do([], [a, a + S.Half, 2*a + 1], False) # ditto
assert can_do([], [S.Half, a, a + S.Half])
assert can_do([], [S(3)/2, a, a + S.Half])
assert can_do([], [S(1)/4, S(1)/2, S(3)/4])
assert can_do([], [S(1)/2, S(1)/2, 1])
assert can_do([], [S(1)/2, S(3)/2, 1])
assert can_do([], [S(3)/4, S(3)/2, S(5)/4])
assert can_do([], [1, 1, S(3)/2])
assert can_do([], [1, 2, S(3)/2])
assert can_do([], [1, S(3)/2, S(3)/2])
assert can_do([], [S(5)/4, S(3)/2, S(7)/4])
assert can_do([], [2, S(3)/2, S(3)/2])
@slow
def test_prudnikov_2F1():
h = S.Half
# Elliptic integrals
for p in [-h, h]:
for m in [h, 3*h, 5*h, 7*h]:
for n in [1, 2, 3, 4]:
assert can_do([p, m], [n])
@XFAIL
def test_prudnikov_fail_2F1():
assert can_do([a, b], [b + 1]) # incomplete beta function
assert can_do([-1, b], [c]) # Poly. also -2, -3 etc
# TODO polys
# Legendre functions:
assert can_do([a, b], [a + b + S.Half])
assert can_do([a, b], [a + b - S.Half])
assert can_do([a, b], [a + b + S(3)/2])
assert can_do([a, b], [(a + b + 1)/2])
assert can_do([a, b], [(a + b)/2 + 1])
assert can_do([a, b], [a - b + 1])
assert can_do([a, b], [a - b + 2])
assert can_do([a, b], [2*b])
assert can_do([a, b], [S.Half])
assert can_do([a, b], [S(3)/2])
assert can_do([a, 1 - a], [c])
assert can_do([a, 2 - a], [c])
assert can_do([a, 3 - a], [c])
assert can_do([a, a + S(1)/2], [c])
assert can_do([1, b], [c])
assert can_do([1, b], [S(3)/2])
assert can_do([S(1)/4, S(3)/4], [1])
# PFDD
o = S(1)
assert can_do([o/8, 1], [o/8*9])
assert can_do([o/6, 1], [o/6*7])
assert can_do([o/6, 1], [o/6*13])
assert can_do([o/5, 1], [o/5*6])
assert can_do([o/5, 1], [o/5*11])
assert can_do([o/4, 1], [o/4*5])
assert can_do([o/4, 1], [o/4*9])
assert can_do([o/3, 1], [o/3*4])
assert can_do([o/3, 1], [o/3*7])
assert can_do([o/8*3, 1], [o/8*11])
assert can_do([o/5*2, 1], [o/5*7])
assert can_do([o/5*2, 1], [o/5*12])
assert can_do([o/5*3, 1], [o/5*8])
assert can_do([o/5*3, 1], [o/5*13])
assert can_do([o/8*5, 1], [o/8*13])
assert can_do([o/4*3, 1], [o/4*7])
assert can_do([o/4*3, 1], [o/4*11])
assert can_do([o/3*2, 1], [o/3*5])
assert can_do([o/3*2, 1], [o/3*8])
assert can_do([o/5*4, 1], [o/5*9])
assert can_do([o/5*4, 1], [o/5*14])
assert can_do([o/6*5, 1], [o/6*11])
assert can_do([o/6*5, 1], [o/6*17])
assert can_do([o/8*7, 1], [o/8*15])
@XFAIL
def test_prudnikov_fail_3F2():
assert can_do([a, a + S(1)/3, a + S(2)/3], [S(1)/3, S(2)/3])
assert can_do([a, a + S(1)/3, a + S(2)/3], [S(2)/3, S(4)/3])
assert can_do([a, a + S(1)/3, a + S(2)/3], [S(4)/3, S(5)/3])
# page 421
assert can_do([a, a + S(1)/3, a + S(2)/3], [3*a/2, (3*a + 1)/2])
# pages 422 ...
assert can_do([-S.Half, S.Half, S.Half], [1, 1]) # elliptic integrals
assert can_do([-S.Half, S.Half, 1], [S(3)/2, S(3)/2])
# TODO LOTS more
# PFDD
assert can_do([S(1)/8, S(3)/8, 1], [S(9)/8, S(11)/8])
assert can_do([S(1)/8, S(5)/8, 1], [S(9)/8, S(13)/8])
assert can_do([S(1)/8, S(7)/8, 1], [S(9)/8, S(15)/8])
assert can_do([S(1)/6, S(1)/3, 1], [S(7)/6, S(4)/3])
assert can_do([S(1)/6, S(2)/3, 1], [S(7)/6, S(5)/3])
assert can_do([S(1)/6, S(2)/3, 1], [S(5)/3, S(13)/6])
assert can_do([S.Half, 1, 1], [S(1)/4, S(3)/4])
# LOTS more
@XFAIL
def test_prudnikov_fail_other():
# 7.11.2
# 7.12.1
assert can_do([1, a], [b, 1 - 2*a + b]) # ???
# 7.14.2
assert can_do([-S(1)/2], [S(1)/2, 1]) # struve
assert can_do([1], [S(1)/2, S(1)/2]) # struve
assert can_do([S(1)/4], [S(1)/2, S(5)/4]) # PFDD
assert can_do([S(3)/4], [S(3)/2, S(7)/4]) # PFDD
assert can_do([1], [S(1)/4, S(3)/4]) # PFDD
assert can_do([1], [S(3)/4, S(5)/4]) # PFDD
assert can_do([1], [S(5)/4, S(7)/4]) # PFDD
# TODO LOTS more
# 7.15.2
assert can_do([S(1)/2, 1], [S(3)/4, S(5)/4, S(3)/2]) # PFDD
assert can_do([S(1)/2, 1], [S(7)/4, S(5)/4, S(3)/2]) # PFDD
# 7.16.1
assert can_do([], [S(1)/3, S(2)/3]) # PFDD
assert can_do([], [S(2)/3, S(4)/3]) # PFDD
assert can_do([], [S(5)/3, S(4)/3]) # PFDD
# XXX this does not *evaluate* right??
assert can_do([], [a, a + S.Half, 2*a - 1])
def test_bug():
h = hyper([-1, 1], [z], -1)
assert hyperexpand(h) == (z + 1)/z
def test_omgissue_203():
h = hyper((-5, -3, -4), (-6, -6), 1)
assert hyperexpand(h) == Rational(1, 30)
h = hyper((-6, -7, -5), (-6, -6), 1)
assert hyperexpand(h) == -Rational(1, 6)
| gpl-3.0 |
erikdvoss/tiy-project | bitcoin_trade/trade_engine/models.py | 3 | 12895 | from django.contrib.auth.models import User
from django.db.models.signals import post_save
from trade_engine.custom_models import SeparatedValuesField
from django.dispatch import receiver
from django.db import models
import jsonfield
import json
import http.client
import urllib.request, urllib.parse, urllib.error
import hashlib
import hmac
import time
class UserAccount(models.Model):
user = models.OneToOneField(User, related_name='UserAccount')
api_key = models.CharField(max_length=100)
secret = models.CharField(max_length=100)
def __str__(self):
return "{}, {}".format(self.api_key, self.secret)
class DepositAddress(models.Model):
user = models.ForeignKey(User, related_name='UserAddress')
address = models.CharField(max_length=35)
def __str__(self):
return "{}".format(self.address)
class WithdrawCoin(models.Model):
user = models.ForeignKey(User)
coin = models.CharField(max_length=3, default="BTC")
amount = models.DecimalField(max_digits=10, decimal_places=8)
address = models.CharField(max_length=35)
@receiver(post_save, sender=WithdrawCoin)
def withdraw_handler(sender, instance, **kwargs):
useraccount = instance.user.UserAccount
api_key = useraccount.api_key
secret = useraccount.secret.encode()
user = instance.user
nonce = str(time.time()).split('.')[0]
parms = {"method": "WithdrawCoin",
"coinName": instance.coin,
"amount": instance.amount,
"address": instance.address,
"nonce": nonce}
parms = urllib.parse.urlencode(parms)
hashed = hmac.new(secret, digestmod=hashlib.sha512)
parms = parms.encode()
hashed.update(parms)
signature = hashed.hexdigest()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Key": api_key,
"Sign": signature}
conn = http.client.HTTPSConnection("btc-e.com")
conn.request("POST", "/tapi", parms, headers)
response = conn.getresponse().read()
response = response.decode('latin-1')
response = json.loads(response)
usd = response['return']['funds']['usd']
btc = response['return']['funds']['btc']
balance_data = {'user': user, 'usd': usd, 'btc': btc}
BalanceTicker.objects.create(**balance_data)
tid = response['return']['tId']
amount = response['return']['amountSent']
# WithdrawTicker.user is a required foreign key, so it must be supplied here
withdraw_data = {'user': user, 'tid': tid, 'amount': amount}
WithdrawTicker.objects.create(**withdraw_data)
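# --- Illustrative helper (a sketch, not part of the original app) ---
# Every post_save handler in this module repeats the same BTC-e signing
# sequence: urlencode the POST body, HMAC-SHA512 it with the account
# secret, and send it with the "Key"/"Sign" headers. A shared helper
# along these lines would remove the duplication; the name
# _signed_request and its placement here are assumptions for
# illustration only.
def _signed_request(useraccount, parms):
    # every tapi call needs a fresh, increasing nonce
    parms["nonce"] = str(time.time()).split('.')[0]
    body = urllib.parse.urlencode(parms).encode()
    signature = hmac.new(useraccount.secret.encode(), body,
                         digestmod=hashlib.sha512).hexdigest()
    headers = {"Content-type": "application/x-www-form-urlencoded",
               "Key": useraccount.api_key,
               "Sign": signature}
    conn = http.client.HTTPSConnection("btc-e.com")
    conn.request("POST", "/tapi", body, headers)
    return json.loads(conn.getresponse().read().decode('latin-1'))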
class WithdrawTicker(models.Model):
user = models.ForeignKey(User)
tid = models.IntegerField()
amount = models.DecimalField(max_digits=10, decimal_places=8)
def __str__(self):
return "{}, {}".format(self.tid, self.amount)
class Balance(models.Model):
user = models.ForeignKey(User)
@receiver(post_save, sender=Balance)
def balance_handler(sender, instance, **kwargs):
useraccount = instance.user.UserAccount
api_key = useraccount.api_key
secret = useraccount.secret.encode()
user = instance.user
nonce = str(time.time()).split('.')[0]
parms = {"method": "getInfo",
"nonce": nonce}
parms = urllib.parse.urlencode(parms)
hashed = hmac.new(secret, digestmod=hashlib.sha512)
parms = parms.encode()
hashed.update(parms)
signature = hashed.hexdigest()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Key": api_key,
"Sign": signature}
conn = http.client.HTTPSConnection("btc-e.com")
conn.request("POST", "/tapi", parms, headers)
response = conn.getresponse().read()
response = response.decode('latin-1')
response = json.loads(response)
usd = response['return']['funds']['usd']
btc = response['return']['funds']['btc']
balance_data = {'user': user, 'usd': usd, 'btc': btc}
BalanceTicker.objects.create(**balance_data)
class BalanceTicker(models.Model):
user = models.ForeignKey(User, related_name='UserBalance')
usd = models.DecimalField(max_digits=13, decimal_places=8)
btc = models.DecimalField(max_digits=10, decimal_places=8)
def __str__(self):
return "{}, {}".format(self.usd, self.btc)
class ActiveOrder(models.Model):
user = models.ForeignKey(User)
@receiver(post_save, sender=ActiveOrder)
def active_order_handler(sender, instance, **kwargs):
useraccount = instance.user.UserAccount
api_key = useraccount.api_key
secret = useraccount.secret.encode()
user = instance.user
nonce = str(time.time()).split('.')[0]
parms = {"method": "ActiveOrders",
"pair": "btc_usd",
"nonce": nonce}
parms = urllib.parse.urlencode(parms)
hashed = hmac.new(secret, digestmod=hashlib.sha512)
parms = parms.encode()
hashed.update(parms)
signature = hashed.hexdigest()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Key": api_key,
"Sign": signature}
conn = http.client.HTTPSConnection("btc-e.com")
conn.request("POST", "/tapi", parms, headers)
response = conn.getresponse().read()
response = response.decode('latin-1')
response = json.loads(response)
ticker_data = {'user': user, 'json': response}
ActiveOrderTicker.objects.create(**ticker_data)
class ActiveOrderTicker(models.Model):
user = models.ForeignKey(User, related_name='UserActiveOrders')
json = jsonfield.JSONField()
def __str__(self):
return "{}".format(self.json)
@property
def split_json(self):
loads = self.json
if loads['success'] == 0:
return ["no open orders"]
elif loads['success'] == 1:
# the API keys open orders by order id; return the list of ids
return list(loads['return'].keys())
types = [('buy', 'buy'), ('sell', 'sell')]
class Trade(models.Model):
user = models.ForeignKey(User)
pair = models.CharField(max_length=8, default="btc_usd")
type = models.CharField(max_length=4, choices=types)
rate = models.DecimalField(max_digits=13, decimal_places=8)
amount = models.DecimalField(max_digits=10, decimal_places=8)
@receiver(post_save, sender=Trade)
def trade_handler(sender, instance, **kwargs):
useraccount = instance.user.UserAccount
api_key = useraccount.api_key
secret = useraccount.secret.encode()
user = instance.user
nonce = str(time.time()).split('.')[0]
parms = {"method": "Trade",
"pair": instance.pair,
"type": instance.type,
"rate": instance.rate,
"amount": instance.amount,
"nonce": nonce}
parms = urllib.parse.urlencode(parms)
hashed = hmac.new(secret, digestmod=hashlib.sha512)
parms = parms.encode()
hashed.update(parms)
signature = hashed.hexdigest()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Key": api_key,
"Sign": signature}
conn = http.client.HTTPSConnection("btc-e.com")
conn.request("POST", "/tapi", parms, headers)
response = conn.getresponse().read()
response = response.decode('latin-1')
response = json.loads(response)
ticker_data = {'user': user, 'json': response}
TradeTicker.objects.create(**ticker_data)
class TradeTicker(models.Model):
user = models.ForeignKey(User)
json = jsonfield.JSONField()
def __str__(self):
# __str__ must return a string; self.json is a dict
return "{}".format(self.json)
class CancelOrder(models.Model):
user = models.ForeignKey(User)
order_id = models.IntegerField()
@receiver(post_save, sender=CancelOrder)
def cancel_order_handler(sender, instance, **kwargs):
useraccount = instance.user.UserAccount
api_key = useraccount.api_key
secret = useraccount.secret.encode()
user = instance.user
nonce = str(time.time()).split('.')[0]
parms = {"method": "CancelOrder",
"order_id": instance.order_id,
"nonce": nonce}
parms = urllib.parse.urlencode(parms)
hashed = hmac.new(secret, digestmod=hashlib.sha512)
parms = parms.encode()
hashed.update(parms)
signature = hashed.hexdigest()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Key": api_key,
"Sign": signature}
conn = http.client.HTTPSConnection("btc-e.com")
conn.request("POST", "/tapi", parms, headers)
response = conn.getresponse().read()
response = response.decode('latin-1')
response = json.loads(response)
ticker_data = {'user': user, 'json': response}
CancelOrderTicker.objects.create(**ticker_data)
class CancelOrderTicker(models.Model):
user = models.ForeignKey(User)
json = jsonfield.JSONField()
def __str__(self):
return "{}".format(self.json)
class TradeHistory(models.Model):
user = models.ForeignKey(User)
_From = models.IntegerField(default=0)
count = models.IntegerField(default=1000)
order = models.CharField(max_length=4, default="DESC")
pair = models.CharField(max_length=7, default="btc_usd")
@receiver(post_save, sender=TradeHistory)
def trade_history_handler(sender, instance, **kwargs):
useraccount = instance.user.UserAccount
api_key = useraccount.api_key
secret = useraccount.secret.encode()
user = instance.user
nonce = str(time.time()).split('.')[0]
parms = {"method": "TradeHistory",
"user": instance.user,
"from": instance._From,
"count": instance.count,
"order": instance.order,
"pair": instance.pair,
"nonce": nonce}
parms = urllib.parse.urlencode(parms)
hashed = hmac.new(secret, digestmod=hashlib.sha512)
parms = parms.encode()
hashed.update(parms)
signature = hashed.hexdigest()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Key": api_key,
"Sign": signature}
conn = http.client.HTTPSConnection("btc-e.com")
conn.request("POST", "/tapi", parms, headers)
response = conn.getresponse().read()
response = response.decode('latin-1')
response = json.loads(response)
ticker_data = {'user': user, 'json': response}
TradeHistoryTicker.objects.create(**ticker_data)
class TradeHistoryTicker(models.Model):
user = models.ForeignKey(User)
json = jsonfield.JSONField()
class TransHistory(models.Model):
user = models.ForeignKey(User)
_From = models.IntegerField(default=0)
count = models.IntegerField(default=1000)
order = models.CharField(max_length=4, default="DESC")
@receiver(post_save, sender=TransHistory)
def trans_history_handler(sender, instance, **kwargs):
useraccount = instance.user.UserAccount
api_key = useraccount.api_key
secret = useraccount.secret.encode()
user = instance.user
nonce = str(time.time()).split('.')[0]
parms = {"method": "TransHistory",
"user": instance.user,
"from": instance._From,
"count": instance.count,
"order": instance.order,
"nonce": nonce}
parms = urllib.parse.urlencode(parms)
hashed = hmac.new(secret, digestmod=hashlib.sha512)
parms = parms.encode()
hashed.update(parms)
signature = hashed.hexdigest()
headers = {"Content-type": "application/x-www-form-urlencoded",
"Key": api_key,
"Sign": signature}
conn = http.client.HTTPSConnection("btc-e.com")
conn.request("POST", "/tapi", parms, headers)
response = conn.getresponse().read()
response = response.decode('latin-1')
response = json.loads(response)
ticker_data = {'user': user, 'json': response}
TransHistoryTicker.objects.create(**ticker_data)
class TransHistoryTicker(models.Model):
user = models.ForeignKey(User)
json = jsonfield.JSONField()
class Ticker(models.Model):
high = models.DecimalField(max_digits=6, decimal_places=2)
low = models.DecimalField(max_digits=6, decimal_places=2)
avg = models.DecimalField(max_digits=6, decimal_places=2)
vol = models.DecimalField(max_digits=20, decimal_places=2)
vol_cur = models.DecimalField(max_digits=9, decimal_places=2)
server_time = models.IntegerField()
last = models.DecimalField(max_digits=6, decimal_places=2)
buy = models.DecimalField(max_digits=6, decimal_places=2)
sell = models.DecimalField(max_digits=6, decimal_places=2)
updated = models.IntegerField()
def __str__(self):
return "{}, {}, {}, {}, {}, {}, {}, {}, {}, {}".format(self.high, self.low, self.avg, self.vol, self.vol_cur, self.server_time, self.last, self.buy, self.sell, self.updated)
class Depth(models.Model):
asks = SeparatedValuesField()
bids = SeparatedValuesField()
def __str__(self):
return "{}, {}".format(self.asks, self.bids)
@property
def split_bids(self):
x = self.bids[1:]
y = x[:-1]
z = list(y.split("],["))
return z
@property
def split_asks(self):
x = self.asks[1:]
y = x[:-1]
z = list(y.split("],["))
return z
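# Illustrative note (not part of the models): for bids stored as
# "[[450.1,0.5],[449.9,1.2]]", split_bids strips one character from each
# end and splits on "],[", yielding ["[450.1,0.5", "449.9,1.2]"]; the
# first and last entries keep one leftover bracket, so templates that
# consume these lists should expect it.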
| gpl-3.0 |
ephes/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 110 | 34127 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.cross_validation import train_test_split
from sklearn.cross_validation import cross_val_score
from sklearn.grid_search import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from nose import SkipTest
from nose.tools import assert_equal
from nose.tools import assert_false
from nose.tools import assert_not_equal
from nose.tools import assert_true
from nose.tools import assert_almost_equal
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
"the salad celeri copyright",
"the salad salad sparkling water copyright",
"the the celeri celeri copyright",
"the tomato tomato salad water",
"the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
return strip_accents_unicode(s).upper()
def strip_eacute(s):
return s.replace('\xe9', 'e')
def split_tokenize(s):
return s.split()
def lazy_analyze(s):
return ['the_ultimate_feature']
def test_strip_accents():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_unicode(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_unicode(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '\u0627' # simple halef
assert_equal(strip_accents_unicode(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
# check some classical latin accentuated symbols
a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
expected = 'aaaaaaceeee'
assert_equal(strip_accents_ascii(a), expected)
a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
expected = 'iiiinooooouuuuy'
assert_equal(strip_accents_ascii(a), expected)
# check some arabic
a = '\u0625' # halef with a hamza below
expected = '' # halef has no direct ascii match
assert_equal(strip_accents_ascii(a), expected)
# mix letters accentuated and not
a = "this is \xe0 test"
expected = 'this is a test'
assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
for Vectorizer in (CountVectorizer, HashingVectorizer):
wa = Vectorizer(strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon']
assert_equal(wa(text), expected)
text = "This is a test, really.\n\n I met Harry yesterday."
expected = ['this', 'is', 'test', 'really', 'met', 'harry',
'yesterday']
assert_equal(wa(text), expected)
wa = Vectorizer(input='file').build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['this', 'is', 'test', 'with', 'file', 'like',
'object']
assert_equal(wa(text), expected)
# with custom preprocessor
wa = Vectorizer(preprocessor=uppercase).build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
" c'\xe9tait pas tr\xeas bon.")
expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
'ETAIT', 'PAS', 'TRES', 'BON']
assert_equal(wa(text), expected)
# with custom tokenizer
wa = Vectorizer(tokenizer=split_tokenize,
strip_accents='ascii').build_analyzer()
text = ("J'ai mang\xe9 du kangourou ce midi, "
"c'\xe9tait pas tr\xeas bon.")
expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
"c'etait", 'pas', 'tres', 'bon.']
assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
wa = CountVectorizer(analyzer="word", strip_accents='unicode',
ngram_range=(1, 2)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
'etait pas', 'pas tres', 'tres bon']
assert_equal(wa(text), expected)
def test_unicode_decode_error():
# decode_error default to strict, so this should fail
# First, encode (as bytes) a unicode string.
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
text_bytes = text.encode('utf-8')
# Then let the Analyzer try to decode it as ascii. It should fail,
# because we have given it an incorrect encoding.
wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, wa, text_bytes)
ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
encoding='ascii').build_analyzer()
assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
assert_equal(cnga(text)[:5], expected)
expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
assert_equal(cnga(text)[-5:], expected)
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("This is a test with a file-like object!")
expected = ['thi', 'his', 'is ', 's i', ' is']
assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
ngram_range=(3, 6)).build_analyzer()
text = "This \n\tis a test, really.\n\n I met Harry yesterday"
expected = [' th', 'thi', 'his', 'is ', ' thi']
assert_equal(cnga(text)[:5], expected)
expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
assert_equal(cnga(text)[-5:], expected)
cnga = CountVectorizer(input='file', analyzer='char_wb',
ngram_range=(3, 6)).build_analyzer()
text = StringIO("A test with a file-like object!")
expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
vocab = {"pizza": 0, "beer": 1}
terms = set(vocab.keys())
# Try a few of the supported types.
for typ in [dict, list, iter, partial(defaultdict, int)]:
v = typ(vocab)
vect = CountVectorizer(vocabulary=v)
vect.fit(JUNK_FOOD_DOCS)
if isinstance(v, Mapping):
assert_equal(vect.vocabulary_, vocab)
else:
assert_equal(set(vect.vocabulary_), terms)
X = vect.transform(JUNK_FOOD_DOCS)
assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
what_we_like = ["pizza", "beer"]
pipe = Pipeline([
('count', CountVectorizer(vocabulary=what_we_like)),
('tfidf', TfidfTransformer())])
X = pipe.fit_transform(ALL_FOOD_DOCS)
assert_equal(set(pipe.named_steps['count'].vocabulary_),
set(what_we_like))
assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indices():
vocab = {"pizza": 0, "beer": 0}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("vocabulary contains repeated indices", str(e).lower())
def test_countvectorizer_custom_vocabulary_gap_index():
vocab = {"pizza": 1, "beer": 2}
try:
CountVectorizer(vocabulary=vocab)
except ValueError as e:
assert_in("doesn't contain index", str(e).lower())
def test_countvectorizer_stop_words():
cv = CountVectorizer()
cv.set_params(stop_words='english')
assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
cv.set_params(stop_words='_bad_str_stop_')
assert_raises(ValueError, cv.get_stop_words)
cv.set_params(stop_words='_bad_unicode_stop_')
assert_raises(ValueError, cv.get_stop_words)
stoplist = ['some', 'other', 'words']
cv.set_params(stop_words=stoplist)
assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
try:
vect = CountVectorizer(vocabulary=[])
vect.fit(["foo"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
try:
v = CountVectorizer(max_df=1.0, stop_words="english")
# fit on stopwords only
v.fit(["to be or not to be", "and me too", "and so do you"])
assert False, "we shouldn't get here"
except ValueError as e:
assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
cv = CountVectorizer()
X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# this is robust to features with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=True, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
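# Quick illustration (a sketch of sklearn's documented behavior):
# smooth_idf=True acts as if one extra document containing every term
# were present, so idf stays finite even for an all-zero feature column,
# which is what the robustness check above relies on.
def _smooth_idf_example(n_samples, df):
    # smoothed idf as used by TfidfTransformer: df -> df + 1, n -> n + 1
    return np.log((1.0 + n_samples) / (1.0 + df)) + 1.0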
def test_tfidf_no_smoothing():
X = [[1, 1, 1],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
tfidf = tr.fit_transform(X).toarray()
assert_true((tfidf >= 0).all())
# check normalization
assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
# the lack of smoothing makes IDF fragile in the presence of a feature
# with only zeros
X = [[1, 1, 0],
[1, 1, 0],
[1, 0, 0]]
tr = TfidfTransformer(smooth_idf=False, norm='l2')
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
1. / np.array([0.])
numpy_provides_div0_warning = len(w) == 1
in_warning_message = 'divide by zero'
tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
tr.fit_transform, X).toarray()
if not numpy_provides_div0_warning:
raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
X = [[1], [2], [3]]
tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
tfidf = tr.fit_transform(X).toarray()
assert_equal(tfidf[0], 1)
assert_greater(tfidf[1], tfidf[0])
assert_greater(tfidf[2], tfidf[1])
assert_less(tfidf[1], 2)
assert_less(tfidf[2], 3)
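# Quick illustration (not part of the suite): sublinear_tf replaces a raw
# count tf with 1 + log(tf), which is why the assertions above expect the
# transformed values for counts 2 and 3 to stay below 2 and 3.
def _sublinear_tf_example():
    counts = np.array([1., 2., 3.])
    return 1 + np.log(counts)  # -> [1.0, ~1.69, ~2.10]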
def test_vectorizer():
# raw documents as an iterator
train_data = iter(ALL_FOOD_DOCS[:-1])
test_data = [ALL_FOOD_DOCS[-1]]
n_train = len(ALL_FOOD_DOCS) - 1
# test without vocabulary
v1 = CountVectorizer(max_df=0.5)
counts_train = v1.fit_transform(train_data)
if hasattr(counts_train, 'tocsr'):
counts_train = counts_train.tocsr()
assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
# build a vectorizer v2 with the same vocabulary as the one fitted by v1
v2 = CountVectorizer(vocabulary=v1.vocabulary_)
# compare that the two vectorizer give the same output on the test sample
for v in (v1, v2):
counts_test = v.transform(test_data)
if hasattr(counts_test, 'tocsr'):
counts_test = counts_test.tocsr()
vocabulary = v.vocabulary_
assert_equal(counts_test[0, vocabulary["salad"]], 1)
assert_equal(counts_test[0, vocabulary["tomato"]], 1)
assert_equal(counts_test[0, vocabulary["water"]], 1)
# stop word from the fixed list
assert_false("the" in vocabulary)
# stop word found automatically by the vectorizer DF thresholding
# words that are highly frequent across the complete corpus are likely
# to be uninformative (either real stop words or extraction
# artifacts)
assert_false("copyright" in vocabulary)
# not present in the sample
assert_equal(counts_test[0, vocabulary["coke"]], 0)
assert_equal(counts_test[0, vocabulary["burger"]], 0)
assert_equal(counts_test[0, vocabulary["beer"]], 0)
assert_equal(counts_test[0, vocabulary["pizza"]], 0)
# test tf-idf
t1 = TfidfTransformer(norm='l1')
tfidf = t1.fit(counts_train).transform(counts_train).toarray()
assert_equal(len(t1.idf_), len(v1.vocabulary_))
assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
# test tf-idf with new data
tfidf_test = t1.transform(counts_test).toarray()
assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
# test tf alone
t2 = TfidfTransformer(norm='l1', use_idf=False)
tf = t2.fit(counts_train).transform(counts_train).toarray()
assert_equal(t2.idf_, None)
# test idf transform with unlearned idf vector
t3 = TfidfTransformer(use_idf=True)
assert_raises(ValueError, t3.transform, counts_train)
# test idf transform with incompatible n_features
X = [[1, 1, 5],
[1, 1, 0]]
t3.fit(X)
X_incompt = [[1, 3],
[1, 3]]
assert_raises(ValueError, t3.transform, X_incompt)
# L1-normalized term frequencies sum to one
assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
# test the direct tfidf vectorizer
# (equivalent to term count vectorizer + tfidf transformer)
train_data = iter(ALL_FOOD_DOCS[:-1])
tv = TfidfVectorizer(norm='l1')
tv.max_df = v1.max_df
tfidf2 = tv.fit_transform(train_data).toarray()
assert_false(tv.fixed_vocabulary_)
assert_array_almost_equal(tfidf, tfidf2)
# test the direct tfidf vectorizer with new data
tfidf_test2 = tv.transform(test_data).toarray()
assert_array_almost_equal(tfidf_test, tfidf_test2)
# test transform on unfitted vectorizer with empty vocabulary
v3 = CountVectorizer(vocabulary=None)
assert_raises(ValueError, v3.transform, train_data)
# ascii preprocessor?
v3.set_params(strip_accents='ascii', lowercase=False)
assert_equal(v3.build_preprocessor(), strip_accents_ascii)
# error on bad strip_accents param
v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
assert_raises(ValueError, v3.build_preprocessor)
# error with bad analyzer type
v3.set_params(analyzer='_invalid_analyzer_type_')
assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
sublinear_tf=False)
tv.norm = 'l1'
assert_equal(tv._tfidf.norm, 'l1')
tv.use_idf = True
assert_true(tv._tfidf.use_idf)
tv.smooth_idf = True
assert_true(tv._tfidf.smooth_idf)
tv.sublinear_tf = True
assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
v = HashingVectorizer()
X = v.transform(ALL_FOOD_DOCS)
token_nnz = X.nnz
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# By default the hashed values receive a random sign and l2 normalization
# makes the feature values bounded
assert_true(np.min(X.data) > -1)
assert_true(np.min(X.data) < 0)
assert_true(np.max(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
# Check vectorization with some non-default parameters
v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
X = v.transform(ALL_FOOD_DOCS)
assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
assert_equal(X.dtype, v.dtype)
# ngrams generate more non zeros
ngrams_nnz = X.nnz
assert_true(ngrams_nnz > token_nnz)
assert_true(ngrams_nnz < 2 * token_nnz)
# makes the feature values bounded
assert_true(np.min(X.data) > 0)
assert_true(np.max(X.data) < 1)
# Check that the rows are normalized
for i in range(X.shape[0]):
assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
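# Quick illustration (not part of the suite): the hashing trick maps each
# token straight to a column index, so no vocabulary is stored and
# transform() works without a prior fit().
def _hashing_demo():
    hv = HashingVectorizer(n_features=8)
    return hv.transform(['hello world']).shape  # -> (1, 8)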
def test_feature_names():
cv = CountVectorizer(max_df=0.5)
# test for Value error on unfitted/empty vocabulary
assert_raises(ValueError, cv.get_feature_names)
X = cv.fit_transform(ALL_FOOD_DOCS)
n_samples, n_features = X.shape
assert_equal(len(cv.vocabulary_), n_features)
feature_names = cv.get_feature_names()
assert_equal(len(feature_names), n_features)
assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
'salad', 'sparkling', 'tomato', 'water'],
feature_names)
for idx, name in enumerate(feature_names):
assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
vec_factories = (
CountVectorizer,
TfidfVectorizer,
)
expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
u'sparkling', u'water', u'the'])
for vec_factory in vec_factories:
# test bounded number of extracted features
vectorizer = vec_factory(max_df=0.6, max_features=4)
vectorizer.fit(ALL_FOOD_DOCS)
assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
# Regression test: max_features didn't work correctly in 0.14.
cv_1 = CountVectorizer(max_features=1)
cv_3 = CountVectorizer(max_features=3)
cv_None = CountVectorizer(max_features=None)
counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
features_1 = cv_1.get_feature_names()
features_3 = cv_3.get_feature_names()
features_None = cv_None.get_feature_names()
# The most common feature is "the", with frequency 7.
assert_equal(7, counts_1.max())
assert_equal(7, counts_3.max())
assert_equal(7, counts_None.max())
# The most common feature should be the same
assert_equal("the", features_1[np.argmax(counts_1)])
assert_equal("the", features_3[np.argmax(counts_3)])
assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', max_df=1.0)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.max_df = 0.5 # 0.5 * 3 documents -> max_doc_count == 1.5
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
vect.max_df = 1
vect.fit(test_data)
assert_true('a' not in vect.vocabulary_.keys()) # {ae} ignored
assert_equal(len(vect.vocabulary_.keys()), 4) # {bcdt} remain
assert_true('a' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
test_data = ['abc', 'dea', 'eat']
vect = CountVectorizer(analyzer='char', min_df=1)
vect.fit(test_data)
assert_true('a' in vect.vocabulary_.keys())
assert_equal(len(vect.vocabulary_.keys()), 6)
assert_equal(len(vect.stop_words_), 0)
vect.min_df = 2
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdt} ignored
assert_equal(len(vect.vocabulary_.keys()), 2) # {ae} remain
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 4)
vect.min_df = 0.8 # 0.8 * 3 documents -> min_doc_count == 2.4
vect.fit(test_data)
assert_true('c' not in vect.vocabulary_.keys()) # {bcdet} ignored
assert_equal(len(vect.vocabulary_.keys()), 1) # {a} remains
assert_true('c' in vect.stop_words_)
assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = CountVectorizer(analyzer='char', max_df=1.0)
X = vect.fit_transform(test_data).toarray()
assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
assert_array_equal([[3, 1, 1, 0, 0],
[1, 2, 0, 1, 1]], X)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
X = vect.fit_transform(test_data).toarray()
assert_array_equal([[1, 1, 1, 0, 0],
[1, 1, 0, 1, 1]], X)
# check the ability to change the dtype
vect = CountVectorizer(analyzer='char', max_df=1.0,
binary=True, dtype=np.float32)
X_sparse = vect.fit_transform(test_data)
assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
# by default multiple occurrences are counted as longs
test_data = ['aaabc', 'abbde']
vect = HashingVectorizer(analyzer='char', non_negative=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X[0:1].data), 3)
assert_equal(np.max(X[1:2].data), 2)
assert_equal(X.dtype, np.float64)
# using boolean features, we can fetch the binary occurrence info
# instead.
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None)
X = vect.transform(test_data)
assert_equal(np.max(X.data), 1)
assert_equal(X.dtype, np.float64)
# check the ability to change the dtype
vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
norm=None, dtype=np.float64)
X = vect.transform(test_data)
assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
# raw documents
data = ALL_FOOD_DOCS
for vectorizer in (TfidfVectorizer(), CountVectorizer()):
transformed_data = vectorizer.fit_transform(data)
inversed_data = vectorizer.inverse_transform(transformed_data)
analyze = vectorizer.build_analyzer()
for doc, inversed_terms in zip(data, inversed_data):
terms = np.sort(np.unique(analyze(doc)))
inversed_terms = np.sort(np.unique(inversed_terms))
assert_array_equal(terms, inversed_terms)
# Test that inverse_transform also works with numpy arrays
transformed_data = transformed_data.toarray()
inversed_data2 = vectorizer.inverse_transform(transformed_data)
for terms, terms2 in zip(inversed_data, inversed_data2):
assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# every candidate model reaches 100% accuracy on this toy dataset, so
# grid search keeps the first parameter combination it evaluated (the
# unigram representation) as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# every candidate model reaches 100% accuracy on this toy dataset, so
# grid search keeps the first parameter combination it evaluated (the
# unigram representation) as the best estimator
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
cv_scores = cross_val_score(pipeline, data, target, cv=3)
assert_array_equal(cv_scores, [1., 1., 1.])
def test_vectorizer_unicode():
# tests that the count vectorizer works with cyrillic.
document = (
"\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
"\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
"\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
"\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
"\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
"\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
"\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
"\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
"\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
"\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
"\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
"\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
"\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
"\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
"\x8f.")
vect = CountVectorizer()
X_counted = vect.fit_transform([document])
assert_equal(X_counted.shape, (1, 15))
vect = HashingVectorizer(norm=None, non_negative=True)
X_hashed = vect.transform([document])
assert_equal(X_hashed.shape, (1, 2 ** 20))
# No collisions on such a small dataset
assert_equal(X_counted.nnz, X_hashed.nnz)
# When norm is None and non_negative, the tokens are counted up to
# collisions
assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
# non regression smoke test for inheritance issues
vocabulary = ['pizza', 'celeri']
vect = TfidfVectorizer(vocabulary=vocabulary)
X_1 = vect.fit_transform(ALL_FOOD_DOCS)
X_2 = vect.transform(ALL_FOOD_DOCS)
assert_array_almost_equal(X_1.toarray(), X_2.toarray())
assert_true(vect.fixed_vocabulary_)
def test_pickling_vectorizer():
instances = [
HashingVectorizer(),
HashingVectorizer(norm='l1'),
HashingVectorizer(binary=True),
HashingVectorizer(ngram_range=(1, 2)),
CountVectorizer(),
CountVectorizer(preprocessor=strip_tags),
CountVectorizer(analyzer=lazy_analyze),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
TfidfVectorizer(),
TfidfVectorizer(analyzer=lazy_analyze),
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
]
for orig in instances:
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_equal(copy.get_params(), orig.get_params())
assert_array_equal(
copy.fit_transform(JUNK_FOOD_DOCS).toarray(),
orig.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_stop_words_removal():
# Ensure that deleting the stop_words_ attribute doesn't affect transform
fitted_vectorizers = (
TfidfVectorizer().fit(JUNK_FOOD_DOCS),
CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
)
for vect in fitted_vectorizers:
vect_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
vect.stop_words_ = None
stop_None_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
delattr(vect, 'stop_words_')
stop_del_transform = vect.transform(JUNK_FOOD_DOCS).toarray()
assert_array_equal(stop_None_transform, vect_transform)
assert_array_equal(stop_del_transform, vect_transform)
def test_pickling_transformer():
X = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
orig = TfidfTransformer().fit(X)
s = pickle.dumps(orig)
copy = pickle.loads(s)
assert_equal(type(copy), orig.__class__)
assert_array_equal(
copy.fit_transform(X).toarray(),
orig.fit_transform(X).toarray())
def test_non_unique_vocab():
vocab = ['a', 'b', 'c', 'a', 'a']
vect = CountVectorizer(vocabulary=vocab)
assert_raises(ValueError, vect.fit, [])
def test_hashingvectorizer_nan_in_docs():
# np.nan can appear when using pandas to load text fields from a csv file
# with missing values.
message = "np.nan is an invalid document, expected byte or unicode string."
exception = ValueError
def func():
hv = HashingVectorizer()
hv.fit_transform(['hello world', np.nan, 'hello hello'])
assert_raise_message(exception, message, func)
def test_tfidfvectorizer_binary():
# Non-regression test: TfidfVectorizer used to ignore its "binary" param.
v = TfidfVectorizer(binary=True, use_idf=False, norm=None)
assert_true(v.binary)
X = v.fit_transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X.ravel(), [1, 1, 1, 0])
X2 = v.transform(['hello world', 'hello hello']).toarray()
assert_array_equal(X2.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
vect = TfidfVectorizer(use_idf=True)
vect.fit(JUNK_FOOD_DOCS)
assert_array_almost_equal(vect.idf_, vect._tfidf.idf_)
def test_vectorizer_vocab_clone():
vect_vocab = TfidfVectorizer(vocabulary=["the"])
vect_vocab_clone = clone(vect_vocab)
vect_vocab.fit(ALL_FOOD_DOCS)
vect_vocab_clone.fit(ALL_FOOD_DOCS)
assert_equal(vect_vocab_clone.vocabulary_, vect_vocab.vocabulary_)
| bsd-3-clause |
alexanderturner/ansible | lib/ansible/plugins/lookup/env.py | 251 | 1071 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.plugins.lookup import LookupBase
class LookupModule(LookupBase):
def run(self, terms, variables, **kwargs):
ret = []
for term in terms:
var = term.split()[0]
ret.append(os.getenv(var, ''))
return ret
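# Usage sketch (illustrative, not part of this file): in a playbook or
# template,
#   "{{ lookup('env', 'HOME') }}"
# returns the value of $HOME, or '' when the variable is unset; only the
# first whitespace-separated token of each term is used as the variable name.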
| gpl-3.0 |
Kimanicodes/wananchi | AfricasTalkingGateway.py | 1 | 8915 | """
COPYRIGHT (C) 2014 AFRICASTALKING LTD <www.africastalking.com> #
THE AFRICASTALKING SMS GATEWAY CLASS IS FREE SOFTWARE, I.E. IT CAN BE MODIFIED AND/OR REDISTRIBUTED
UNDER THE TERMS OF THE GNU GENERAL PUBLIC LICENSE AS PUBLISHED BY THE
FREE SOFTWARE FOUNDATION, VERSION 3 OR ANY LATER VERSION.
THE CLASS IS DISTRIBUTED ON AN 'AS IS' BASIS WITHOUT ANY WARRANTY, INCLUDING BUT NOT LIMITED TO
THE IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import urllib
import urllib2
import json
class AfricasTalkingGatewayException(Exception):
pass
class AfricasTalkingGateway:
def __init__(self, username_, apiKey_):
self.username = username_
self.apiKey = apiKey_
self.SMSURLString = "https://api.africastalking.com/version1/messaging"
self.VoiceURLString = "https://voice.africastalking.com"
self.SubscriptionURLString = "https://api.africastalking.com/version1/subscription"
self.UserDataURLString = "https://api.africastalking.com/version1/user"
self.AirtimeUrlString = "https://api.africastalking.com/version1/airtime"
self.headers = {'Accept': 'application/json', 'apikey': apiKey_}
self.HTTP_RESPONSE_OK = 200
self.HTTP_RESPONSE_CREATED = 201
# Turn this on if you run into problems. It will print the raw HTTP
# response from our server
self.Debug = False
# Messaging methods
def sendMessage(self, to_, message_, from_=None, bulkSMSMode_=1, enqueue_=0, keyword_=None, linkId_=None, retryDurationInHours_=None):
if len(to_) == 0 or len(message_) == 0:
raise AfricasTalkingGatewayException(
"Please provide both to_ and message_ parameters")
parameters = {'username': self.username,
'to': to_,
'message': message_,
'bulkSMSMode': bulkSMSMode_}
        if from_ is not None:
            parameters["from"] = from_
        if enqueue_ > 0:
            parameters["enqueue"] = enqueue_
        if keyword_ is not None:
            parameters["keyword"] = keyword_
        if linkId_ is not None:
            parameters["linkId"] = linkId_
        if retryDurationInHours_ is not None:
            parameters["retryDurationInHours"] = retryDurationInHours_
response = self.sendRequest(self.SMSURLString, parameters)
if self.responseCode == self.HTTP_RESPONSE_CREATED:
decoded = json.loads(response)
recipients = decoded['SMSMessageData']['Recipients']
if len(recipients) > 0:
return recipients
raise AfricasTalkingGatewayException(
decoded['SMSMessageData']['Message'])
raise AfricasTalkingGatewayException(response)
def fetchMessages(self, lastReceivedId_=0):
url = "%s?username=%s&lastReceivedId=%s" % (
self.SMSURLString, self.username, lastReceivedId_)
response = self.sendRequest(url)
if self.responseCode == self.HTTP_RESPONSE_OK:
decoded = json.loads(response)
return decoded['SMSMessageData']['Messages']
raise AfricasTalkingGatewayException(response)
# Subscription methods
def createSubscription(self, phoneNumber_, shortCode_, keyword_):
if len(phoneNumber_) == 0 or len(shortCode_) == 0 or len(keyword_) == 0:
raise AfricasTalkingGatewayException(
"Please supply phone number, short code and keyword")
url = "%s/create" % (self.SubscriptionURLString)
parameters = {
'username': self.username,
'phoneNumber': phoneNumber_,
'shortCode': shortCode_,
'keyword': keyword_
}
response = self.sendRequest(url, parameters)
if self.responseCode == self.HTTP_RESPONSE_CREATED:
decoded = json.loads(response)
return decoded
raise AfricasTalkingGatewayException(response)
def deleteSubscription(self, phoneNumber_, shortCode_, keyword_):
if len(phoneNumber_) == 0 or len(shortCode_) == 0 or len(keyword_) == 0:
raise AfricasTalkingGatewayException(
"Please supply phone number, short code and keyword")
url = "%s/delete" % (self.SubscriptionURLString)
parameters = {
'username': self.username,
'phoneNumber': phoneNumber_,
'shortCode': shortCode_,
'keyword': keyword_
}
response = self.sendRequest(url, parameters)
if self.responseCode == self.HTTP_RESPONSE_CREATED:
decoded = json.loads(response)
return decoded
raise AfricasTalkingGatewayException(response)
def fetchPremiumSubscriptions(self, shortCode_, keyword_, lastReceivedId_=0):
if len(shortCode_) == 0 or len(keyword_) == 0:
raise AfricasTalkingGatewayException(
"Please supply the short code and keyword")
url = "%s?username=%s&shortCode=%s&keyword=%s&lastReceivedId=%s" % (
self.SubscriptionURLString, self.username, shortCode_, keyword_, lastReceivedId_)
result = self.sendRequest(url)
if self.responseCode == self.HTTP_RESPONSE_OK:
decoded = json.loads(result)
return decoded['responses']
        raise AfricasTalkingGatewayException(result)
# Voice methods
def call(self, from_, to_):
parameters = {
'username': self.username,
'from': from_, 'to': to_
}
url = "%s/call" % (self.VoiceURLString)
response = self.sendRequest(url, parameters)
decoded = json.loads(response)
if decoded['errorMessage'] == "None":
return decoded['entries']
raise AfricasTalkingGatewayException(decoded['errorMessage'])
def getNumQueuedCalls(self, phoneNumber_, queueName_=None):
parameters = {
'username': self.username,
'phoneNumbers': phoneNumber_
}
if queueName_ is not None:
parameters['queueName'] = queueName_
url = "%s/queueStatus" % (self.VoiceURLString)
response = self.sendRequest(url, parameters)
decoded = json.loads(response)
if decoded['errorMessage'] == "None":
return decoded['entries']
        raise AfricasTalkingGatewayException(decoded['errorMessage'])
def uploadMediaFile(self, urlString_):
parameters = {
'username': self.username,
'url': urlString_
}
url = "%s/mediaUpload" % (self.VoiceURLString)
response = self.sendRequest(url, parameters)
decoded = json.loads(response)
if decoded['errorMessage'] != "None":
raise AfricasTalkingGatewayException(decoded['errorMessage'])
# Airtime method
def sendAirtime(self, recipients_):
parameters = {
'username': self.username,
'recipients': json.dumps(recipients_)
}
SendAirtimeUrlString = "%s/send" % (self.AirtimeUrlString)
response = self.sendRequest(SendAirtimeUrlString, parameters)
decoded = json.loads(response)
responses = decoded['responses']
if self.responseCode == self.HTTP_RESPONSE_CREATED:
if len(responses) > 0:
return responses
raise AfricasTalkingGatewayException(decoded["errorMessage"])
raise AfricasTalkingGatewayException(response)
# Userdata method
def getUserData(self):
url = "%s?username=%s" % (self.UserDataURLString, self.username)
result = self.sendRequest(url)
if self.responseCode == self.HTTP_RESPONSE_OK:
decoded = json.loads(result)
return decoded['UserData']
        raise AfricasTalkingGatewayException(result)
# HTTP access method
def sendRequest(self, urlString, data_=None):
try:
if data_ is not None:
data = urllib.urlencode(data_)
request = urllib2.Request(
urlString, data, headers=self.headers)
else:
request = urllib2.Request(urlString, headers=self.headers)
response = urllib2.urlopen(request)
except Exception as e:
raise AfricasTalkingGatewayException(str(e))
else:
self.responseCode = response.getcode()
response = response.read()
if self.Debug:
print response
return response
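# Minimal usage sketch (hedged: the credentials and phone number below are
# placeholders, and the 'number'/'status' fields are assumptions about the
# JSON payload returned by the API):
#
#   gateway = AfricasTalkingGateway("myUsername", "myApiKey")
#   try:
#       recipients = gateway.sendMessage("+254711XXXYYY", "Hello!")
#       for recipient in recipients:
#           print "%s: %s" % (recipient['number'], recipient['status'])
#   except AfricasTalkingGatewayException as e:
#       print "Encountered an error: %s" % str(e)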
| mit |
BIDS-collaborative/cega-trace | Jack&John/scholar.py | 2 | 48634 | #! /usr/bin/env python
"""
This module provides classes for querying Google Scholar and parsing
returned results. It currently *only* processes the first results
page. It is not a recursive crawler.
"""
# ChangeLog
# ---------
#
# 2.9 Fixed Unicode problem in certain queries. Thanks to smidm for
# this contribution.
#
# 2.8 Improved quotation-mark handling for multi-word phrases in
# queries. Also, log URLs %-decoded in debugging output, for
# easier interpretation.
#
# 2.7 Ability to extract content excerpts as reported in search results.
# Also a fix to -s|--some and -n|--none: these did not yet support
# passing lists of phrases. This now works correctly if you provide
# separate phrases via commas.
#
# 2.6 Ability to disable inclusion of patents and citations. This
# has the same effect as unchecking the two patents/citations
# checkboxes in the Scholar UI, which are checked by default.
# Accordingly, the command-line options are --no-patents and
# --no-citations.
#
# 2.5: Ability to parse global result attributes. This right now means
# only the total number of results as reported by Scholar at the
# top of the results pages (e.g. "About 31 results"). Such
# global result attributes end up in the new attrs member of the
# used ScholarQuery class. To render those attributes, you need
# to use the new --txt-globals flag.
#
# Rendering global results is currently not supported for CSV
# (as they don't fit the one-line-per-article pattern). For
# grepping, you can separate the global results from the
# per-article ones by looking for a line prefix of "[G]":
#
# $ scholar.py --txt-globals -a "Einstein"
# [G] Results 11900
#
# Title Can quantum-mechanical description of physical reality be considered complete?
# URL http://journals.aps.org/pr/abstract/10.1103/PhysRev.47.777
# Year 1935
# Citations 12804
# Versions 80
# Cluster ID 8174092782678430881
# Citations list http://scholar.google.com/scholar?cites=8174092782678430881&as_sdt=2005&sciodt=0,5&hl=en
# Versions list http://scholar.google.com/scholar?cluster=8174092782678430881&hl=en&as_sdt=0,5
#
# 2.4: Bugfixes:
#
# - Correctly handle Unicode characters when reporting results
# in text format.
#
# - Correctly parse citation-only (i.e. linkless) results in
# Google Scholar results.
#
# 2.3: Additional features:
#
# - Direct extraction of first PDF version of an article
#
# - Ability to pull up an article cluster's results directly.
#
# This is based on work from @aliparsai on GitHub -- thanks!
#
# - Suppress missing search results (so far shown as "None" in
# the textual output form.
#
# 2.2: Added a logging option that reports full HTML contents, for
# debugging, as well as incrementally more detailed logging via
# -d up to -dddd.
#
# 2.1: Additional features:
#
# - Improved cookie support: the new --cookie-file options
# allows the reuse of a cookie across invocations of the tool;
# this allows higher query rates than would otherwise result
# when invoking scholar.py repeatedly.
#
# - Workaround: remove the num= URL-encoded argument from parsed
# URLs. For some reason, Google Scholar decides to propagate
# the value from the original query into the URLs embedded in
# the results.
#
# 2.0: Thorough overhaul of design, with substantial improvements:
#
# - Full support for advanced search arguments provided by
# Google Scholar
#
# - Support for retrieval of external citation formats, such as
# BibTeX or EndNote
#
# - Simple logging framework to track activity during execution
#
# 1.7: Python 3 and BeautifulSoup 4 compatibility, as well as printing
# of usage info when no options are given. Thanks to Pablo
# Oliveira (https://github.com/pablooliveira)!
#
# Also a bunch of pylinting and code cleanups.
#
# 1.6: Cookie support, from Matej Smid (https://github.com/palmstrom).
#
# 1.5: A few changes:
#
# - Tweak suggested by Tobias Isenberg: use unicode during CSV
# formatting.
#
# - The option -c|--count now understands numbers up to 100 as
# well. Likewise suggested by Tobias.
#
# - By default, text rendering mode is now active. This avoids
# confusion when playing with the script, as it used to report
# nothing when the user didn't select an explicit output mode.
#
# 1.4: Updates to reflect changes in Scholar's page rendering,
# contributed by Amanda Hay at Tufts -- thanks!
#
# 1.3: Updates to reflect changes in Scholar's page rendering.
#
# 1.2: Minor tweaks, mostly thanks to helpful feedback from Dan Bolser.
# Thanks Dan!
#
# 1.1: Made author field explicit, added --author option.
#
# Don't complain about missing docstrings: pylint: disable-msg=C0111
#
# Copyright 2010--2014 Christian Kreibich. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import optparse
import os
import sys
import re
try:
# Try importing for Python 3
# pylint: disable-msg=F0401
# pylint: disable-msg=E0611
from urllib.request import HTTPCookieProcessor, Request, build_opener
from urllib.parse import quote, unquote
from http.cookiejar import MozillaCookieJar
except ImportError:
# Fallback for Python 2
from urllib2 import Request, build_opener, HTTPCookieProcessor
from urllib import quote, unquote
from cookielib import MozillaCookieJar
# Import BeautifulSoup -- try 4 first, fall back to older
try:
from bs4 import BeautifulSoup
except ImportError:
try:
from BeautifulSoup import BeautifulSoup
except ImportError:
print('We need BeautifulSoup, sorry...')
sys.exit(1)
# Support unicode in both Python 2 and 3. In Python 3, unicode is str.
if sys.version_info[0] == 3:
unicode = str # pylint: disable-msg=W0622
encode = lambda s: s # pylint: disable-msg=C0103
else:
def encode(s):
if isinstance(s, basestring):
return s.encode('utf-8') # pylint: disable-msg=C0103
else:
return str(s)
class Error(Exception):
"""Base class for any Scholar error."""
class FormatError(Error):
"""A query argument or setting was formatted incorrectly."""
class QueryArgumentError(Error):
"""A query did not have a suitable set of arguments."""
class ScholarConf(object):
"""Helper class for global settings."""
VERSION = '2.9'
LOG_LEVEL = 1
MAX_PAGE_RESULTS = 20 # Current maximum for per-page results
SCHOLAR_SITE = 'http://scholar.google.com'
# USER_AGENT = 'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.9.2.9) Gecko/20100913 Firefox/3.6.9'
# Let's update at this point (3/14):
USER_AGENT = 'Mozilla/5.0 (X11; Linux x86_64; rv:27.0) Gecko/20100101 Firefox/27.0'
# If set, we will use this file to read/save cookies to enable
# cookie use across sessions.
COOKIE_JAR_FILE = None
class ScholarUtils(object):
"""A wrapper for various utensils that come in handy."""
LOG_LEVELS = {'error': 1,
'warn': 2,
'info': 3,
'debug': 4}
@staticmethod
def ensure_int(arg, msg=None):
try:
return int(arg)
except ValueError:
raise FormatError(msg)
@staticmethod
def log(level, msg):
if level not in ScholarUtils.LOG_LEVELS.keys():
return
if ScholarUtils.LOG_LEVELS[level] > ScholarConf.LOG_LEVEL:
return
sys.stderr.write('[%5s] %s' % (level.upper(), msg + '\n'))
sys.stderr.flush()
class ScholarArticle(object):
"""
A class representing articles listed on Google Scholar. The class
provides basic dictionary-like behavior.
"""
def __init__(self):
# The triplets for each keyword correspond to (1) the actual
# value, (2) a user-suitable label for the item, and (3) an
# ordering index:
self.attrs = {
'title': [None, 'Title', 0],
'url': [None, 'URL', 1],
'year': [None, 'Year', 2],
'num_citations': [0, 'Citations', 3],
'num_versions': [0, 'Versions', 4],
'cluster_id': [None, 'Cluster ID', 5],
'url_pdf': [None, 'PDF link', 6],
'url_citations': [None, 'Citations list', 7],
'url_versions': [None, 'Versions list', 8],
'url_citation': [None, 'Citation link', 9],
'excerpt': [None, 'Excerpt', 10],
}
# The citation data in one of the standard export formats,
# e.g. BibTeX.
self.citation_data = None
def __getitem__(self, key):
if key in self.attrs:
return self.attrs[key][0]
return None
def __len__(self):
return len(self.attrs)
def __setitem__(self, key, item):
if key in self.attrs:
self.attrs[key][0] = item
else:
self.attrs[key] = [item, key, len(self.attrs)]
def __delitem__(self, key):
if key in self.attrs:
del self.attrs[key]
def set_citation_data(self, citation_data):
self.citation_data = citation_data
def as_txt(self):
# Get items sorted in specified order:
items = sorted(list(self.attrs.values()), key=lambda item: item[2])
# Find largest label length:
max_label_len = max([len(str(item[1])) for item in items])
fmt = '%%%ds %%s' % max_label_len
res = []
for item in items:
if item[0] is not None:
res.append(fmt % (item[1], item[0]))
return '\n'.join(res)
def as_csv(self, header=False, sep='|'):
# Get keys sorted in specified order:
keys = [pair[0] for pair in \
sorted([(key, val[2]) for key, val in list(self.attrs.items())],
key=lambda pair: pair[1])]
res = []
if header:
res.append(sep.join(keys))
res.append(sep.join([unicode(self.attrs[key][0]) for key in keys]))
return '\n'.join(res)
def as_citation(self):
"""
Reports the article in a standard citation format. This works only
if you have configured the querier to retrieve a particular
citation export format. (See ScholarSettings.)
"""
return self.citation_data or ''
class ScholarArticleParser(object):
"""
ScholarArticleParser can parse HTML document strings obtained from
Google Scholar. This is a base class; concrete implementations
adapting to tweaks made by Google over time follow below.
"""
def __init__(self, site=None):
self.soup = None
self.article = None
self.site = site or ScholarConf.SCHOLAR_SITE
self.year_re = re.compile(r'\b(?:20|19)\d{2}\b')
def handle_article(self, art):
"""
The parser invokes this callback on each article parsed
successfully. In this base class, the callback does nothing.
"""
def handle_num_results(self, num_results):
"""
The parser invokes this callback if it determines the overall
number of results, as reported on the parsed results page. The
base class implementation does nothing.
"""
def parse(self, html):
"""
This method initiates parsing of HTML content, cleans resulting
content as needed, and notifies the parser instance of
resulting instances via the handle_article callback.
"""
self.soup = BeautifulSoup(html)
# This parses any global, non-itemized attributes from the page.
self._parse_globals()
# Now parse out listed articles:
for div in self.soup.findAll(ScholarArticleParser._tag_results_checker):
self._parse_article(div)
self._clean_article()
if self.article['title']:
self.handle_article(self.article)
def _clean_article(self):
"""
This gets invoked after we have parsed an article, to do any
needed cleanup/polishing before we hand off the resulting
article.
"""
if self.article['title']:
self.article['title'] = self.article['title'].strip()
def _parse_globals(self):
tag = self.soup.find(name='div', attrs={'id': 'gs_ab_md'})
if tag is not None:
raw_text = tag.findAll(text=True)
# raw text is a list because the body contains <b> etc
if raw_text is not None and len(raw_text) > 0:
try:
num_results = raw_text[0].split()[1]
# num_results may now contain commas to separate
# thousands, strip:
num_results = num_results.replace(',', '')
num_results = int(num_results)
self.handle_num_results(num_results)
except (IndexError, ValueError):
pass
def _parse_article(self, div):
self.article = ScholarArticle()
for tag in div:
if not hasattr(tag, 'name'):
continue
if tag.name == 'div' and self._tag_has_class(tag, 'gs_rt') and \
tag.h3 and tag.h3.a:
self.article['title'] = ''.join(tag.h3.a.findAll(text=True))
self.article['url'] = self._path2url(tag.h3.a['href'])
if self.article['url'].endswith('.pdf'):
self.article['url_pdf'] = self.article['url']
if tag.name == 'font':
for tag2 in tag:
if not hasattr(tag2, 'name'):
continue
if tag2.name == 'span' and \
self._tag_has_class(tag2, 'gs_fl'):
self._parse_links(tag2)
def _parse_links(self, span):
for tag in span:
if not hasattr(tag, 'name'):
continue
if tag.name != 'a' or tag.get('href') is None:
continue
if tag.get('href').startswith('/scholar?cites'):
if hasattr(tag, 'string') and tag.string.startswith('Cited by'):
self.article['num_citations'] = \
self._as_int(tag.string.split()[-1])
# Weird Google Scholar behavior here: if the original
# search query came with a number-of-results limit,
# then this limit gets propagated to the URLs embedded
# in the results page as well. Same applies to
# versions URL in next if-block.
self.article['url_citations'] = \
self._strip_url_arg('num', self._path2url(tag.get('href')))
# We can also extract the cluster ID from the versions
# URL. Note that we know that the string contains "?",
# from the above if-statement.
args = self.article['url_citations'].split('?', 1)[1]
for arg in args.split('&'):
if arg.startswith('cites='):
self.article['cluster_id'] = arg[6:]
if tag.get('href').startswith('/scholar?cluster'):
if hasattr(tag, 'string') and tag.string.startswith('All '):
self.article['num_versions'] = \
self._as_int(tag.string.split()[1])
self.article['url_versions'] = \
self._strip_url_arg('num', self._path2url(tag.get('href')))
if tag.getText().startswith('Import'):
self.article['url_citation'] = self._path2url(tag.get('href'))
@staticmethod
def _tag_has_class(tag, klass):
"""
        This predicate function checks whether a BeautifulSoup Tag instance
        has the given value in its class attribute.
"""
res = tag.get('class') or []
if type(res) != list:
# BeautifulSoup 3 can return e.g. 'gs_md_wp gs_ttss',
# so split -- conveniently produces a list in any case
res = res.split()
return klass in res
@staticmethod
def _tag_results_checker(tag):
return tag.name == 'div' \
and ScholarArticleParser._tag_has_class(tag, 'gs_r')
@staticmethod
def _as_int(obj):
try:
return int(obj)
except ValueError:
return None
def _path2url(self, path):
"""Helper, returns full URL in case path isn't one."""
if path.startswith('http://'):
return path
if not path.startswith('/'):
path = '/' + path
return self.site + path
def _strip_url_arg(self, arg, url):
"""Helper, removes a URL-encoded argument, if present."""
parts = url.split('?', 1)
if len(parts) != 2:
return url
res = []
for part in parts[1].split('&'):
if not part.startswith(arg + '='):
res.append(part)
return parts[0] + '?' + '&'.join(res)
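    # For example, _strip_url_arg('num', 'http://x/scholar?cites=1&num=20')
    # returns 'http://x/scholar?cites=1'.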
class ScholarArticleParser120201(ScholarArticleParser):
"""
    This class reflects an update to the Scholar results page layout that
    Google made 02/01/12.
"""
def _parse_article(self, div):
self.article = ScholarArticle()
for tag in div:
if not hasattr(tag, 'name'):
continue
if tag.name == 'h3' and self._tag_has_class(tag, 'gs_rt') and tag.a:
self.article['title'] = ''.join(tag.a.findAll(text=True))
self.article['url'] = self._path2url(tag.a['href'])
if self.article['url'].endswith('.pdf'):
self.article['url_pdf'] = self.article['url']
if tag.name == 'div' and self._tag_has_class(tag, 'gs_a'):
year = self.year_re.findall(tag.text)
self.article['year'] = year[0] if len(year) > 0 else None
if tag.name == 'div' and self._tag_has_class(tag, 'gs_fl'):
self._parse_links(tag)
class ScholarArticleParser120726(ScholarArticleParser):
"""
This class reflects update to the Scholar results page layout that
Google made 07/26/12.
"""
def _parse_article(self, div):
self.article = ScholarArticle()
for tag in div:
if not hasattr(tag, 'name'):
continue
            if str(tag).lower().find('.pdf') >= 0:
if tag.find('div', {'class': 'gs_ttss'}):
self._parse_links(tag.find('div', {'class': 'gs_ttss'}))
if tag.name == 'div' and self._tag_has_class(tag, 'gs_ri'):
# There are (at least) two formats here. In the first
# one, we have a link, e.g.:
#
# <h3 class="gs_rt">
# <a href="http://dl.acm.org/citation.cfm?id=972384" class="yC0">
# <b>Honeycomb</b>: creating intrusion detection signatures using
# honeypots
# </a>
# </h3>
#
# In the other, there's no actual link -- it's what
# Scholar renders as "CITATION" in the HTML:
#
# <h3 class="gs_rt">
# <span class="gs_ctu">
# <span class="gs_ct1">[CITATION]</span>
# <span class="gs_ct2">[C]</span>
# </span>
# <b>Honeycomb</b> automated ids signature creation using honeypots
# </h3>
#
# We now distinguish the two.
try:
atag = tag.h3.a
self.article['title'] = ''.join(atag.findAll(text=True))
self.article['url'] = self._path2url(atag['href'])
if self.article['url'].endswith('.pdf'):
self.article['url_pdf'] = self.article['url']
                except Exception:
# Remove a few spans that have unneeded content (e.g. [CITATION])
for span in tag.h3.findAll(name='span'):
span.clear()
self.article['title'] = ''.join(tag.h3.findAll(text=True))
if tag.find('div', {'class': 'gs_a'}):
year = self.year_re.findall(tag.find('div', {'class': 'gs_a'}).text)
self.article['year'] = year[0] if len(year) > 0 else None
if tag.find('div', {'class': 'gs_fl'}):
self._parse_links(tag.find('div', {'class': 'gs_fl'}))
if tag.find('div', {'class': 'gs_rs'}):
# These are the content excerpts rendered into the results.
raw_text = tag.find('div', {'class': 'gs_rs'}).findAll(text=True)
if len(raw_text) > 0:
raw_text = ''.join(raw_text)
raw_text = raw_text.replace('\n', '')
self.article['excerpt'] = raw_text
class ScholarQuery(object):
"""
The base class for any kind of results query we send to Scholar.
"""
def __init__(self):
self.url = None
# The number of results requested from Scholar -- not the
# total number of results it reports (the latter gets stored
# in attrs, see below).
self.num_results = ScholarConf.MAX_PAGE_RESULTS
# Queries may have global result attributes, similar to
# per-article attributes in ScholarArticle. The exact set of
# attributes may differ by query type, but they all share the
# basic data structure:
self.attrs = {}
def set_num_page_results(self, num_page_results):
msg = 'maximum number of results on page must be numeric'
self.num_results = ScholarUtils.ensure_int(num_page_results, msg)
def get_url(self):
"""
Returns a complete, submittable URL string for this particular
query instance. The URL and its arguments will vary depending
on the query.
"""
return None
def _add_attribute_type(self, key, label, default_value=None):
"""
Adds a new type of attribute to the list of attributes
understood by this query. Meant to be used by the constructors
in derived classes.
"""
if len(self.attrs) == 0:
self.attrs[key] = [default_value, label, 0]
return
idx = max([item[2] for item in self.attrs.values()]) + 1
self.attrs[key] = [default_value, label, idx]
def __getitem__(self, key):
"""Getter for attribute value. Returns None if no such key."""
if key in self.attrs:
return self.attrs[key][0]
return None
def __setitem__(self, key, item):
"""Setter for attribute value. Does nothing if no such key."""
if key in self.attrs:
self.attrs[key][0] = item
def _parenthesize_phrases(self, query):
"""
Turns a query string containing comma-separated phrases into a
space-separated list of tokens, quoted if containing
whitespace. For example, input
'some words, foo, bar'
becomes
'"some words" foo bar'
This comes in handy during the composition of certain queries.
"""
if query.find(',') < 0:
return query
phrases = []
for phrase in query.split(','):
phrase = phrase.strip()
if phrase.find(' ') > 0:
phrase = '"' + phrase + '"'
phrases.append(phrase)
return ' '.join(phrases)
class ClusterScholarQuery(ScholarQuery):
"""
This version just pulls up an article cluster whose ID we already
know about.
"""
SCHOLAR_CLUSTER_URL = ScholarConf.SCHOLAR_SITE + '/scholar?' \
+ 'cluster=%(cluster)s' \
+ '&num=%(num)s'
def __init__(self, cluster=None):
ScholarQuery.__init__(self)
self._add_attribute_type('num_results', 'Results', 0)
self.cluster = None
self.set_cluster(cluster)
def set_cluster(self, cluster):
"""
Sets search to a Google Scholar results cluster ID.
"""
msg = 'cluster ID must be numeric'
self.cluster = ScholarUtils.ensure_int(cluster, msg)
def get_url(self):
if self.cluster is None:
raise QueryArgumentError('cluster query needs cluster ID')
urlargs = {'cluster': self.cluster,
'num': self.num_results or ScholarConf.MAX_PAGE_RESULTS}
for key, val in urlargs.items():
urlargs[key] = quote(encode(val))
return self.SCHOLAR_CLUSTER_URL % urlargs
class SearchScholarQuery(ScholarQuery):
"""
This version represents the search query parameters the user can
configure on the Scholar website, in the advanced search options.
"""
SCHOLAR_QUERY_URL = ScholarConf.SCHOLAR_SITE + '/scholar?' \
+ 'as_q=%(words)s' \
+ '&as_epq=%(phrase)s' \
+ '&as_oq=%(words_some)s' \
+ '&as_eq=%(words_none)s' \
+ '&as_occt=%(scope)s' \
+ '&as_sauthors=%(authors)s' \
+ '&as_publication=%(pub)s' \
+ '&as_ylo=%(ylo)s' \
+ '&as_yhi=%(yhi)s' \
+ '&as_sdt=%(patents)s%%2C5' \
+ '&as_vis=%(citations)s' \
+ '&btnG=&hl=en' \
+ '&num=%(num)s'
def __init__(self):
ScholarQuery.__init__(self)
self._add_attribute_type('num_results', 'Results', 0)
self.words = None # The default search behavior
self.words_some = None # At least one of those words
self.words_none = None # None of these words
self.phrase = None
self.scope_title = False # If True, search in title only
self.author = None
self.pub = None
self.timeframe = [None, None]
self.include_patents = True
self.include_citations = True
def set_words(self, words):
"""Sets words that *all* must be found in the result."""
self.words = words
def set_words_some(self, words):
"""Sets words of which *at least one* must be found in result."""
self.words_some = words
def set_words_none(self, words):
"""Sets words of which *none* must be found in the result."""
self.words_none = words
def set_phrase(self, phrase):
"""Sets phrase that must be found in the result exactly."""
self.phrase = phrase
def set_scope(self, title_only):
"""
Sets Boolean indicating whether to search entire article or title
only.
"""
self.scope_title = title_only
def set_author(self, author):
"""Sets names that must be on the result's author list."""
self.author = author
def set_pub(self, pub):
"""Sets the publication in which the result must be found."""
self.pub = pub
def set_timeframe(self, start=None, end=None):
"""
Sets timeframe (in years as integer) in which result must have
appeared. It's fine to specify just start or end, or both.
"""
if start:
start = ScholarUtils.ensure_int(start)
if end:
end = ScholarUtils.ensure_int(end)
self.timeframe = [start, end]
def set_include_citations(self, yesorno):
self.include_citations = yesorno
def set_include_patents(self, yesorno):
self.include_patents = yesorno
def get_url(self):
if self.words is None and self.words_some is None \
and self.words_none is None and self.phrase is None \
and self.author is None and self.pub is None \
and self.timeframe[0] is None and self.timeframe[1] is None:
raise QueryArgumentError('search query needs more parameters')
# If we have some-words or none-words lists, we need to
# process them so GS understands them. For simple
        # space-separated word lists, there's nothing to do. For lists
# of phrases we have to ensure quotations around the phrases,
# separating them by whitespace.
words_some = None
words_none = None
if self.words_some:
words_some = self._parenthesize_phrases(self.words_some)
if self.words_none:
words_none = self._parenthesize_phrases(self.words_none)
urlargs = {'words': self.words or '',
'words_some': words_some or '',
'words_none': words_none or '',
'phrase': self.phrase or '',
'scope': 'title' if self.scope_title else 'any',
'authors': self.author or '',
'pub': self.pub or '',
'ylo': self.timeframe[0] or '',
'yhi': self.timeframe[1] or '',
'patents': '0' if self.include_patents else '1',
'citations': '0' if self.include_citations else '1',
'num': self.num_results or ScholarConf.MAX_PAGE_RESULTS}
for key, val in urlargs.items():
urlargs[key] = quote(encode(val))
return self.SCHOLAR_QUERY_URL % urlargs
class ScholarSettings(object):
"""
This class lets you adjust the Scholar settings for your
session. It's intended to mirror the features tunable in the
Scholar Settings pane, but right now it's a bit basic.
"""
CITFORM_NONE = 0
CITFORM_REFWORKS = 1
CITFORM_REFMAN = 2
CITFORM_ENDNOTE = 3
CITFORM_BIBTEX = 4
def __init__(self):
self.citform = 0 # Citation format, default none
self.per_page_results = ScholarConf.MAX_PAGE_RESULTS
self._is_configured = False
def set_citation_format(self, citform):
citform = ScholarUtils.ensure_int(citform)
if citform < 0 or citform > self.CITFORM_BIBTEX:
raise FormatError('citation format invalid, is "%s"' \
% citform)
self.citform = citform
self._is_configured = True
def set_per_page_results(self, per_page_results):
msg = 'page results must be integer'
self.per_page_results = ScholarUtils.ensure_int(per_page_results, msg)
self.per_page_results = min(self.per_page_results,
ScholarConf.MAX_PAGE_RESULTS)
self._is_configured = True
def is_configured(self):
return self._is_configured
class ScholarQuerier(object):
"""
ScholarQuerier instances can conduct a search on Google Scholar
with subsequent parsing of the resulting HTML content. The
articles found are collected in the articles member, a list of
ScholarArticle instances.
"""
# Default URLs for visiting and submitting Settings pane, as of 3/14
GET_SETTINGS_URL = ScholarConf.SCHOLAR_SITE + '/scholar_settings?' \
+ 'sciifh=1&hl=en&as_sdt=0,5'
SET_SETTINGS_URL = ScholarConf.SCHOLAR_SITE + '/scholar_setprefs?' \
+ 'q=' \
+ '&scisig=%(scisig)s' \
+ '&inststart=0' \
+ '&as_sdt=1,5' \
+ '&as_sdtp=' \
+ '&num=%(num)s' \
+ '&scis=%(scis)s' \
+ '%(scisf)s' \
+ '&hl=en&lang=all&instq=&inst=569367360547434339&save='
# Older URLs:
# ScholarConf.SCHOLAR_SITE + '/scholar?q=%s&hl=en&btnG=Search&as_sdt=2001&as_sdtp=on
class Parser(ScholarArticleParser120726):
def __init__(self, querier):
ScholarArticleParser120726.__init__(self)
self.querier = querier
def handle_num_results(self, num_results):
if self.querier is not None and self.querier.query is not None:
self.querier.query['num_results'] = num_results
def handle_article(self, art):
self.querier.add_article(art)
def __init__(self):
self.articles = []
self.query = None
self.cjar = MozillaCookieJar()
# If we have a cookie file, load it:
if ScholarConf.COOKIE_JAR_FILE and \
os.path.exists(ScholarConf.COOKIE_JAR_FILE):
try:
self.cjar.load(ScholarConf.COOKIE_JAR_FILE,
ignore_discard=True)
ScholarUtils.log('info', 'loaded cookies file')
except Exception as msg:
ScholarUtils.log('warn', 'could not load cookies file: %s' % msg)
self.cjar = MozillaCookieJar() # Just to be safe
self.opener = build_opener(HTTPCookieProcessor(self.cjar))
self.settings = None # Last settings object, if any
def apply_settings(self, settings):
"""
Applies settings as provided by a ScholarSettings instance.
"""
if settings is None or not settings.is_configured():
return True
self.settings = settings
# This is a bit of work. We need to actually retrieve the
# contents of the Settings pane HTML in order to extract
# hidden fields before we can compose the query for updating
# the settings.
html = self._get_http_response(url=self.GET_SETTINGS_URL,
log_msg='dump of settings form HTML',
err_msg='requesting settings failed')
if html is None:
return False
# Now parse the required stuff out of the form. We require the
# "scisig" token to make the upload of our settings acceptable
# to Google.
soup = BeautifulSoup(html)
tag = soup.find(name='form', attrs={'id': 'gs_settings_form'})
if tag is None:
ScholarUtils.log('info', 'parsing settings failed: no form')
return False
tag = tag.find('input', attrs={'type':'hidden', 'name':'scisig'})
if tag is None:
ScholarUtils.log('info', 'parsing settings failed: scisig')
return False
urlargs = {'scisig': tag['value'],
'num': settings.per_page_results,
'scis': 'no',
'scisf': ''}
if settings.citform != 0:
urlargs['scis'] = 'yes'
urlargs['scisf'] = '&scisf=%d' % settings.citform
html = self._get_http_response(url=self.SET_SETTINGS_URL % urlargs,
log_msg='dump of settings result HTML',
                                       err_msg='applying settings failed')
if html is None:
return False
ScholarUtils.log('info', 'settings applied')
return True
def send_query(self, query):
"""
This method initiates a search query (a ScholarQuery instance)
with subsequent parsing of the response.
"""
self.clear_articles()
self.query = query
html = self._get_http_response(url=query.get_url(),
log_msg='dump of query response HTML',
err_msg='results retrieval failed')
if html is None:
return
self.parse(html)
def get_citation_data(self, article):
"""
Given an article, retrieves citation link. Note, this requires that
you adjusted the settings to tell Google Scholar to actually
provide this information, *prior* to retrieving the article.
"""
if article['url_citation'] is None:
return False
if article.citation_data is not None:
return True
ScholarUtils.log('info', 'retrieving citation export data')
data = self._get_http_response(url=article['url_citation'],
log_msg='citation data response',
err_msg='requesting citation data failed')
if data is None:
return False
article.set_citation_data(data)
return True
def parse(self, html):
"""
This method allows parsing of provided HTML content.
"""
parser = self.Parser(self)
parser.parse(html)
def add_article(self, art):
self.get_citation_data(art)
self.articles.append(art)
def clear_articles(self):
"""Clears any existing articles stored from previous queries."""
self.articles = []
def save_cookies(self):
"""
This stores the latest cookies we're using to disk, for reuse in a
later session.
"""
if ScholarConf.COOKIE_JAR_FILE is None:
return False
try:
self.cjar.save(ScholarConf.COOKIE_JAR_FILE,
ignore_discard=True)
ScholarUtils.log('info', 'saved cookies file')
return True
except Exception as msg:
ScholarUtils.log('warn', 'could not save cookies file: %s' % msg)
return False
def _get_http_response(self, url, log_msg=None, err_msg=None):
"""
Helper method, sends HTTP request and returns response payload.
"""
if log_msg is None:
log_msg = 'HTTP response data follow'
if err_msg is None:
err_msg = 'request failed'
try:
ScholarUtils.log('info', 'requesting %s' % unquote(url))
req = Request(url=url, headers={'User-Agent': ScholarConf.USER_AGENT})
hdl = self.opener.open(req)
html = hdl.read()
ScholarUtils.log('debug', log_msg)
ScholarUtils.log('debug', '>>>>' + '-'*68)
ScholarUtils.log('debug', 'url: %s' % hdl.geturl())
ScholarUtils.log('debug', 'result: %s' % hdl.getcode())
ScholarUtils.log('debug', 'headers:\n' + str(hdl.info()))
ScholarUtils.log('debug', 'data:\n' + html.decode('utf-8')) # For Python 3
ScholarUtils.log('debug', '<<<<' + '-'*68)
return html
except Exception as err:
ScholarUtils.log('info', err_msg + ': %s' % err)
return None
def txt(querier, with_globals):
if with_globals:
# If we have any articles, check their attribute labels to get
# the maximum length -- makes for nicer alignment.
max_label_len = 0
if len(querier.articles) > 0:
items = sorted(list(querier.articles[0].attrs.values()),
key=lambda item: item[2])
max_label_len = max([len(str(item[1])) for item in items])
# Get items sorted in specified order:
items = sorted(list(querier.query.attrs.values()), key=lambda item: item[2])
# Find largest label length:
max_label_len = max([len(str(item[1])) for item in items] + [max_label_len])
fmt = '[G] %%%ds %%s' % max(0, max_label_len-4)
for item in items:
if item[0] is not None:
print(fmt % (item[1], item[0]))
if len(items) > 0:
            print('')
articles = querier.articles
for art in articles:
print(encode(art.as_txt()) + '\n')
def csv(querier, header=False, sep='|'):
articles = querier.articles
lst = []
for art in articles:
result = art.as_csv(header=header, sep=sep)
print(encode(result))
lst.append(encode(result))
header = False
return lst
def citation_export(querier):
string = ''
articles = querier.articles
for art in articles:
string += art.as_citation()
return string
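# Programmatic usage sketch (hedged: assumes network access to Google Scholar
# and uses only the classes and helpers defined above):
#
#   querier = ScholarQuerier()
#   settings = ScholarSettings()
#   settings.set_citation_format(ScholarSettings.CITFORM_BIBTEX)
#   querier.apply_settings(settings)
#   query = SearchScholarQuery()
#   query.set_author('albert einstein')
#   query.set_phrase('quantum theory')
#   querier.send_query(query)
#   txt(querier, with_globals=False)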
def main():
usage = """scholar.py [options] <query string>
A command-line interface to Google Scholar.
Examples:
# Retrieve one article written by Einstein on quantum theory:
scholar.py -c 1 --author "albert einstein" --phrase "quantum theory"
# Retrieve a BibTeX entry for that quantum theory paper:
scholar.py -c 1 -C 17749203648027613321 --citation bt
# Retrieve five articles written by Einstein after 1970 where the title
# does not contain the words "quantum" and "theory":
scholar.py -c 5 -a "albert einstein" -t --none "quantum theory" --after 1970"""
fmt = optparse.IndentedHelpFormatter(max_help_position=50, width=100)
parser = optparse.OptionParser(usage=usage, formatter=fmt)
group = optparse.OptionGroup(parser, 'Query arguments',
'These options define search query arguments and parameters.')
group.add_option('-a', '--author', metavar='AUTHORS', default=None,
help='Author name(s)')
group.add_option('-A', '--all', metavar='WORDS', default=None, dest='allw',
help='Results must contain all of these words')
group.add_option('-s', '--some', metavar='WORDS', default=None,
help='Results must contain at least one of these words. Pass arguments in form -s "foo bar baz" for simple words, and -s "a phrase, another phrase" for phrases')
group.add_option('-n', '--none', metavar='WORDS', default=None,
help='Results must contain none of these words. See -s|--some re. formatting')
group.add_option('-p', '--phrase', metavar='PHRASE', default=None,
help='Results must contain exact phrase')
group.add_option('-t', '--title-only', action='store_true', default=False,
help='Search title only')
group.add_option('-P', '--pub', metavar='PUBLICATIONS', default=None,
help='Results must have appeared in this publication')
group.add_option('--after', metavar='YEAR', default=None,
help='Results must have appeared in or after given year')
group.add_option('--before', metavar='YEAR', default=None,
help='Results must have appeared in or before given year')
group.add_option('--no-patents', action='store_true', default=False,
help='Do not include patents in results')
group.add_option('--no-citations', action='store_true', default=False,
help='Do not include citations in results')
group.add_option('-C', '--cluster-id', metavar='CLUSTER_ID', default=None,
help='Do not search, just use articles in given cluster ID')
group.add_option('-c', '--count', type='int', default=None,
help='Maximum number of results')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Output format',
'These options control the appearance of the results.')
group.add_option('--txt', action='store_true',
help='Print article data in text format (default)')
group.add_option('--txt-globals', action='store_true',
help='Like --txt, but first print global results too')
group.add_option('--csv', action='store_true',
help='Print article data in CSV form (separator is "|")')
group.add_option('--csv-header', action='store_true',
help='Like --csv, but print header with column names')
group.add_option('--citation', metavar='FORMAT', default=None,
help='Print article details in standard citation format. Argument Must be one of "bt" (BibTeX), "en" (EndNote), "rm" (RefMan), or "rw" (RefWorks).')
parser.add_option_group(group)
group = optparse.OptionGroup(parser, 'Miscellaneous')
group.add_option('--cookie-file', metavar='FILE', default=None,
help='File to use for cookie storage. If given, will read any existing cookies if found at startup, and save resulting cookies in the end.')
group.add_option('-d', '--debug', action='count', default=0,
help='Enable verbose logging to stderr. Repeated options increase detail of debug output.')
group.add_option('-v', '--version', action='store_true', default=False,
help='Show version information')
parser.add_option_group(group)
options, _ = parser.parse_args()
# Show help if we have neither keyword search nor author name
if len(sys.argv) == 1:
parser.print_help()
return 1
if options.debug > 0:
options.debug = min(options.debug, ScholarUtils.LOG_LEVELS['debug'])
ScholarConf.LOG_LEVEL = options.debug
ScholarUtils.log('info', 'using log level %d' % ScholarConf.LOG_LEVEL)
if options.version:
print('This is scholar.py %s.' % ScholarConf.VERSION)
return 0
if options.cookie_file:
ScholarConf.COOKIE_JAR_FILE = options.cookie_file
# Sanity-check the options: if they include a cluster ID query, it
# makes no sense to have search arguments:
if options.cluster_id is not None:
if options.author or options.allw or options.some or options.none \
or options.phrase or options.title_only or options.pub \
or options.after or options.before:
print('Cluster ID queries do not allow additional search arguments.')
return 1
querier = ScholarQuerier()
settings = ScholarSettings()
if options.citation == 'bt':
settings.set_citation_format(ScholarSettings.CITFORM_BIBTEX)
elif options.citation == 'en':
settings.set_citation_format(ScholarSettings.CITFORM_ENDNOTE)
elif options.citation == 'rm':
settings.set_citation_format(ScholarSettings.CITFORM_REFMAN)
elif options.citation == 'rw':
settings.set_citation_format(ScholarSettings.CITFORM_REFWORKS)
elif options.citation is not None:
print('Invalid citation link format, must be one of "bt", "en", "rm", or "rw".')
return 1
querier.apply_settings(settings)
if options.cluster_id:
query = ClusterScholarQuery(cluster=options.cluster_id)
else:
query = SearchScholarQuery()
if options.author:
query.set_author(options.author)
if options.allw:
query.set_words(options.allw)
if options.some:
query.set_words_some(options.some)
if options.none:
query.set_words_none(options.none)
if options.phrase:
query.set_phrase(options.phrase)
if options.title_only:
query.set_scope(True)
if options.pub:
query.set_pub(options.pub)
if options.after or options.before:
query.set_timeframe(options.after, options.before)
if options.no_patents:
query.set_include_patents(False)
if options.no_citations:
query.set_include_citations(False)
if options.count is not None:
options.count = min(options.count, ScholarConf.MAX_PAGE_RESULTS)
query.set_num_page_results(options.count)
querier.send_query(query)
if options.csv:
csv(querier)
elif options.csv_header:
csv(querier, header=True)
elif options.citation is not None:
citation_export(querier)
else:
txt(querier, with_globals=options.txt_globals)
if options.cookie_file:
querier.save_cookies()
return 0
if __name__ == "__main__":
sys.exit(main())
| bsd-2-clause |
UstadMobile/exelearning-ustadmobile-work | exe/webui/reflectionblock.py | 1 | 4588 | # ===========================================================================
# eXe
# Copyright 2004-2006, University of Auckland
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
# ===========================================================================
"""
ReflectionBlock can render and process ReflectionIdevices as XHTML
"""
import logging
from exe.webui.block import Block
from exe.webui import common
from exe.webui.element import TextAreaElement
log = logging.getLogger(__name__)
# ===========================================================================
class ReflectionBlock(Block):
"""
ReflectionBlock can render and process ReflectionIdevices as XHTML
"""
def __init__(self, parent, idevice):
"""
Initialize a new Block object
"""
Block.__init__(self, parent, idevice)
self.activityInstruc = idevice.activityInstruc
self.answerInstruc = idevice.answerInstruc
# to compensate for the strange unpickling timing when objects are
# loaded from an elp, ensure that proper idevices are set:
if idevice.activityTextArea.idevice is None:
idevice.activityTextArea.idevice = idevice
if idevice.answerTextArea.idevice is None:
idevice.answerTextArea.idevice = idevice
self.activityElement = TextAreaElement(idevice.activityTextArea)
self.answerElement = TextAreaElement(idevice.answerTextArea)
self.previewing = False # In view or preview render
if not hasattr(self.idevice,'undo'):
self.idevice.undo = True
def process(self, request):
"""
Process the request arguments from the web server
"""
Block.process(self, request)
is_cancel = common.requestHasCancel(request)
if not is_cancel:
self.activityElement.process(request)
self.answerElement.process(request)
if "title"+self.id in request.args:
self.idevice.title = request.args["title"+self.id][0]
def renderEdit(self, style):
"""
Returns an XHTML string with the form element for editing this block
"""
html = "<div class=\"iDevice\"><br/>\n"
html += common.textInput("title"+self.id, self.idevice.title)
html += self.activityElement.renderEdit()
html += self.answerElement.renderEdit()
html += "<br/>" + self.renderEditButtons()
html += "</div>\n"
return html
def renderPreview(self, style):
"""
Remembers if we're previewing or not,
then implicitly calls self.renderViewContent (via Block.renderPreview)
"""
self.previewing = True
return Block.renderPreview(self, style)
def renderView(self, style):
"""
Remembers if we're previewing or not,
then implicitly calls self.renderViewContent (via Block.renderPreview)
"""
self.previewing = False
return Block.renderView(self, style)
def renderViewContent(self):
"""
Returns an XHTML string for this block
"""
if self.previewing:
html = self.activityElement.renderPreview()
feedback = self.answerElement.renderPreview()
else:
html = self.activityElement.renderView()
feedback = self.answerElement.renderView()
html += common.feedbackBlock(self.id,feedback)
return html
from exe.engine.reflectionidevice import ReflectionIdevice
from exe.webui.blockfactory import g_blockFactory
g_blockFactory.registerBlockType(ReflectionBlock, ReflectionIdevice)
# ===========================================================================
| gpl-2.0 |
kirca/OpenUpgrade | addons/sale_analytic_plans/sale_analytic_plans.py | 381 | 1765 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class sale_order_line(osv.osv):
_inherit = 'sale.order.line'
_columns = {
'analytics_id': fields.many2one('account.analytic.plan.instance', 'Analytic Distribution'),
}
def invoice_line_create(self, cr, uid, ids, context=None):
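        # Create the invoice lines as usual, then copy each sale order line's
        # analytic distribution (analytics_id) onto the invoice line created
        # for it; both sequences are assumed to be in the same order.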
if context is None:
context = {}
line_obj = self.pool.get('account.invoice.line')
create_ids = super(sale_order_line, self).invoice_line_create(cr, uid, ids, context=context)
        for i, line in enumerate(self.browse(cr, uid, ids, context=context)):
            line_obj.write(cr, uid, [create_ids[i]],
                           {'analytics_id': line.analytics_id.id})
return create_ids
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Jcing95/iop-hd | test/functional/txn_clone.py | 2 | 7590 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import IoPTestFramework
from test_framework.util import *
class TxnMallTest(IoPTestFramework):
def set_test_params(self):
self.num_nodes = 4
def add_options(self, parser):
parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
help="Test double-spend of 1-confirmed transaction")
def setup_network(self):
# Start with split network:
super(TxnMallTest, self).setup_network()
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
def run_test(self):
# All nodes should start with 1,250 IOP:
starting_balance = 1250
for i in range(4):
assert_equal(self.nodes[i].getbalance(), starting_balance)
self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
# Assign coins to foo and bar accounts:
self.nodes[0].settxfee(.001)
node0_address_foo = self.nodes[0].getnewaddress("foo")
fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
node0_address_bar = self.nodes[0].getnewaddress("bar")
fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
assert_equal(self.nodes[0].getbalance(""),
starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
# Coins are sent to node1_address
node1_address = self.nodes[1].getnewaddress("from0")
# Send tx1, and another transaction tx2 that won't be cloned
txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
# Construct a clone of tx1, to be malleated
rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
clone_locktime = rawtx1["locktime"]
clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
# createrawtransaction randomizes the order of its outputs, so swap them if necessary.
# output 0 is at version+#inputs+input+sigstub+sequence+#outputs
# 40 IOP serialized is 00286bee00000000
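        # Worked out: 4 (version) + 1 (input count) + 36 (outpoint) +
        # 1 (empty-scriptSig length stub) + 4 (sequence) + 1 (output count)
        # = 47 bytes = 94 hex characters, and 40 * 1e8 satoshi = 0xee6b2800,
        # which serializes little-endian as "00286bee00000000".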
pos0 = 2*(4+1+36+1+4+1)
hex40 = "00286bee00000000"
output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
output0 = clone_raw[pos0 : pos0 + output_len]
output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
# Use a different signature hash type to sign. This creates an equivalent but malleated clone.
# Don't send the clone anywhere yet
tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
assert_equal(tx1_clone["complete"], True)
# Have node0 mine a block, if requested:
if (self.options.mine_block):
self.nodes[0].generate(1)
sync_blocks(self.nodes[0:2])
tx1 = self.nodes[0].gettransaction(txid1)
tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50 IOP for another
# matured block, minus tx1 and tx2 amounts, and minus transaction fees:
expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
if self.options.mine_block: expected += 50
expected += tx1["amount"] + tx1["fee"]
expected += tx2["amount"] + tx2["fee"]
assert_equal(self.nodes[0].getbalance(), expected)
# foo and bar accounts should be debited:
assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
if self.options.mine_block:
assert_equal(tx1["confirmations"], 1)
assert_equal(tx2["confirmations"], 1)
# Node1's "from0" balance should be both transaction amounts:
assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
else:
assert_equal(tx1["confirmations"], 0)
assert_equal(tx2["confirmations"], 0)
# Send clone and its parent to miner
self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
# ... mine a block...
self.nodes[2].generate(1)
# Reconnect the split network, and sync chain:
connect_nodes(self.nodes[1], 2)
self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
self.nodes[2].sendrawtransaction(tx2["hex"])
self.nodes[2].generate(1) # Mine another block to make sure we sync
sync_blocks(self.nodes)
# Re-fetch transaction info:
tx1 = self.nodes[0].gettransaction(txid1)
tx1_clone = self.nodes[0].gettransaction(txid1_clone)
tx2 = self.nodes[0].gettransaction(txid2)
# Verify expected confirmations
assert_equal(tx1["confirmations"], -2)
assert_equal(tx1_clone["confirmations"], 2)
assert_equal(tx2["confirmations"], 1)
# Check node0's total balance; should be same as before the clone, + 100 IOP for 2 matured,
# less possible orphaned matured subsidy
expected += 100
if (self.options.mine_block):
expected -= 50
assert_equal(self.nodes[0].getbalance(), expected)
assert_equal(self.nodes[0].getbalance("*", 0), expected)
# Check node0's individual account balances.
# "foo" should have been debited by the equivalent clone of tx1
assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
# "bar" should have been debited by (possibly unconfirmed) tx2
assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
# "" should have starting balance, less funding txes, plus subsidies
assert_equal(self.nodes[0].getbalance("", 0), starting_balance
- 1219
+ fund_foo_tx["fee"]
- 29
+ fund_bar_tx["fee"]
+ 100)
# Node1's "from0" account balance
assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
if __name__ == '__main__':
TxnMallTest().main()
| mit |
SEMAFORInformatik/femagtools | tests/test_fsl.py | 1 | 15788 | #!/usr/bin/env python
#
import unittest
import femagtools.fsl
import femagtools.magnet
import copy
import re
modelpars = dict(
name="PM 130 L4",
outer_diam=0.13,
bore_diam=0.07,
lfe=0.1,
poles=4,
stator=dict(
num_slots=12,
mcvkey_yoke="3",
num_slots_gen=3,
nodedist=1.5,
rlength=1.0),
windings=dict(
num_phases=3,
num_layers=1,
num_wires=4,
coil_span=3))
feapars = dict(
lfe=0.1,
speed=50.0,
current=10.0,
nu_move_steps=49,
num_cur_steps=5,
angl_i_up=0,
optim_i_up=0,
wind_temp=60.0,
magn_temp=60.0,
eval_force=0,
calc_fe_loss=1,
cog_move_steps=90,
num_layers=1,
slot_indul=0,
skew_angle=0.0,
culength=1.4,
num_par_wdgs=1,
cufilfact=0.45,
num_skew_steps=0)
class FslBuilderTest(unittest.TestCase):
def setUp(self):
self.m = copy.deepcopy(modelpars)
self.builder = femagtools.fsl.Builder()
def tearDown(self):
self.m = None
self.builder = None
def test_stator1(self):
self.m['stator']['stator1'] = dict(
tooth_width=0.009,
slot_rf1=0.002,
tip_rh1=0.002,
tip_rh2=0.002,
slot_width=0.003)
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_stator_model(model)
self.assertEqual(len(fsl), 27)
def test_stator2(self):
self.m['stator']['stator2'] = dict(
slot_width=0.009,
slot_t1=0.002,
slot_t2=0.002,
slot_t3=0.002,
corner_width=0.002,
slot_depth=0.003)
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_stator_model(model)
self.assertEqual(len(fsl), 23)
def test_stator3(self):
self.m['stator']['statorRotor3'] = dict(
slot_h1=0.002,
slot_h2=0.004,
middle_line=0,
tooth_width=0.009,
wedge_width2=0.0,
wedge_width1=0.0,
slot_top_sh=0,
slot_r2=0.002,
slot_height=0.02,
slot_r1=0.003,
slot_width=0.003)
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_stator_model(model)
self.assertEqual(len(fsl), 41)
def test_stator4(self):
self.m['stator']['stator4'] = dict(
slot_height=0.1,
slot_h1=1e-3,
slot_h2=0,
slot_h3=2e-3,
slot_h4=3e-4,
slot_r1=11e-3,
slot_width=22e-3,
wedge_width1=111e-5,
wedge_width2=222e-5,
wedge_width3=333e-5)
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_stator_model(model)
self.assertEqual(len(fsl), 36)
def test_statorBG(self):
self.m['stator']['statorBG'] = dict(
yoke_diam_ins=0.0344,
slottooth=0.0,
tip_rad=0.0,
middle_line=1,
slot_h1=1e-3,
slot_r1=0,
slot_h3=2e-3,
slot_r2=3e-4,
tooth_width=3.2e-3,
slot_width=22e-3)
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_stator_model(model)
self.assertEqual(len(fsl), 33)
def test_magnetSector(self):
self.m['magnet'] = dict(
magnetSector=dict(
magn_height=0.005,
magn_width_pct=0.8,
condshaft_r=0.0591,
magn_rfe=0.0,
magn_len=1.0,
magn_shape=0.0,
bridge_height=0.0,
bridge_width=0.0,
magn_ori=2,
magn_type=1,
magn_num=1))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_magnet_model(model)
self.assertEqual(len(fsl), 30)
def test_magnetIron(self):
self.m['magnet'] = dict(
magnetIron=dict(
magn_height=0.005,
magn_width=0.008,
gap_ma_iron=0,
air_triangle=5,
iron_height=0.001,
magn_rem=1.2,
condshaft_r=0.0591,
magn_ori=1,
bridge_height=0,
bridge_width=0,
iron_shape=0))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_magnet_model(model)
# print('\n'.join(fsl))
self.assertEqual(len(fsl), 26)
def test_magnetIron2(self):
self.m['magnet'] = dict(
magnetIron2=dict(
magn_height=0.005,
magn_width=0.008,
gap_ma_iron=0,
air_triangle=1,
iron_height=0.001,
magn_rem=1.2,
condshaft_r=0.006,
magn_ori=1,
gap_ma_right=1e-3,
gap_ma_left=2e-3,
iron_shape=0))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_magnet_model(model)
self.assertEqual(len(fsl), 28)
def test_magnetIron3(self):
self.m['magnet'] = dict(
magnetIron3=dict(
magn_height=0.005,
magn_width=0.008,
gap_ma_iron=0,
iron_bfe=3e-3,
magn_num=1,
air_triangle=1,
iron_height=0.001,
condshaft_r=0.006,
magn_ori=1,
gap_ma_right=1e-3,
gap_ma_left=2e-3,
iron_shape=0))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_magnet_model(model)
self.assertEqual(len(fsl), 27)
def test_magnetIron4(self):
self.m['magnet'] = dict(
magnetIron4=dict(
magn_height=0.005,
magn_width=0.008,
gap_ma_iron=0,
iron_bfe=3e-3,
magn_num=1,
air_space_h=1e-3,
iron_height=0.001,
corner_r=0.006,
magn_ori=1,
magn_di_ra=1e-3,
air_sp_ori=1,
iron_shape=0))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_magnet_model(model)
self.assertEqual(len(fsl), 27)
def test_magnetIron5(self):
self.m['magnet'] = dict(
magnetIron5=dict(
magn_height=0.005,
magn_width=0.008,
gap_ma_iron=0,
iron_bfe=3e-3,
magn_num=1,
air_space_h=1e-3,
iron_height=0.001,
corner_r=0.006,
air_space_b=1e-3,
magn_di_ra=1e-3,
air_sp_ori=1,
iron_shape=0))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_magnet_model(model)
self.assertEqual(len(fsl), 27)
def test_magnetIronV(self):
self.m['magnet'] = dict(
magnetIronV=dict(
magn_height=0.005,
magn_width=0.008,
gap_ma_iron=0,
iron_hs=3e-3,
magn_num=1,
magn_rem=1.2,
air_triangle=1,
iron_height=0.001,
condshaft_r=0.006,
magn_angle=130,
iron_shape=0))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_magnet_model(model)
self.assertEqual(len(fsl), 27)
def test_magnetFC2(self):
self.m['magnet'] = dict(
magnetFC2=dict(
magn_height=0.005,
magn_width=0.008,
yoke_height=5e-3,
iron_h1=3e-3,
iron_h2=2e-3,
iron_hp=2e-3,
iron_b=2e-3,
magn_num=1,
iron_bfe=0.001,
iron_bfo=0.001,
iron_shape=0))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_magnet_model(model)
self.assertEqual(len(fsl), 25)
def test_rot_hsm(self):
self.m['rotor'] = dict(
rot_hsm=dict(
                gap_pol_shaft=1e-3,
                core_height=0.02,
                pole_height=0.016,
                pole_rad=0.042,
                core_width2=0.04,
                core_width1=0.04,
                pole_width_r=0.05,
                pole_width=0.052,
                slot_width=0.002,
                slot_height=0.002,
                damper_diam=0.004,
                damper_div=0.007
))
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_rotor_model(model)
self.assertEqual(len(fsl), 33)
def test_fe_losses(self):
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_fe_losses(model)
self.assertFalse(fsl)
self.m['ffactor'] = 1.1
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_fe_losses(model)
self.assertEqual(len(fsl), 21)
ffact = [float(f.split('=')[1])
for f in fsl if f.startswith('m.ffactor')][0]
self.assertEqual(ffact, self.m['ffactor'])
feloss = [f.split('"')[1]
for f in fsl if f.find('pre_models') > 0][0]
self.assertEqual(feloss, 'FE-Losses-1')
def test_run_models(self):
feapars['calculationMode'] = "cogg_calc"
fsl = self.builder.create_analysis(feapars)
self.assertEqual(len(fsl), 26)
feapars['calculationMode'] = "pm_sym_fast"
fsl = self.builder.create_analysis(feapars)
self.assertEqual(len(fsl), 33)
feapars['calculationMode'] = "mult_cal_fast"
fsl = self.builder.create_analysis(feapars)
self.assertEqual(len(fsl), 29)
feapars['calculationMode'] = "torq_calc"
fsl = self.builder.create_analysis(feapars)
self.assertEqual(len(fsl), 26)
def test_run_existing_model(self):
model = femagtools.MachineModel('data/magnsec')
feapars['calculationMode'] = "cogg_calc"
fsl = self.builder.create(model, feapars)
self.assertEqual(len(fsl), 60)
def test_create_plots(self):
pars = copy.deepcopy(feapars)
pars['calculationMode'] = "pm_sym_fast"
pars['plots'] = ['field_lines', 'Babs']
fsl = self.builder.create_analysis(pars)
field_lines = re.findall(r'field_lines\(([^)]*)\)', ''.join(fsl))
self.assertEqual(len(field_lines), 1)
self.assertEqual(int(field_lines[0].split(',')[-1]), 20)
colorgrad = re.findall(r'color_gradation\(([^)]*)\)', ''.join(fsl))
        self.assertEqual(len(colorgrad), 1)
        cmin, cmax = [int(l) for l in colorgrad[0].split(',')[4:6]]
        self.assertEqual(cmin, 0)
        self.assertEqual(cmax, 0)
pars['plots'] = [('field_lines', 10), ('Babs', 0.0, 2.0)]
fsl = self.builder.create_analysis(pars)
field_lines = re.findall(r'field_lines\(([^)]*)\)', ''.join(fsl))
self.assertEqual(len(field_lines), 1)
self.assertEqual(int(field_lines[0].split(',')[-1]), 10)
colorgrad = re.findall(r'color_gradation\(([^)]*)\)', ''.join(fsl))
        self.assertEqual(len(colorgrad), 1)
        cmin, cmax = [float(l) for l in colorgrad[0].split(',')[4:6]]
        self.assertEqual(cmin, 0.0)
        self.assertEqual(cmax, 2.0)
def test_readfsl(self):
content = [
'dshaft = 360 --shaft diameter',
'hm = 38 -- magnet height',
'bm = 122 -- magnet width',
'ws = 10 -- slot width',
'lfe = 224',
'-- calculate slot height, angle and pole pairs',
'hs = (da2-dy2)/2 - bm ',
'alpha = math.pi/p/2 -- slot angle',
'p = m.num_poles/2',
'x = {}',
'y = {}',
'-- Berechnung der Koordinaten',
'x[1],y[1] = pr2c(dy2/2, 0)',
'x[2],y[2] = pr2c(da2/2, 0)',
'x[3],y[3] = pr2c(da2/2, alpha - math.atan2(ws,(da2/2)))',
'x[4],y[4] = pr2c(da2/2-hs, alpha - math.atan2(ws,(da2/2 - hs)))',
'nc_line(x[1], y[1], x[2], y[2], 0)']
result = self.builder.read(content)
self.assertEqual(len(result['parameter']), 4)
for p in result['parameter']:
self.assertTrue(p['key'] in ['dshaft', 'hm', 'bm', 'ws'])
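        # Only the literal assignments carrying a trailing "--" comment appear
        # to be extracted as parameters; the bare "lfe = 224" and the computed
        # expressions are not (inferred from the assertions above, not from a
        # documented rule).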
def test_gen_winding(self):
model = femagtools.MachineModel(self.m)
fsl = self.builder.create_gen_winding(model)
self.assertEqual(len(fsl), 20)
model.windings['leak_dist_wind'] = dict(
perimrad=1,
vbendrad=1,
endheight=1,
wiredia=1)
fsl = self.builder.create_gen_winding(model)
self.assertEqual(len(fsl), 30)
model.windings.pop('leak_dist_wind')
model.windings['leak_evol_wind'] = dict(
evol1rad=1,
evol2rad=1,
botlevel=1,
toplevel=1,
endheight=1,
evolbend=1,
wiredia=1)
fsl = self.builder.create_gen_winding(model)
self.assertEqual(len(fsl), 34)
model.windings.pop('leak_evol_wind')
model.windings['leak_tooth_wind'] = dict(
endheight=1,
bendrad=1,
wiredia=1)
fsl = self.builder.create_gen_winding(model)
self.assertEqual(len(fsl), 30)
def test_create_model_with_magnet_material(self):
magnetmat = [dict(
name='M45',
remanenc=1.1,
relperm=1.04,
spmaweight=7.4,
temcoefbr=-0.0015,
temcoefhc=-0.0013,
magncond=625000.0
)]
machine = dict(
name="PM 886 32",
lfe=0.224,
poles=32,
outer_diam=0.886,
bore_diam=0.76,
inner_diam=0.4956,
airgap=0.007,
external_rotor=1,
stator=dict(
num_slots=120,
rlength=1.0,
stator4=dict(
slot_height=0.035,
slot_h1=0.002,
slot_h2=0.0,
slot_h3=0.004,
slot_h4=0.0,
slot_width=0.01,
slot_r1=0.0,
wedge_width1=0.01,
wedge_width2=0.01,
wedge_width3=0.01)
),
magnet=dict(
material='M45',
magnetSector=dict(
magn_num=1,
magn_height=0.014,
magn_width_pct=0.85,
condshaft_r=0.0,
magn_rfe=0.0,
magn_len=1.0,
magn_shape=0.0,
bridge_height=0.0,
bridge_width=0.0,
magn_ori=1,
magn_type=1
)
),
windings=dict(
num_phases=3,
num_wires=5,
coil_span=1,
num_layers=2)
)
model = femagtools.MachineModel(machine)
magnets = femagtools.magnet.Magnet(magnetmat)
fsl = self.builder.create_model(model, magnets)
self.assertEqual(len(fsl), 172)
brem = [l.strip() for l in fsl
if l.split('=')[0].strip() == 'm.remanenc'][0]
self.assertEqual(brem.split('=')[-1].strip(), '1.1')
if __name__ == '__main__':
unittest.main()
| bsd-2-clause |
wtsi-hgi/irobot | irobot/tests/unit/authentication/test_http.py | 1 | 5773 | """
Copyright (c) 2017 Genome Research Ltd.
Author: Christopher Harrison <[email protected]>
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
from datetime import datetime, timedelta
from unittest.mock import patch
from aiohttp import ClientResponseError
from aioresponses import aioresponses
import irobot.authentication._http as http
from irobot.authentication._base import AuthenticatedUser
from irobot.authentication.parser import HTTPAuthMethod
from irobot.config import Configuration
from irobot.config._tree_builder import ConfigValue
from irobot.tests.unit.async import async_test
class _MockHTTPAuthConfig(Configuration):
def __init__(self, cache):
super().__init__()
self.add_value("cache", ConfigValue(cache, lambda x: x))
_CONFIG_CACHE = _MockHTTPAuthConfig(timedelta(minutes=10))
_CONFIG_NOCACHE = _MockHTTPAuthConfig(None)
class _MockHTTPAuthenticator(http.BaseHTTPAuthHandler):
@property
def www_authenticate(self):
return "Mock"
def match_auth_method(self, challenge_response):
return (challenge_response.auth_method == "foo")
def set_handler_parameters(self, challenge_response):
return http.HTTPValidatorParameters("foo", "bar")
async def get_authenticated_user(self, challenge_response, auth_response):
return AuthenticatedUser("Testy McTestface")
class TestHTTPAuthenticationHandler(unittest.TestCase):
def test_constructor(self):
auth_cache = _MockHTTPAuthenticator(_CONFIG_CACHE)
self.assertTrue(hasattr(auth_cache, "_cache"))
auth_nocache = _MockHTTPAuthenticator(_CONFIG_NOCACHE)
self.assertFalse(hasattr(auth_nocache, "_cache"))
@patch("irobot.authentication._http.Timer", spec=True)
def test_cached_shutdown(self, *args):
auth = _MockHTTPAuthenticator(_CONFIG_CACHE)
auth._cleanup_timer.is_alive.return_value = True
auth.__del__()
auth._cleanup_timer.cancel.assert_called_once()
@patch("irobot.authentication._base.datetime", spec=True)
@patch("irobot.authentication._http.Timer", spec=True)
def test_cache_cleanup(self, _mock_timer, mock_datetime):
auth = _MockHTTPAuthenticator(_CONFIG_CACHE)
auth_method = HTTPAuthMethod("foo")
validation_time = mock_datetime.utcnow.return_value = datetime.utcnow()
auth._cache[auth_method] = AuthenticatedUser("Testy McTestface")
auth._cleanup()
self.assertIn(auth_method, auth._cache)
self.assertEqual(auth._cache[auth_method].user, "Testy McTestface")
self.assertEqual(auth._cache[auth_method].authenticated, validation_time)
mock_datetime.utcnow.return_value = validation_time + timedelta(minutes=11)
auth._cleanup()
self.assertEqual(auth._cache, {})
@async_test
@aioresponses()
async def test_request_validator(self, mock_response):
auth = _MockHTTPAuthenticator(_CONFIG_NOCACHE)
mock_url = "foo"
params = http.HTTPValidatorParameters(mock_url, "bar")
mock_response.get(mock_url, status=200)
validation_response = await auth._validate_request(params)
self.assertIsNotNone(validation_response)
mock_response.get(mock_url, status=401)
validation_response = await auth._validate_request(params)
self.assertIsNone(validation_response)
mock_response.get(mock_url, status=500)
        # A 5xx response should propagate as an exception rather than return a
        # validation result; assertRaises fails if no exception is raised,
        # unlike the previous try/except, which passed silently in that case.
        with self.assertRaises(ClientResponseError):
            await auth._validate_request(params)
@async_test
@aioresponses()
async def test_authenticate(self, mock_response):
with patch("irobot.authentication._base.datetime", spec=True) as mock_datetime:
# patch and aioresponses don't play nicely together as
# decorators, so we use patch's context manager instead
validation_time = mock_datetime.utcnow.return_value = datetime.utcnow()
auth = _MockHTTPAuthenticator(_CONFIG_CACHE)
auth_response = await auth.authenticate("this is a bad header")
self.assertIsNone(auth_response)
auth_response = await auth.authenticate("bar")
self.assertIsNone(auth_response)
mock_response.get("foo", status=401)
auth_response = await auth.authenticate("foo")
self.assertIsNone(auth_response)
mock_response.get("foo", status=200)
auth_response = await auth.authenticate("foo")
self.assertEqual(auth_response.user, "Testy McTestface")
# Run again to test it's coming from the cache
mock_response.get("foo", status=200)
auth_response = await auth.authenticate("foo")
self.assertEqual(auth_response.user, "Testy McTestface")
# Invalidate cache and go again
mock_datetime.utcnow.return_value = validation_time + timedelta(minutes=11)
mock_response.get("foo", status=200)
auth_response = await auth.authenticate("foo")
self.assertEqual(auth_response.user, "Testy McTestface")
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
ticosax/django | django/core/management/commands/loaddata.py | 17 | 12898 | from __future__ import unicode_literals
import glob
import gzip
import os
import warnings
import zipfile
from itertools import product
from django.apps import apps
from django.conf import settings
from django.core import serializers
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.db import (
DEFAULT_DB_ALIAS, DatabaseError, IntegrityError, connections, router,
transaction,
)
from django.utils import lru_cache
from django.utils._os import upath
from django.utils.encoding import force_text
from django.utils.functional import cached_property
try:
import bz2
has_bz2 = True
except ImportError:
has_bz2 = False
class Command(BaseCommand):
help = 'Installs the named fixture(s) in the database.'
missing_args_message = ("No database fixture specified. Please provide the "
"path of at least one fixture in the command line.")
def add_arguments(self, parser):
parser.add_argument('args', metavar='fixture', nargs='+',
help='Fixture labels.')
parser.add_argument('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a specific database to load '
'fixtures into. Defaults to the "default" database.')
parser.add_argument('--app', action='store', dest='app_label',
default=None, help='Only look for fixtures in the specified app.')
parser.add_argument('--ignorenonexistent', '-i', action='store_true',
dest='ignore', default=False,
help='Ignores entries in the serialized data for fields that do not '
'currently exist on the model.')
def handle(self, *fixture_labels, **options):
self.ignore = options.get('ignore')
self.using = options.get('database')
self.app_label = options.get('app_label')
self.hide_empty = options.get('hide_empty', False)
self.verbosity = options.get('verbosity')
with transaction.atomic(using=self.using):
self.loaddata(fixture_labels)
# Close the DB connection -- unless we're still in a transaction. This
# is required as a workaround for an edge case in MySQL: if the same
# connection is used to create tables, load data, and query, the query
# can return incorrect results. See Django #7572, MySQL #37735.
if transaction.get_autocommit(self.using):
connections[self.using].close()
def loaddata(self, fixture_labels):
connection = connections[self.using]
# Keep a count of the installed objects and fixtures
self.fixture_count = 0
self.loaded_object_count = 0
self.fixture_object_count = 0
self.models = set()
self.serialization_formats = serializers.get_public_serializer_formats()
# Forcing binary mode may be revisited after dropping Python 2 support (see #22399)
self.compression_formats = {
None: (open, 'rb'),
'gz': (gzip.GzipFile, 'rb'),
'zip': (SingleZipReader, 'r'),
}
if has_bz2:
self.compression_formats['bz2'] = (bz2.BZ2File, 'r')
with connection.constraint_checks_disabled():
for fixture_label in fixture_labels:
self.load_label(fixture_label)
# Since we disabled constraint checks, we must manually check for
# any invalid keys that might have been added
table_names = [model._meta.db_table for model in self.models]
try:
connection.check_constraints(table_names=table_names)
except Exception as e:
e.args = ("Problem installing fixtures: %s" % e,)
raise
# If we found even one object in a fixture, we need to reset the
# database sequences.
if self.loaded_object_count > 0:
sequence_sql = connection.ops.sequence_reset_sql(no_style(), self.models)
if sequence_sql:
if self.verbosity >= 2:
self.stdout.write("Resetting sequences\n")
with connection.cursor() as cursor:
for line in sequence_sql:
cursor.execute(line)
if self.verbosity >= 1:
if self.fixture_count == 0 and self.hide_empty:
pass
elif self.fixture_object_count == self.loaded_object_count:
self.stdout.write("Installed %d object(s) from %d fixture(s)" %
(self.loaded_object_count, self.fixture_count))
else:
self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" %
(self.loaded_object_count, self.fixture_object_count, self.fixture_count))
def load_label(self, fixture_label):
"""
        Loads fixture files for a given label.
"""
show_progress = self.verbosity >= 3
for fixture_file, fixture_dir, fixture_name in self.find_fixtures(fixture_label):
_, ser_fmt, cmp_fmt = self.parse_name(os.path.basename(fixture_file))
open_method, mode = self.compression_formats[cmp_fmt]
fixture = open_method(fixture_file, mode)
try:
self.fixture_count += 1
objects_in_fixture = 0
loaded_objects_in_fixture = 0
if self.verbosity >= 2:
self.stdout.write("Installing %s fixture '%s' from %s." %
(ser_fmt, fixture_name, humanize(fixture_dir)))
objects = serializers.deserialize(ser_fmt, fixture,
using=self.using, ignorenonexistent=self.ignore)
for obj in objects:
objects_in_fixture += 1
if router.allow_migrate_model(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:
obj.save(using=self.using)
if show_progress:
self.stdout.write(
'\rProcessed %i object(s).' % loaded_objects_in_fixture,
ending=''
)
except (DatabaseError, IntegrityError) as e:
e.args = ("Could not load %(app_label)s.%(object_name)s(pk=%(pk)s): %(error_msg)s" % {
'app_label': obj.object._meta.app_label,
'object_name': obj.object._meta.object_name,
'pk': obj.object.pk,
'error_msg': force_text(e)
},)
raise
if objects and show_progress:
self.stdout.write('') # add a newline after progress indicator
self.loaded_object_count += loaded_objects_in_fixture
self.fixture_object_count += objects_in_fixture
except Exception as e:
if not isinstance(e, CommandError):
e.args = ("Problem installing fixture '%s': %s" % (fixture_file, e),)
raise
finally:
fixture.close()
# Warn if the fixture we loaded contains 0 objects.
if objects_in_fixture == 0:
warnings.warn(
"No fixture data found for '%s'. (File format may be "
"invalid.)" % fixture_name,
RuntimeWarning
)
@lru_cache.lru_cache(maxsize=None)
def find_fixtures(self, fixture_label):
"""
Finds fixture files for a given label.
"""
fixture_name, ser_fmt, cmp_fmt = self.parse_name(fixture_label)
databases = [self.using, None]
cmp_fmts = list(self.compression_formats.keys()) if cmp_fmt is None else [cmp_fmt]
ser_fmts = serializers.get_public_serializer_formats() if ser_fmt is None else [ser_fmt]
if self.verbosity >= 2:
self.stdout.write("Loading '%s' fixtures..." % fixture_name)
if os.path.isabs(fixture_name):
fixture_dirs = [os.path.dirname(fixture_name)]
fixture_name = os.path.basename(fixture_name)
else:
fixture_dirs = self.fixture_dirs
if os.path.sep in os.path.normpath(fixture_name):
fixture_dirs = [os.path.join(dir_, os.path.dirname(fixture_name))
for dir_ in fixture_dirs]
fixture_name = os.path.basename(fixture_name)
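        # Candidate file names are <name>[.<database>][.<ser_fmt>][.<cmp_fmt>];
        # each optional piece may be absent, hence the product over all combos.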
suffixes = ('.'.join(ext for ext in combo if ext)
for combo in product(databases, ser_fmts, cmp_fmts))
targets = set('.'.join((fixture_name, suffix)) for suffix in suffixes)
fixture_files = []
for fixture_dir in fixture_dirs:
if self.verbosity >= 2:
self.stdout.write("Checking %s for fixtures..." % humanize(fixture_dir))
fixture_files_in_dir = []
for candidate in glob.iglob(os.path.join(fixture_dir, fixture_name + '*')):
if os.path.basename(candidate) in targets:
# Save the fixture_dir and fixture_name for future error messages.
fixture_files_in_dir.append((candidate, fixture_dir, fixture_name))
if self.verbosity >= 2 and not fixture_files_in_dir:
self.stdout.write("No fixture '%s' in %s." %
(fixture_name, humanize(fixture_dir)))
# Check kept for backwards-compatibility; it isn't clear why
# duplicates are only allowed in different directories.
if len(fixture_files_in_dir) > 1:
raise CommandError(
"Multiple fixtures named '%s' in %s. Aborting." %
(fixture_name, humanize(fixture_dir)))
fixture_files.extend(fixture_files_in_dir)
if not fixture_files:
# Warning kept for backwards-compatibility; why not an exception?
warnings.warn("No fixture named '%s' found." % fixture_name)
return fixture_files
@cached_property
def fixture_dirs(self):
"""
Return a list of fixture directories.
The list contains the 'fixtures' subdirectory of each installed
application, if it exists, the directories in FIXTURE_DIRS, and the
current directory.
"""
dirs = []
fixture_dirs = settings.FIXTURE_DIRS
if len(fixture_dirs) != len(set(fixture_dirs)):
raise ImproperlyConfigured("settings.FIXTURE_DIRS contains duplicates.")
for app_config in apps.get_app_configs():
app_label = app_config.label
app_dir = os.path.join(app_config.path, 'fixtures')
if app_dir in fixture_dirs:
raise ImproperlyConfigured(
"'%s' is a default fixture directory for the '%s' app "
"and cannot be listed in settings.FIXTURE_DIRS." % (app_dir, app_label)
)
if self.app_label and app_label != self.app_label:
continue
if os.path.isdir(app_dir):
dirs.append(app_dir)
dirs.extend(list(fixture_dirs))
dirs.append('')
dirs = [upath(os.path.abspath(os.path.realpath(d))) for d in dirs]
return dirs
def parse_name(self, fixture_name):
"""
        Splits a fixture name into name, serialization format, and compression format.
"""
parts = fixture_name.rsplit('.', 2)
if len(parts) > 1 and parts[-1] in self.compression_formats:
cmp_fmt = parts[-1]
parts = parts[:-1]
else:
cmp_fmt = None
if len(parts) > 1:
if parts[-1] in self.serialization_formats:
ser_fmt = parts[-1]
parts = parts[:-1]
else:
raise CommandError(
"Problem installing fixture '%s': %s is not a known "
"serialization format." % (''.join(parts[:-1]), parts[-1]))
else:
ser_fmt = None
name = '.'.join(parts)
return name, ser_fmt, cmp_fmt
class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs)
if len(self.namelist()) != 1:
raise ValueError("Zip-compressed fixtures must contain one file.")
def read(self):
return zipfile.ZipFile.read(self, self.namelist()[0])
def humanize(dirname):
return "'%s'" % dirname if dirname else 'absolute path'
| bsd-3-clause |
izonder/intellij-community | python/helpers/pycharm/django_manage_commands_provider/_xml.py | 78 | 6083 | # coding=utf-8
"""
This module exports information about manage commands and options from django to PyCharm.
Information is provided in XML (to prevent encoding troubles and simplify deserialization on the Java side).
Right after the XML declaration, before the root tag, the document contains the following comment:
<!--jb pycharm data start-->
Use it to make sure you have found the correct XML.
It does not have a schema (yet!), but here is the XML format it uses.
<commandInfo-array> -- root
<commandInfo args="args description" help="human readable text" name="command name"> -- info about command
<option help="option help" numberOfArgs="number of values (nargs)" type="option_type (see below)"> -- one entry for each option
<longNames>--each-for-one-long-opt-name</longNames>
<shortNames>-each-for-one-short-name</shortNames>
<choices>--each-for-one-available-value</choices>
</option>
</commandInfo>
</commandInfo-array>
"option_type" is only set if "numberOfArgs" > 0, and it can be: "int" (means integer),
"choices" (means opt can have one of the values, provided in choices) or "str" that means "string" (option may have any value)
Classes like DjangoCommandsInfo are used on the Java side.
TODO: Since Django 1.8 we can fetch much more info from argparse like positional argument names, nargs etc. Use it!
"""
from xml.dom import minidom
from xml.dom.minidom import Element
from _jb_utils import VersionAgnosticUtils
__author__ = 'Ilya.Kazakevich'
class XmlDumper(object):
""""
Creates an API to generate XML provided in this package.
How to use:
* dumper.start_command(..)
* dumper.add_command_option(..) # optional
* dumper.close_command()
* print(dumper.xml)
"""
__command_info_tag = "commandInfo" # Name of main tag
def __init__(self):
self.__document = minidom.Document()
self.__root = self.__document.createElement("{0}-array".format(XmlDumper.__command_info_tag))
self.__document.appendChild(self.__document.createComment("jb pycharm data start"))
self.__document.appendChild(self.__root)
self.__command_element = None
def __create_text_array(self, parent, tag_name, values):
"""
Creates array of text elements and adds them to parent
        :type parent: Element
        :type tag_name: str
        :type values: list of str
        :param parent: destination to add new elements
        :param tag_name: name of the tag created to hold each text value
        :param values: list of values to add
"""
for value in values:
tag = self.__document.createElement(tag_name)
text = self.__document.createTextNode(str(value))
tag.appendChild(text)
parent.appendChild(tag)
def start_command(self, command_name, command_help_text):
"""
Starts manage command
:param command_name: command name
:param command_help_text: command help
"""
assert not bool(self.__command_element), "Already in command"
self.__command_element = self.__document.createElement(XmlDumper.__command_info_tag)
self.__command_element.setAttribute("name", command_name)
self.__command_element.setAttribute("help", command_help_text)
self.__root.appendChild(self.__command_element)
def set_arguments(self, command_args_text):
"""
Adds "arguments help" to command.
TODO: Use real list of arguments instead of this text when people migrate to argparse (Dj. 1.8)
:param command_args_text: command text for args
:type command_args_text str
"""
assert bool(self.__command_element), "Not in a a command"
self.__command_element.setAttribute("args", VersionAgnosticUtils().to_unicode(command_args_text))
def add_command_option(self, long_opt_names, short_opt_names, help_text, argument_info):
"""
Adds command option
:param argument_info: None if option does not accept any arguments or tuple of (num_of_args, type_info) \
where num_of_args is int > 0 and type_info is str, representing type (only "int" and "string" are supported) \
or list of available types in case of choices
:param long_opt_names: list of long opt names
:param short_opt_names: list of short opt names
:param help_text: help text
        :type long_opt_names: iterable of str
        :type short_opt_names: iterable of str
        :type help_text: str
        :type argument_info: tuple or None
"""
assert isinstance(self.__command_element, Element), "Add option in command only"
option = self.__document.createElement("option")
opt_type_to_report = None
num_of_args = 0
if argument_info:
(num_of_args, type_info) = argument_info
if isinstance(type_info, list):
self.__create_text_array(option, "choices", type_info)
opt_type_to_report = "choices"
else:
opt_type_to_report = "int" if str(type_info) == "int" else "str"
if long_opt_names:
self.__create_text_array(option, "longNames", long_opt_names)
if short_opt_names:
self.__create_text_array(option, "shortNames", short_opt_names)
if opt_type_to_report:
option.setAttribute("type", opt_type_to_report)
option.setAttribute("help", help_text)
if num_of_args:
option.setAttribute("numberOfArgs", str(num_of_args))
self.__command_element.appendChild(option)
def close_command(self):
"""
Closes currently opened command
"""
assert bool(self.__command_element), "No command to close"
self.__command_element = None
@property
def xml(self):
"""
:return: current commands as XML as described in package
        :rtype: str
"""
document = self.__document.toxml(encoding="utf-8")
return VersionAgnosticUtils().to_unicode(document.decode("utf-8") if isinstance(document, bytes) else document)
| apache-2.0 |
manaris/jythonMusic | library/jython2.5.3/Lib/distutils/spawn.py | 81 | 7818 | """distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: spawn.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os, string
from distutils.errors import *
from distutils import log
def spawn (cmd,
search_path=1,
verbose=0,
dry_run=0):
"""Run another program, specified as a command list 'cmd', in a new
process. 'cmd' is just the argument list for the new process, ie.
cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
There is no way to run a program with a name different from that of its
executable.
If 'search_path' is true (the default), the system's executable
search path will be used to find the program; otherwise, cmd[0]
must be the exact path to the executable. If 'dry_run' is true,
the command will not actually be run.
Raise DistutilsExecError if running the program fails in any way; just
return on success.
"""
if os.name == 'posix':
_spawn_posix(cmd, search_path, dry_run=dry_run)
elif os.name == 'nt':
_spawn_nt(cmd, search_path, dry_run=dry_run)
elif os.name == 'os2':
_spawn_os2(cmd, search_path, dry_run=dry_run)
elif os.name == 'java':
_spawn_java(cmd, search_path, dry_run=dry_run)
else:
raise DistutilsPlatformError, \
"don't know how to spawn programs on platform '%s'" % os.name
# spawn ()
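# Usage sketch (hypothetical command; 'spawn' returns None on success and
# raises DistutilsExecError on any failure):
#
#   from distutils.spawn import spawn
#   spawn(['gcc', '-c', 'foo.c'])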
def _nt_quote_args (args):
"""Quote command-line arguments for DOS/Windows conventions: just
wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i in range(len(args)):
if string.find(args[i], ' ') != -1:
args[i] = '"%s"' % args[i]
return args
def _spawn_nt (cmd,
search_path=1,
verbose=0,
dry_run=0):
executable = cmd[0]
cmd = _nt_quote_args(cmd)
if search_path:
# either we find one or it stays the same
executable = find_executable(executable) or executable
log.info(string.join([executable] + cmd[1:], ' '))
if not dry_run:
# spawn for NT requires a full path to the .exe
try:
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError, exc:
# this seems to happen when the command isn't found
raise DistutilsExecError, \
"command '%s' failed: %s" % (cmd[0], exc[-1])
if rc != 0:
# and this reflects the command running but failing
raise DistutilsExecError, \
"command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_os2 (cmd,
search_path=1,
verbose=0,
dry_run=0):
executable = cmd[0]
#cmd = _nt_quote_args(cmd)
if search_path:
# either we find one or it stays the same
executable = find_executable(executable) or executable
log.info(string.join([executable] + cmd[1:], ' '))
if not dry_run:
# spawnv for OS/2 EMX requires a full path to the .exe
try:
rc = os.spawnv(os.P_WAIT, executable, cmd)
except OSError, exc:
# this seems to happen when the command isn't found
raise DistutilsExecError, \
"command '%s' failed: %s" % (cmd[0], exc[-1])
if rc != 0:
# and this reflects the command running but failing
print "command '%s' failed with exit status %d" % (cmd[0], rc)
raise DistutilsExecError, \
"command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_posix (cmd,
search_path=1,
verbose=0,
dry_run=0):
log.info(string.join(cmd, ' '))
if dry_run:
return
exec_fn = search_path and os.execvp or os.execv
pid = os.fork()
if pid == 0: # in the child
try:
#print "cmd[0] =", cmd[0]
#print "cmd =", cmd
exec_fn(cmd[0], cmd)
except OSError, e:
sys.stderr.write("unable to execute %s: %s\n" %
(cmd[0], e.strerror))
os._exit(1)
sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
os._exit(1)
else: # in the parent
# Loop until the child either exits or is terminated by a signal
# (ie. keep waiting if it's merely stopped)
while 1:
try:
(pid, status) = os.waitpid(pid, 0)
except OSError, exc:
import errno
if exc.errno == errno.EINTR:
continue
raise DistutilsExecError, \
"command '%s' failed: %s" % (cmd[0], exc[-1])
if os.WIFSIGNALED(status):
raise DistutilsExecError, \
"command '%s' terminated by signal %d" % \
(cmd[0], os.WTERMSIG(status))
elif os.WIFEXITED(status):
exit_status = os.WEXITSTATUS(status)
if exit_status == 0:
return # hey, it succeeded!
else:
raise DistutilsExecError, \
"command '%s' failed with exit status %d" % \
(cmd[0], exit_status)
elif os.WIFSTOPPED(status):
continue
else:
raise DistutilsExecError, \
"unknown error executing '%s': termination status %d" % \
(cmd[0], status)
# _spawn_posix ()
def _spawn_java(cmd,
search_path=1,
verbose=0,
dry_run=0):
executable = cmd[0]
cmd = ' '.join(_nt_quote_args(cmd))
log.info(cmd)
if not dry_run:
try:
rc = os.system(cmd) >> 8
except OSError, exc:
# this seems to happen when the command isn't found
raise DistutilsExecError, \
"command '%s' failed: %s" % (executable, exc[-1])
if rc != 0:
# and this reflects the command running but failing
print "command '%s' failed with exit status %d" % (executable, rc)
raise DistutilsExecError, \
"command '%s' failed with exit status %d" % (executable, rc)
def find_executable(executable, path=None):
"""Try to find 'executable' in the directories listed in 'path' (a
string listing directories separated by 'os.pathsep'; defaults to
os.environ['PATH']). Returns the complete filename or None if not
found.
"""
if path is None:
path = os.environ['PATH']
paths = string.split(path, os.pathsep)
(base, ext) = os.path.splitext(executable)
if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
executable = executable + '.exe'
if not os.path.isfile(executable):
for p in paths:
f = os.path.join(p, executable)
if os.path.isfile(f):
# the file exists, we have a shot at spawn working
return f
return None
else:
return executable
# find_executable()
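# Usage sketch (paths are illustrative; returns the full path or None):
#
#   from distutils.spawn import find_executable
#   compiler = find_executable('gcc')   # e.g. '/usr/bin/gcc', or None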
| gpl-3.0 |
senyorjou/jshop | jshop/config/settings.py | 1 | 13783 | # -*- coding: utf-8 -*-
"""
Django settings for src project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
from os.path import join
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this where even if in Dev this class is called.
pass
from configurations import Configuration, values
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
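# django-configurations selects the active settings class defined below
# (Common, Local or Production) via the DJANGO_CONFIGURATION environment
# variable.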
class Common(Configuration):
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin
'django.contrib.admin',
)
THIRD_PARTY_APPS = (
'south', # Database migration helpers:
'crispy_forms', # Form layouts
'avatar', # for user avatars
)
# Apps specific for this project go here.
LOCAL_APPS = (
'users', # custom users app
# Your stuff: custom apps go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + THIRD_PARTY_APPS + LOCAL_APPS
INSTALLED_APPS += (
# Needs to come last for now because of a weird edge case between
# South and allauth
'allauth', # registration
'allauth.account', # registration
'allauth.socialaccount', # registration
)
########## END APP CONFIGURATION
########## MIDDLEWARE CONFIGURATION
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## DEBUG
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = values.BooleanValue(True)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-debug
TEMPLATE_DEBUG = DEBUG
########## END DEBUG
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
# In production, this is changed to a values.SecretValue() setting
SECRET_KEY = "CHANGEME!!!"
########## END SECRET CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
join(BASE_DIR, 'fixtures'),
)
########## END FIXTURE CONFIGURATION
########## EMAIL CONFIGURATION
EMAIL_BACKEND = values.Value('django.core.mail.backends.smtp.EmailBackend')
########## END EMAIL CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Marc Jou', '[email protected]'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = values.DatabaseURLValue('postgres://localhost/jshop')
########## END DATABASE CONFIGURATION
########## CACHING
    # Do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
# memcacheify is what's used in Production
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
########## END CACHING
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-context-processors
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
"allauth.account.context_processors.account",
"allauth.socialaccount.context_processors.socialaccount",
'django.core.context_processors.debug',
'django.core.context_processors.i18n',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.core.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.core.context_processors.request',
# Your stuff: custom template context processers go here
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_DIRS = (
join(BASE_DIR, 'templates'),
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# See: http://django-crispy-forms.readthedocs.org/en/latest/install.html#template-packs
CRISPY_TEMPLATE_PACK = 'bootstrap3'
########## END TEMPLATE CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = join(os.path.dirname(BASE_DIR), 'staticfiles')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
join(BASE_DIR, 'static'),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = join(BASE_DIR, 'media')
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## URL Configuration
ROOT_URLCONF = 'config.urls'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = 'config.wsgi.application'
########## End URL Configuration
########## AUTHENTICATION CONFIGURATION
AUTHENTICATION_BACKENDS = (
"django.contrib.auth.backends.ModelBackend",
"allauth.account.auth_backends.AuthenticationBackend",
)
# Some really nice defaults
ACCOUNT_AUTHENTICATION_METHOD = "username"
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_EMAIL_VERIFICATION = "mandatory"
########## END AUTHENTICATION CONFIGURATION
########## Custom user app defaults
# Select the correct user model
AUTH_USER_MODEL = "users.User"
LOGIN_REDIRECT_URL = "users:redirect"
########## END Custom user app defaults
########## SLUGLIFIER
AUTOSLUG_SLUGIFY_FUNCTION = "slugify.slugify"
########## END SLUGLIFIER
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
    ########## Your common stuff: Below this line define 3rd party library settings
class Local(Common):
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
########## END INSTALLED_APPS
########## Mail settings
EMAIL_HOST = "localhost"
EMAIL_PORT = 1025
EMAIL_BACKEND = values.Value('django.core.mail.backends.console.EmailBackend')
########## End mail settings
########## django-debug-toolbar
MIDDLEWARE_CLASSES = Common.MIDDLEWARE_CLASSES + ('debug_toolbar.middleware.DebugToolbarMiddleware',)
INSTALLED_APPS += ('debug_toolbar',)
INTERNAL_IPS = ('127.0.0.1',)
DEBUG_TOOLBAR_CONFIG = {
'INTERCEPT_REDIRECTS': False,
'SHOW_TEMPLATE_CONTEXT': True,
}
########## end django-debug-toolbar
    ########## Your local stuff: Below this line define 3rd party library settings
class Production(Common):
########## INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
########## END INSTALLED_APPS
########## SECRET KEY
SECRET_KEY = values.SecretValue()
########## END SECRET KEY
########## django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
########## end django-secure
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
########## END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
########## STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIREY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (AWS_EXPIREY,
AWS_EXPIREY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
########## END STORAGE CONFIGURATION
########## EMAIL
DEFAULT_FROM_EMAIL = values.Value(
'src <[email protected]>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[src] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
########## END EMAIL
########## TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
########## END TEMPLATE CONFIGURATION
########## CACHING
    # Only do this here because, thanks to django-pylibmc-sasl and pylibmc, memcacheify is painful to install on Windows.
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
########## END CACHING
    ########## Your production stuff: Below this line define 3rd party library settings
| bsd-3-clause |
forge33/CouchPotatoServer | libs/suds/wsdl.py | 197 | 31503 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{wsdl} module provides an objectification of the WSDL.
The primary class is I{Definitions} as it represents the root element
found in the document.
"""
from logging import getLogger
from suds import *
from suds.sax import splitPrefix
from suds.sax.element import Element
from suds.bindings.document import Document
from suds.bindings.rpc import RPC, Encoded
from suds.xsd import qualify, Namespace
from suds.xsd.schema import Schema, SchemaCollection
from suds.xsd.query import ElementQuery
from suds.sudsobject import Object, Facade, Metadata
from suds.reader import DocumentReader, DefinitionsReader
from urlparse import urljoin
import re, soaparray
log = getLogger(__name__)
wsdlns = (None, "http://schemas.xmlsoap.org/wsdl/")
soapns = (None, 'http://schemas.xmlsoap.org/wsdl/soap/')
soap12ns = (None, 'http://schemas.xmlsoap.org/wsdl/soap12/')
class WObject(Object):
"""
Base object for wsdl types.
@ivar root: The XML I{root} element.
@type root: L{Element}
"""
def __init__(self, root, definitions=None):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
Object.__init__(self)
self.root = root
pmd = Metadata()
pmd.excludes = ['root']
pmd.wrappers = dict(qname=repr)
self.__metadata__.__print__ = pmd
def resolve(self, definitions):
"""
Resolve named references to other WSDL objects.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
pass
class NamedObject(WObject):
"""
A B{named} WSDL object.
@ivar name: The name of the object.
@type name: str
@ivar qname: The I{qualified} name of the object.
@type qname: (name, I{namespace-uri}).
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
WObject.__init__(self, root, definitions)
self.name = root.get('name')
self.qname = (self.name, definitions.tns[1])
pmd = self.__metadata__.__print__
pmd.wrappers['qname'] = repr
class Definitions(WObject):
"""
Represents the I{root} container of the WSDL objects as defined
by <wsdl:definitions/>
@ivar id: The object id.
@type id: str
@ivar options: An options dictionary.
@type options: L{options.Options}
@ivar url: The URL used to load the object.
@type url: str
@ivar tns: The target namespace for the WSDL.
@type tns: str
@ivar schema: The collective WSDL schema object.
@type schema: L{SchemaCollection}
@ivar children: The raw list of child objects.
@type children: [L{WObject},...]
@ivar imports: The list of L{Import} children.
@type imports: [L{Import},...]
@ivar messages: The dictionary of L{Message} children key'd by I{qname}
@type messages: [L{Message},...]
@ivar port_types: The dictionary of L{PortType} children key'd by I{qname}
@type port_types: [L{PortType},...]
@ivar bindings: The dictionary of L{Binding} children key'd by I{qname}
@type bindings: [L{Binding},...]
@ivar service: The service object.
@type service: L{Service}
"""
Tag = 'definitions'
def __init__(self, url, options):
"""
@param url: A URL to the WSDL.
@type url: str
@param options: An options dictionary.
@type options: L{options.Options}
"""
log.debug('reading wsdl at: %s ...', url)
reader = DocumentReader(options)
d = reader.open(url)
root = d.root()
WObject.__init__(self, root)
self.id = objid(self)
self.options = options
self.url = url
self.tns = self.mktns(root)
self.types = []
self.schema = None
self.children = []
self.imports = []
self.messages = {}
self.port_types = {}
self.bindings = {}
self.services = []
self.add_children(self.root)
self.children.sort()
pmd = self.__metadata__.__print__
pmd.excludes.append('children')
pmd.excludes.append('wsdl')
pmd.wrappers['schema'] = repr
self.open_imports()
self.resolve()
self.build_schema()
self.set_wrapped()
for s in self.services:
self.add_methods(s)
log.debug("wsdl at '%s' loaded:\n%s", url, self)
def mktns(self, root):
""" Get/create the target namespace """
tns = root.get('targetNamespace')
prefix = root.findPrefix(tns)
if prefix is None:
log.debug('warning: tns (%s), not mapped to prefix', tns)
prefix = 'tns'
return (prefix, tns)
def add_children(self, root):
""" Add child objects using the factory """
for c in root.getChildren(ns=wsdlns):
child = Factory.create(c, self)
if child is None: continue
self.children.append(child)
if isinstance(child, Import):
self.imports.append(child)
continue
if isinstance(child, Types):
self.types.append(child)
continue
if isinstance(child, Message):
self.messages[child.qname] = child
continue
if isinstance(child, PortType):
self.port_types[child.qname] = child
continue
if isinstance(child, Binding):
self.bindings[child.qname] = child
continue
if isinstance(child, Service):
self.services.append(child)
continue
def open_imports(self):
""" Import the I{imported} WSDLs. """
for imp in self.imports:
imp.load(self)
def resolve(self):
""" Tell all children to resolve themselves """
for c in self.children:
c.resolve(self)
def build_schema(self):
""" Process L{Types} objects and create the schema collection """
container = SchemaCollection(self)
for t in [t for t in self.types if t.local()]:
for root in t.contents():
schema = Schema(root, self.url, self.options, container)
container.add(schema)
if not len(container): # empty
root = Element.buildPath(self.root, 'types/schema')
schema = Schema(root, self.url, self.options, container)
container.add(schema)
self.schema = container.load(self.options)
for s in [t.schema() for t in self.types if t.imported()]:
self.schema.merge(s)
return self.schema
def add_methods(self, service):
""" Build method view for service """
bindings = {
'document/literal' : Document(self),
'rpc/literal' : RPC(self),
'rpc/encoded' : Encoded(self)
}
for p in service.ports:
binding = p.binding
ptype = p.binding.type
operations = p.binding.type.operations.values()
for name in [op.name for op in operations]:
m = Facade('Method')
m.name = name
m.location = p.location
m.binding = Facade('binding')
op = binding.operation(name)
m.soap = op.soap
key = '/'.join((op.soap.style, op.soap.input.body.use))
m.binding.input = bindings.get(key)
key = '/'.join((op.soap.style, op.soap.output.body.use))
m.binding.output = bindings.get(key)
                op = ptype.operation(name)  # raises MethodNotFound when missing
p.methods[name] = m
def set_wrapped(self):
""" set (wrapped|bare) flag on messages """
for b in self.bindings.values():
for op in b.operations.values():
for body in (op.soap.input.body, op.soap.output.body):
body.wrapped = False
if len(body.parts) != 1:
continue
for p in body.parts:
if p.element is None:
continue
query = ElementQuery(p.element)
pt = query.execute(self.schema)
if pt is None:
raise TypeNotFound(query.ref)
resolved = pt.resolve()
if resolved.builtin():
continue
body.wrapped = True
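    # Note: the wrapped convention is the document/literal case where a
    # message has exactly one part referencing a top-level schema element,
    # e.g.
    #   <message name="AddRequest">
    #       <part name="parameters" element="tns:Add"/>
    #   </message>
    # Such bodies are marked so the document binding can unwrap parameters.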
def __getstate__(self):
nopickle = ('options',)
state = self.__dict__.copy()
for k in nopickle:
if k in state:
del state[k]
return state
def __repr__(self):
return 'Definitions (id=%s)' % self.id
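# Usage sketch (assumes a default L{suds.options.Options} instance):
#   from suds.options import Options
#   wsdl = Definitions('http://localhost/service?wsdl', Options())
#   for svc in wsdl.services:
#       for port in svc.ports:
#           print port.name, sorted(port.methods.keys())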
class Import(WObject):
"""
Represents the <wsdl:import/>.
@ivar location: The value of the I{location} attribute.
@type location: str
@ivar ns: The value of the I{namespace} attribute.
@type ns: str
@ivar imported: The imported object.
@type imported: L{Definitions}
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
WObject.__init__(self, root, definitions)
self.location = root.get('location')
self.ns = root.get('namespace')
self.imported = None
pmd = self.__metadata__.__print__
pmd.wrappers['imported'] = repr
def load(self, definitions):
""" Load the object by opening the URL """
url = self.location
log.debug('importing (%s)', url)
if '://' not in url:
url = urljoin(definitions.url, url)
options = definitions.options
d = Definitions(url, options)
if d.root.match(Definitions.Tag, wsdlns):
self.import_definitions(definitions, d)
return
if d.root.match(Schema.Tag, Namespace.xsdns):
self.import_schema(definitions, d)
return
raise Exception('document at "%s" is unknown' % url)
def import_definitions(self, definitions, d):
""" import/merge wsdl definitions """
definitions.types += d.types
definitions.messages.update(d.messages)
definitions.port_types.update(d.port_types)
definitions.bindings.update(d.bindings)
self.imported = d
log.debug('imported (WSDL):\n%s', d)
def import_schema(self, definitions, d):
""" import schema as <types/> content """
if not len(definitions.types):
types = Types.create(definitions)
definitions.types.append(types)
else:
types = definitions.types[-1]
types.root.append(d.root)
log.debug('imported (XSD):\n%s', d.root)
def __gt__(self, other):
return False
class Types(WObject):
"""
Represents <types><schema/></types>.
"""
@classmethod
def create(cls, definitions):
root = Element('types', ns=wsdlns)
definitions.root.insert(root)
return Types(root, definitions)
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
WObject.__init__(self, root, definitions)
self.definitions = definitions
def contents(self):
return self.root.getChildren('schema', Namespace.xsdns)
def schema(self):
return self.definitions.schema
def local(self):
return ( self.definitions.schema is None )
def imported(self):
return ( not self.local() )
def __gt__(self, other):
return isinstance(other, Import)
class Part(NamedObject):
"""
Represents <message><part/></message>.
@ivar element: The value of the {element} attribute.
Stored as a I{qref} as converted by L{suds.xsd.qualify}.
@type element: str
@ivar type: The value of the {type} attribute.
Stored as a I{qref} as converted by L{suds.xsd.qualify}.
@type type: str
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
pmd = Metadata()
pmd.wrappers = dict(element=repr, type=repr)
self.__metadata__.__print__ = pmd
tns = definitions.tns
self.element = self.__getref('element', tns)
self.type = self.__getref('type', tns)
def __getref(self, a, tns):
""" Get the qualified value of attribute named 'a'."""
s = self.root.get(a)
if s is None:
return s
else:
return qualify(s, self.root, tns)
class Message(NamedObject):
"""
Represents <message/>.
@ivar parts: A list of message parts.
@type parts: [I{Part},...]
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
self.parts = []
for p in root.getChildren('part'):
part = Part(p, definitions)
self.parts.append(part)
def __gt__(self, other):
return isinstance(other, (Import, Types))
class PortType(NamedObject):
"""
Represents <portType/>.
@ivar operations: A list of contained operations.
@type operations: list
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
self.operations = {}
for c in root.getChildren('operation'):
op = Facade('Operation')
op.name = c.get('name')
op.tns = definitions.tns
input = c.getChild('input')
if input is None:
op.input = None
else:
op.input = input.get('message')
output = c.getChild('output')
if output is None:
op.output = None
else:
op.output = output.get('message')
faults = []
for fault in c.getChildren('fault'):
f = Facade('Fault')
f.name = fault.get('name')
f.message = fault.get('message')
faults.append(f)
op.faults = faults
self.operations[op.name] = op
def resolve(self, definitions):
"""
Resolve named references to other WSDL objects.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
for op in self.operations.values():
if op.input is None:
op.input = Message(Element('no-input'), definitions)
else:
qref = qualify(op.input, self.root, definitions.tns)
msg = definitions.messages.get(qref)
if msg is None:
raise Exception("msg '%s', not-found" % op.input)
else:
op.input = msg
if op.output is None:
op.output = Message(Element('no-output'), definitions)
else:
qref = qualify(op.output, self.root, definitions.tns)
msg = definitions.messages.get(qref)
if msg is None:
raise Exception("msg '%s', not-found" % op.output)
else:
op.output = msg
for f in op.faults:
qref = qualify(f.message, self.root, definitions.tns)
msg = definitions.messages.get(qref)
if msg is None:
raise Exception, "msg '%s', not-found" % f.message
f.message = msg
def operation(self, name):
"""
Shortcut used to get a contained operation by name.
@param name: An operation name.
@type name: str
@return: The named operation.
@rtype: Operation
@raise L{MethodNotFound}: When not found.
"""
try:
return self.operations[name]
        except KeyError:
            raise MethodNotFound(name)
def __gt__(self, other):
return isinstance(other, (Import, Types, Message))
class Binding(NamedObject):
"""
Represents <binding/>
@ivar operations: A list of contained operations.
@type operations: list
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
self.operations = {}
self.type = root.get('type')
sr = self.soaproot()
if sr is None:
self.soap = None
log.debug('binding: "%s" not a soap binding', self.name)
return
soap = Facade('soap')
self.soap = soap
self.soap.style = sr.get('style', default='document')
self.add_operations(self.root, definitions)
def soaproot(self):
""" get the soap:binding """
for ns in (soapns, soap12ns):
sr = self.root.getChild('binding', ns=ns)
if sr is not None:
return sr
return None
def add_operations(self, root, definitions):
""" Add <operation/> children """
dsop = Element('operation', ns=soapns)
for c in root.getChildren('operation'):
op = Facade('Operation')
op.name = c.get('name')
sop = c.getChild('operation', default=dsop)
soap = Facade('soap')
soap.action = '"%s"' % sop.get('soapAction', default='')
soap.style = sop.get('style', default=self.soap.style)
soap.input = Facade('Input')
soap.input.body = Facade('Body')
soap.input.headers = []
soap.output = Facade('Output')
soap.output.body = Facade('Body')
soap.output.headers = []
op.soap = soap
input = c.getChild('input')
if input is None:
input = Element('input', ns=wsdlns)
body = input.getChild('body')
self.body(definitions, soap.input.body, body)
for header in input.getChildren('header'):
self.header(definitions, soap.input, header)
output = c.getChild('output')
if output is None:
output = Element('output', ns=wsdlns)
body = output.getChild('body')
self.body(definitions, soap.output.body, body)
for header in output.getChildren('header'):
self.header(definitions, soap.output, header)
faults = []
for fault in c.getChildren('fault'):
sf = fault.getChild('fault')
if sf is None:
continue
fn = fault.get('name')
f = Facade('Fault')
f.name = sf.get('name', default=fn)
f.use = sf.get('use', default='literal')
faults.append(f)
soap.faults = faults
self.operations[op.name] = op
def body(self, definitions, body, root):
""" add the input/output body properties """
if root is None:
body.use = 'literal'
body.namespace = definitions.tns
body.parts = ()
return
parts = root.get('parts')
if parts is None:
body.parts = ()
else:
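            # e.g. parts="partA partB" (or "partA,partB") yields
            # ['partA', 'partB']; a mixed "partA, partB" would leave an
            # empty string in the list with this split pattern.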
body.parts = re.split('[\s,]', parts)
body.use = root.get('use', default='literal')
ns = root.get('namespace')
if ns is None:
body.namespace = definitions.tns
else:
prefix = root.findPrefix(ns, 'b0')
body.namespace = (prefix, ns)
def header(self, definitions, parent, root):
""" add the input/output header properties """
if root is None:
return
header = Facade('Header')
parent.headers.append(header)
header.use = root.get('use', default='literal')
ns = root.get('namespace')
if ns is None:
header.namespace = definitions.tns
else:
prefix = root.findPrefix(ns, 'h0')
header.namespace = (prefix, ns)
msg = root.get('message')
if msg is not None:
header.message = msg
part = root.get('part')
if part is not None:
header.part = part
def resolve(self, definitions):
"""
Resolve named references to other WSDL objects. This includes
cross-linking information (from) the portType (to) the I{soap}
protocol information on the binding for each operation.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
self.resolveport(definitions)
for op in self.operations.values():
self.resolvesoapbody(definitions, op)
self.resolveheaders(definitions, op)
self.resolvefaults(definitions, op)
def resolveport(self, definitions):
"""
Resolve port_type reference.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
ref = qualify(self.type, self.root, definitions.tns)
port_type = definitions.port_types.get(ref)
if port_type is None:
raise Exception("portType '%s', not-found" % self.type)
else:
self.type = port_type
def resolvesoapbody(self, definitions, op):
"""
Resolve soap body I{message} parts by
cross-referencing with operation defined in port type.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation}
"""
ptop = self.type.operation(op.name)
if ptop is None:
            raise Exception(
                "operation '%s' not defined in portType" % op.name)
soap = op.soap
parts = soap.input.body.parts
if len(parts):
pts = []
for p in ptop.input.parts:
if p.name in parts:
pts.append(p)
soap.input.body.parts = pts
else:
soap.input.body.parts = ptop.input.parts
parts = soap.output.body.parts
if len(parts):
pts = []
for p in ptop.output.parts:
if p.name in parts:
pts.append(p)
soap.output.body.parts = pts
else:
soap.output.body.parts = ptop.output.parts
def resolveheaders(self, definitions, op):
"""
Resolve soap header I{message} references.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation}
"""
soap = op.soap
headers = soap.input.headers + soap.output.headers
for header in headers:
mn = header.message
ref = qualify(mn, self.root, definitions.tns)
message = definitions.messages.get(ref)
if message is None:
raise Exception, "message'%s', not-found" % mn
pn = header.part
for p in message.parts:
if p.name == pn:
header.part = p
break
if pn == header.part:
                raise Exception(
                    "message '%s' has no part named '%s'" % (ref, pn))
def resolvefaults(self, definitions, op):
"""
Resolve soap fault I{message} references by
cross-referencing with operation defined in port type.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation}
"""
ptop = self.type.operation(op.name)
if ptop is None:
            raise Exception(
                "operation '%s' not defined in portType" % op.name)
soap = op.soap
for fault in soap.faults:
for f in ptop.faults:
if f.name == fault.name:
fault.parts = f.message.parts
continue
if hasattr(fault, 'parts'):
continue
            raise Exception(
                "fault '%s' not defined in portType '%s'" % (fault.name, self.type.name))
def operation(self, name):
"""
Shortcut used to get a contained operation by name.
@param name: An operation name.
@type name: str
@return: The named operation.
@rtype: Operation
@raise L{MethodNotFound}: When not found.
"""
try:
return self.operations[name]
        except KeyError:
raise MethodNotFound(name)
def __gt__(self, other):
return ( not isinstance(other, Service) )
class Port(NamedObject):
"""
Represents a service port.
@ivar service: A service.
@type service: L{Service}
@ivar binding: A binding name.
@type binding: str
@ivar location: The service location (url).
@type location: str
"""
def __init__(self, root, definitions, service):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param service: A service object.
@type service: L{Service}
"""
NamedObject.__init__(self, root, definitions)
self.__service = service
self.binding = root.get('binding')
address = root.getChild('address')
if address is None:
self.location = None
else:
self.location = address.get('location').encode('utf-8')
self.methods = {}
def method(self, name):
"""
Get a method defined in this portType by name.
@param name: A method name.
@type name: str
@return: The requested method object.
@rtype: I{Method}
"""
return self.methods.get(name)
class Service(NamedObject):
"""
Represents <service/>.
    @ivar ports: The contained ports.
    @type ports: [Port,..]
@ivar methods: The contained methods for all ports.
@type methods: [Method,..]
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
self.ports = []
for p in root.getChildren('port'):
port = Port(p, definitions, self)
self.ports.append(port)
def port(self, name):
"""
Locate a port by name.
@param name: A port name.
@type name: str
@return: The port object.
@rtype: L{Port}
"""
for p in self.ports:
if p.name == name:
return p
return None
def setlocation(self, url, names=None):
"""
Override the invocation location (url) for service method.
@param url: A url location.
@type url: A url.
@param names: A list of method names. None=ALL
@type names: [str,..]
"""
for p in self.ports:
for m in p.methods.values():
if names is None or m.name in names:
m.location = url
def resolve(self, definitions):
"""
Resolve named references to other WSDL objects.
Ports without soap bindings are discarded.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
filtered = []
for p in self.ports:
ref = qualify(p.binding, self.root, definitions.tns)
binding = definitions.bindings.get(ref)
if binding is None:
raise Exception("binding '%s', not-found" % p.binding)
if binding.soap is None:
log.debug('binding "%s" - not a soap, discarded', binding.name)
continue
p.binding = binding
filtered.append(p)
self.ports = filtered
def __gt__(self, other):
return True
class Factory:
"""
Simple WSDL object factory.
@cvar tags: Dictionary of tag->constructor mappings.
@type tags: dict
"""
tags =\
{
'import' : Import,
'types' : Types,
'message' : Message,
'portType' : PortType,
'binding' : Binding,
'service' : Service,
}
@classmethod
def create(cls, root, definitions):
"""
Create an object based on the root tag name.
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
@return: The created object.
@rtype: L{WObject}
"""
fn = cls.tags.get(root.name)
if fn is not None:
return fn(root, definitions)
else:
return None
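# Typical flow (sketch): Definitions.add_children() feeds each top-level
# <wsdl:*/> child through the factory, e.g.
#   child = Factory.create(root.getChild('service'), definitions)
# which returns a Service instance, or None for unmapped tags.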
| gpl-3.0 |
wangjun/odoo | addons/sale_stock/company.py | 384 | 1524 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class company(osv.osv):
_inherit = 'res.company'
_columns = {
'security_lead': fields.float(
'Security Days', required=True,
help="Margin of error for dates promised to customers. "\
"Products will be scheduled for procurement and delivery "\
"that many days earlier than the actual promised date, to "\
"cope with unexpected delays in the supply chain."),
}
_defaults = {
'security_lead': 0.0,
}
| agpl-3.0 |
bryfry/tomorrow-theme | ipythonqt/tomorrownight.py | 31 | 2459 | # -*- coding: utf-8 -*-
"""
    pygments.styles.tomorrownight
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tomorrow Night theme for ipython qtconsole (invoke with
ipython qtconsole --style=tomorrownight)
See https://github.com/chriskempson/tomorrow-theme for theme info
:copyright: Copyright 2012 André Risnes, [email protected]
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Text, Punctuation
class TomorrownightStyle(Style):
"""
Tomorrow Night theme for ipython qtconsole
"""
background_color = '#1d1f21'
highlight_color = '#373b41'
styles = {
Whitespace: background_color,
Text: '#c5c8c6',
Punctuation: '#81a2be',
Comment: '#b5bd68',
Comment.Preproc: 'italic #b5bd68',
Comment.Special: 'italic #b5bd68',
Keyword: '#81a2be',
Keyword.Type: '#f0c674',
Keyword.Namespace: '#de935f',
Operator.Word: '#81a2be',
Name: '#de935f',
Name.Builtin: '#de935f',
Name.Function: '#8abeb7',
Name.Class: '#f0c674',
Name.Namespace: '#81a2be',
Name.Variable: '#de935f',
Name.Constant: '#c5c8c6',
Name.Entity: 'bold #00aaaa',
Name.Attribute: '#de935f',
Name.Tag: 'bold #b5bd68',
Name.Decorator: '#cc6666',
String: '#b5bd68',
String.Symbol: '#b5bd68',
String.Regex: '#b5bd68',
Number: '#cc6666',
Generic.Heading: 'bold #c5c8c6',
Generic.Subheading: 'bold #c5c8c6',
Generic.Deleted: '#de935f',
Generic.Inserted: '#8abeb7',
Generic.Error: '#cc6666',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#b5bd68',
Generic.Output: '#c5c8c6',
Generic.Traceback: '#c5c8c6',
Error: '#cc6666'
}
| mit |
tangyiyong/odoo | addons/sale/report/sale_report.py | 111 | 5981 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import tools
from openerp.osv import fields, osv
class sale_report(osv.osv):
_name = "sale.report"
_description = "Sales Orders Statistics"
_auto = False
_rec_name = 'date'
_columns = {
'date': fields.datetime('Date Order', readonly=True), # TDE FIXME master: rename into date_order
'date_confirm': fields.date('Date Confirm', readonly=True),
'product_id': fields.many2one('product.product', 'Product', readonly=True),
'product_uom': fields.many2one('product.uom', 'Unit of Measure', readonly=True),
'product_uom_qty': fields.float('# of Qty', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner', readonly=True),
'company_id': fields.many2one('res.company', 'Company', readonly=True),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True),
'price_total': fields.float('Total Price', readonly=True),
'delay': fields.float('Commitment Delay', digits=(16,2), readonly=True),
'categ_id': fields.many2one('product.category','Category of Product', readonly=True),
'nbr': fields.integer('# of Lines', readonly=True), # TDE FIXME master: rename into nbr_lines
'state': fields.selection([
('cancel', 'Cancelled'),
('draft', 'Draft'),
('confirmed', 'Confirmed'),
('exception', 'Exception'),
('done', 'Done')], 'Order Status', readonly=True),
'pricelist_id': fields.many2one('product.pricelist', 'Pricelist', readonly=True),
'analytic_account_id': fields.many2one('account.analytic.account', 'Analytic Account', readonly=True),
'section_id': fields.many2one('crm.case.section', 'Sales Team'),
}
_order = 'date desc'
def _select(self):
select_str = """
WITH currency_rate (currency_id, rate, date_start, date_end) AS (
SELECT r.currency_id, r.rate, r.name AS date_start,
(SELECT name FROM res_currency_rate r2
WHERE r2.name > r.name AND
r2.currency_id = r.currency_id
ORDER BY r2.name ASC
LIMIT 1) AS date_end
FROM res_currency_rate r
)
SELECT min(l.id) as id,
l.product_id as product_id,
t.uom_id as product_uom,
sum(l.product_uom_qty / u.factor * u2.factor) as product_uom_qty,
sum(l.product_uom_qty * cr.rate * l.price_unit * (100.0-l.discount) / 100.0) as price_total,
count(*) as nbr,
s.date_order as date,
s.date_confirm as date_confirm,
s.partner_id as partner_id,
s.user_id as user_id,
s.company_id as company_id,
extract(epoch from avg(date_trunc('day',s.date_confirm)-date_trunc('day',s.create_date)))/(24*60*60)::decimal(16,2) as delay,
l.state,
t.categ_id as categ_id,
s.pricelist_id as pricelist_id,
s.project_id as analytic_account_id,
s.section_id as section_id
"""
return select_str
def _from(self):
from_str = """
sale_order_line l
join sale_order s on (l.order_id=s.id)
left join product_product p on (l.product_id=p.id)
left join product_template t on (p.product_tmpl_id=t.id)
left join product_uom u on (u.id=l.product_uom)
left join product_uom u2 on (u2.id=t.uom_id)
left join product_pricelist pp on (s.pricelist_id = pp.id)
join currency_rate cr on (cr.currency_id = pp.currency_id and
cr.date_start <= coalesce(s.date_order, now()) and
(cr.date_end is null or cr.date_end > coalesce(s.date_order, now())))
"""
return from_str
def _group_by(self):
group_by_str = """
GROUP BY l.product_id,
l.order_id,
t.uom_id,
t.categ_id,
s.date_order,
s.date_confirm,
s.partner_id,
s.user_id,
s.company_id,
l.state,
s.pricelist_id,
s.project_id,
s.section_id
"""
return group_by_str
def init(self, cr):
# self._table = sale_report
tools.drop_view_if_exists(cr, self._table)
cr.execute("""CREATE or REPLACE VIEW %s as (
%s
FROM ( %s )
%s
)""" % (self._table, self._select(), self._from(), self._group_by()))
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ptemplier/ansible | lib/ansible/modules/system/interfaces_file.py | 12 | 13769 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2016, Roman Belyakovsky <ihryamzik () gmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: interfaces_file
short_description: Tweak settings in /etc/network/interfaces files
extends_documentation_fragment: files
description:
     - Manage (add, remove, change) individual interface options in an interfaces-style file without having
       to manage the file as a whole with, say, M(template) or M(assemble). The interface has to be already present in the file.
     - Read information about interfaces from interfaces-styled files
version_added: "2.4"
options:
dest:
description:
- Path to the interfaces file
required: false
default: /etc/network/interfaces
iface:
description:
- Name of the interface, required for value changes or option remove
required: false
default: null
option:
description:
- Name of the option, required for value changes or option remove
required: false
default: null
value:
description:
         - If I(option) is not present for the I(interface) and I(state) is C(present), the option will be added.
           If I(option) already exists and is not C(pre-up), C(up), C(post-up) or C(down), its value will be updated.
           C(pre-up), C(up), C(post-up) and C(down) options can't be updated; only adding new options, removing existing
           ones or cleaning the whole option set are supported
required: false
default: null
backup:
description:
- Create a backup file including the timestamp information so you can get
the original file back if you somehow clobbered it incorrectly.
required: false
default: "no"
choices: [ "yes", "no" ]
state:
description:
- If set to C(absent) the option or section will be removed if present instead of created.
required: false
default: "present"
choices: [ "present", "absent" ]
notes:
     - If an option is defined multiple times, the last one will be updated, but all of them will be deleted when I(state) is C(absent)
requirements: []
author: "Roman Belyakovsky (@hryamzik)"
'''
RETURN = '''
dest:
description: destination file/path
returned: success
type: string
sample: "/etc/network/interfaces"
ifaces:
description: interfaces dictionary
returned: success
type: complex
contains:
ifaces:
description: interface dictionary
returned: success
type: dictionary
contains:
eth0:
description: Name of the interface
returned: success
type: dictionary
contains:
address_family:
description: interface address family
returned: success
type: string
sample: "inet"
method:
description: interface method
returned: success
type: string
sample: "manual"
mtu:
description: other options, all values returned as strings
returned: success
type: string
sample: "1500"
pre-up:
description: list of C(pre-up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
up:
description: list of C(up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
post-up:
description: list of C(post-up) scripts
returned: success
type: list
sample:
- "route add -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route add -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
down:
description: list of C(down) scripts
returned: success
type: list
sample:
- "route del -net 10.10.10.0/24 gw 10.10.10.1 dev eth1"
- "route del -net 10.10.11.0/24 gw 10.10.11.1 dev eth2"
...
'''
EXAMPLES = '''
# Set eth1 mtu configuration value to 8000
- interfaces_file:
dest: /etc/network/interfaces.d/eth1.cfg
iface: eth1
option: mtu
value: 8000
backup: yes
state: present
register: eth1_cfg
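
# A removal sketch (hypothetical interface name; same module options as above):
# drop an existing mtu override from eth1
- interfaces_file:
    dest: /etc/network/interfaces.d/eth1.cfg
    iface: eth1
    option: mtu
    state: absent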
'''
import os
import re
import tempfile
from ansible.module_utils.basic import AnsibleModule
def lineDict(line):
return {'line': line, 'line_type': 'unknown'}
def optionDict(line, iface, option, value):
return {'line': line, 'iface': iface, 'option': option, 'value': value, 'line_type': 'option'}
def getValueFromLine(s):
    spaceRe = re.compile(r'\s+')
for m in spaceRe.finditer(s):
pass
valueEnd = m.start()
option = s.split()[0]
optionStart = s.find(option)
optionLen = len(option)
    valueStart = re.search(r'\s', s[optionLen + optionStart:]).end() + optionLen + optionStart
return s[valueStart:valueEnd]
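# e.g. getValueFromLine("    mtu 1500\n") returns "1500" -- the text between
# the option name and the line's trailing whitespace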
def read_interfaces_file(module, filename):
f = open(filename, 'r')
return read_interfaces_lines(module, f)
def read_interfaces_lines(module, line_strings):
lines = []
ifaces = {}
currently_processing = None
i = 0
for line in line_strings:
i += 1
words = line.split()
if len(words) < 1:
lines.append(lineDict(line))
continue
if words[0][0] == "#":
lines.append(lineDict(line))
continue
if words[0] == "mapping":
# currmap = calloc(1, sizeof *currmap);
lines.append(lineDict(line))
currently_processing = "MAPPING"
elif words[0] == "source":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "source-dir":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "iface":
currif = {
"pre-up": [],
"up": [],
"down": [],
"post-up": []
}
            if len(words) != 4:
                module.fail_json(msg="Incorrect number of parameters (%d) in line %d, must be exactly 3" % (len(words), i))
                # TODO: put line and count parameters
                return None, None
            iface_name, address_family_name, method_name = words[1:4]
currif['address_family'] = address_family_name
currif['method'] = method_name
ifaces[iface_name] = currif
lines.append({'line': line, 'iface': iface_name, 'line_type': 'iface', 'params': currif})
currently_processing = "IFACE"
elif words[0] == "auto":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "allow-":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "no-auto-down":
lines.append(lineDict(line))
currently_processing = "NONE"
elif words[0] == "no-scripts":
lines.append(lineDict(line))
currently_processing = "NONE"
else:
if currently_processing == "IFACE":
option_name = words[0]
# TODO: if option_name in currif.options
value = getValueFromLine(line)
lines.append(optionDict(line, iface_name, option_name, value))
if option_name in ["pre-up", "up", "down", "post-up"]:
currif[option_name].append(value)
else:
currif[option_name] = value
elif currently_processing == "MAPPING":
lines.append(lineDict(line))
elif currently_processing == "NONE":
lines.append(lineDict(line))
else:
module.fail_json(msg="misplaced option %s in line %d" % (line, i))
return None, None
return lines, ifaces
def setInterfaceOption(module, lines, iface, option, raw_value, state):
value = str(raw_value)
changed = False
iface_lines = [item for item in lines if "iface" in item and item["iface"] == iface]
if len(iface_lines) < 1:
# interface not found
module.fail_json(msg="Error: interface %s not found" % iface)
return changed
iface_options = list(filter(lambda i: i['line_type'] == 'option', iface_lines))
target_options = list(filter(lambda i: i['option'] == option, iface_options))
if state == "present":
if len(target_options) < 1:
changed = True
# add new option
last_line_dict = iface_lines[-1]
lines = addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options)
else:
if option in ["pre-up", "up", "down", "post-up"]:
if len(list(filter(lambda i: i['value'] == value, target_options))) < 1:
changed = True
lines = addOptionAfterLine(option, value, iface, lines, target_options[-1], iface_options)
else:
# if more than one option found edit the last one
if target_options[-1]['value'] != value:
changed = True
target_option = target_options[-1]
old_line = target_option['line']
old_value = target_option['value']
prefix_start = old_line.find(option)
optionLen = len(option)
                old_value_position = re.search(r"\s+".join(old_value.split()), old_line[prefix_start + optionLen:])
start = old_value_position.start() + prefix_start + optionLen
end = old_value_position.end() + prefix_start + optionLen
line = old_line[:start] + value + old_line[end:]
index = len(lines) - lines[::-1].index(target_option) - 1
lines[index] = optionDict(line, iface, option, value)
elif state == "absent":
if len(target_options) >= 1:
if option in ["pre-up", "up", "down", "post-up"] and value is not None and value != "None":
for target_option in filter(lambda i: i['value'] == value, target_options):
changed = True
lines = list(filter(lambda l: l != target_option, lines))
else:
changed = True
for target_option in target_options:
lines = list(filter(lambda l: l != target_option, lines))
else:
module.fail_json(msg="Error: unsupported state %s, has to be either present or absent" % state)
return changed, lines
def addOptionAfterLine(option, value, iface, lines, last_line_dict, iface_options):
last_line = last_line_dict['line']
prefix_start = last_line.find(last_line.split()[0])
suffix_start = last_line.rfind(last_line.split()[-1]) + len(last_line.split()[-1])
prefix = last_line[:prefix_start]
if len(iface_options) < 1:
        # interface has no options, indent
prefix += " "
line = prefix + "%s %s" % (option, value) + last_line[suffix_start:]
option_dict = optionDict(line, iface, option, value)
index = len(lines) - lines[::-1].index(last_line_dict)
lines.insert(index, option_dict)
return lines
def write_changes(module, lines, dest):
tmpfd, tmpfile = tempfile.mkstemp()
f = os.fdopen(tmpfd, 'wb')
f.writelines(lines)
f.close()
module.atomic_move(tmpfile, os.path.realpath(dest))
def main():
module = AnsibleModule(
argument_spec=dict(
dest=dict(default='/etc/network/interfaces', required=False),
iface=dict(required=False),
option=dict(required=False),
value=dict(required=False),
backup=dict(default='no', type='bool'),
state=dict(default='present', choices=['present', 'absent']),
),
add_file_common_args=True,
supports_check_mode=True
)
dest = os.path.expanduser(module.params['dest'])
iface = module.params['iface']
option = module.params['option']
value = module.params['value']
backup = module.params['backup']
state = module.params['state']
if option is not None and iface is None:
module.fail_json(msg="Inteface must be set if option is defined")
if option is not None and state == "present" and value is None:
module.fail_json(msg="Value must be set if option is defined and state is 'present'")
lines, ifaces = read_interfaces_file(module, dest)
changed = False
if option is not None:
changed, lines = setInterfaceOption(module, lines, iface, option, value, state)
if changed:
_, ifaces = read_interfaces_lines(module, [d['line'] for d in lines if 'line' in d])
if changed and not module.check_mode:
if backup:
module.backup_local(dest)
write_changes(module, [d['line'] for d in lines if 'line' in d], dest)
module.exit_json(dest=dest, changed=changed, ifaces=ifaces)
if __name__ == '__main__':
main()
| gpl-3.0 |
sorenh/cc | vendor/tornado/website/markdown/extensions/toc.py | 4 | 5105 | """
Table of Contents Extension for Python-Markdown
* * *
(c) 2008 [Jack Miller](http://codezen.org)
Dependencies:
* [Markdown 2.0+](http://www.freewisdom.org/projects/python-markdown/)
"""
import markdown
from markdown import etree
import re
class TocTreeprocessor(markdown.treeprocessors.Treeprocessor):
# Iterator wrapper to get parent and child all at once
def iterparent(self, root):
for parent in root.getiterator():
for child in parent:
yield parent, child
def run(self, doc):
div = etree.Element("div")
div.attrib["class"] = "toc"
last_li = None
# Add title to the div
if self.config["title"][0]:
header = etree.SubElement(div, "span")
header.attrib["class"] = "toctitle"
header.text = self.config["title"][0]
level = 0
        list_stack = [div]
header_rgx = re.compile("[Hh][123456]")
# Get a list of id attributes
used_ids = []
for c in doc.getiterator():
if "id" in c.attrib:
used_ids.append(c.attrib["id"])
for (p, c) in self.iterparent(doc):
if not c.text:
continue
# To keep the output from screwing up the
# validation by putting a <div> inside of a <p>
# we actually replace the <p> in its entirety.
# We do not allow the marker inside a header as that
            # would cause an endless loop of placing a new TOC
            # inside a previously generated TOC.
if c.text.find(self.config["marker"][0]) > -1 and not header_rgx.match(c.tag):
for i in range(len(p)):
if p[i] == c:
p[i] = div
break
if header_rgx.match(c.tag):
tag_level = int(c.tag[-1])
# Regardless of how many levels we jumped
# only one list should be created, since
# empty lists containing lists are illegal.
if tag_level < level:
list_stack.pop()
level = tag_level
if tag_level > level:
newlist = etree.Element("ul")
if last_li:
last_li.append(newlist)
else:
list_stack[-1].append(newlist)
list_stack.append(newlist)
level = tag_level
# Do not override pre-existing ids
if not "id" in c.attrib:
id = self.config["slugify"][0](c.text)
if id in used_ids:
ctr = 1
while "%s_%d" % (id, ctr) in used_ids:
ctr += 1
id = "%s_%d" % (id, ctr)
used_ids.append(id)
c.attrib["id"] = id
else:
id = c.attrib["id"]
# List item link, to be inserted into the toc div
last_li = etree.Element("li")
link = etree.SubElement(last_li, "a")
link.text = c.text
link.attrib["href"] = '#' + id
if int(self.config["anchorlink"][0]):
anchor = etree.SubElement(c, "a")
anchor.text = c.text
anchor.attrib["href"] = "#" + id
anchor.attrib["class"] = "toclink"
c.text = ""
list_stack[-1].append(last_li)
class TocExtension(markdown.Extension):
def __init__(self, configs):
self.config = { "marker" : ["[TOC]",
"Text to find and replace with Table of Contents -"
"Defaults to \"[TOC]\""],
"slugify" : [self.slugify,
"Function to generate anchors based on header text-"
"Defaults to a built in slugify function."],
"title" : [None,
"Title to insert into TOC <div> - "
"Defaults to None"],
"anchorlink" : [0,
"1 if header should be a self link"
"Defaults to 0"]}
for key, value in configs:
self.setConfig(key, value)
# This is exactly the same as Django's slugify
def slugify(self, value):
""" Slugify a string, to make it URL friendly. """
import unicodedata
value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
value = unicode(re.sub('[^\w\s-]', '', value).strip().lower())
return re.sub('[-\s]+','-',value)
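    # e.g. slugify(u'Header One!') -> u'header-one'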
def extendMarkdown(self, md, md_globals):
tocext = TocTreeprocessor(md)
tocext.config = self.config
md.treeprocessors.add("toc", tocext, "_begin")
def makeExtension(configs={}):
return TocExtension(configs=configs)
| apache-2.0 |
Alwnikrotikz/raft | tabs/LogTab.py | 11 | 2204 | #
# Author: Gregory Fleischer ([email protected])
#
# Copyright (c) 2013 RAFT Team
#
# This file is part of RAFT.
#
# RAFT is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# RAFT is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RAFT. If not, see <http://www.gnu.org/licenses/>.
#
import PyQt4
from PyQt4.QtCore import Qt, QObject, SIGNAL, QDateTime
from PyQt4.QtGui import QTableWidget, QTableWidgetItem, QHeaderView
class LogTab(QObject):
def __init__(self, framework, mainWindow):
QObject.__init__(self, mainWindow)
self.framework = framework
self.mainWindow = mainWindow
self.tableWidget = self.mainWindow.logTableWidget
self.tableWidget.setColumnCount(3)
self.tableWidget.setHorizontalHeaderLabels(['Date', 'Type', 'Message'])
self.tableWidget.horizontalHeader().setResizeMode(QHeaderView.ResizeToContents)
self.mainWindow.logTableClearLogButton.clicked.connect(self.handle_logTableClearLogButton_clicked)
self.framework.subscribe_log_events(self.log_message)
def log_message(self, message_type, message):
# TODO: set alignment
row = self.tableWidget.rowCount()
self.tableWidget.insertRow(row)
self.tableWidget.setItem(row, 0, QTableWidgetItem(QDateTime.currentDateTime().toString('yyyy.MM.dd hh:mm:ss.zzz')))
self.tableWidget.setItem(row, 1, QTableWidgetItem(message_type))
self.tableWidget.setItem(row, 2, QTableWidgetItem(message))
self.tableWidget.resizeRowToContents(row)
def handle_logTableClearLogButton_clicked(self):
# TODO: is clearContents needed?
# self.tableWidget.clearContents()
while self.tableWidget.rowCount() > 0:
self.tableWidget.removeRow(0)
| gpl-3.0 |
JohnGeorgiadis/invenio | invenio/ext/email/__init__.py | 1 | 20712 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio mail sending utilities. send_email() is the main API function
people should be using; just check out its docstring.
"""
__revision__ = "$Id$"
import os
import re
import sys
from email import Encoders
from email.Header import Header
from email.MIMEBase import MIMEBase
from email.MIMEImage import MIMEImage
from email.MIMEMultipart import MIMEMultipart
from email.Utils import formatdate
from flask_email.message import EmailMultiAlternatives, EmailMessage
from formatter import DumbWriter, AbstractFormatter
from six import iteritems, StringIO
from time import sleep
from invenio.base.globals import cfg
from invenio.base.helpers import unicodifier
from invenio.base.i18n import _
from invenio.ext.template import render_template_to_string
from .errors import EmailError
default_ln = lambda ln: cfg.get('CFG_SITE_LANG') if ln is None else ln
def setup_app(app):
"""
Prepare application config from Invenio configuration.
@see: https://flask-email.readthedocs.org/en/latest/#configuration
"""
cfg = app.config
app.config.setdefault('EMAIL_BACKEND', cfg.get(
'CFG_EMAIL_BACKEND', 'flask_email.backends.smtp.Mail'))
app.config.setdefault('DEFAULT_FROM_EMAIL', cfg['CFG_SITE_SUPPORT_EMAIL'])
app.config.setdefault('SERVER_EMAIL', cfg['CFG_SITE_ADMIN_EMAIL'])
app.config.setdefault('ADMINS', (('', cfg['CFG_SITE_ADMIN_EMAIL']),))
app.config.setdefault('MANAGERS', (cfg['CFG_SITE_SUPPORT_EMAIL'], ))
CFG_MISCUTIL_SMTP_HOST = cfg.get('CFG_MISCUTIL_SMTP_HOST')
CFG_MISCUTIL_SMTP_PORT = cfg.get('CFG_MISCUTIL_SMTP_PORT')
CFG_MISCUTIL_SMTP_USER = cfg.get('CFG_MISCUTIL_SMTP_USER', '')
CFG_MISCUTIL_SMTP_PASS = cfg.get('CFG_MISCUTIL_SMTP_PASS', '')
CFG_MISCUTIL_SMTP_TLS = cfg.get('CFG_MISCUTIL_SMTP_TLS', False)
app.config.setdefault('EMAIL_HOST', CFG_MISCUTIL_SMTP_HOST)
app.config.setdefault('EMAIL_PORT', CFG_MISCUTIL_SMTP_PORT)
app.config.setdefault('EMAIL_HOST_USER', CFG_MISCUTIL_SMTP_USER)
app.config.setdefault('EMAIL_HOST_PASSWORD', CFG_MISCUTIL_SMTP_PASS)
app.config.setdefault('EMAIL_USE_TLS', CFG_MISCUTIL_SMTP_TLS)
# app.config['EMAIL_USE_SSL']: defaults to False
app.config.setdefault('EMAIL_FILE_PATH', cfg['CFG_LOGDIR'])
return app
def scheduled_send_email(fromaddr,
toaddr,
subject="",
content="",
header=None,
footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
user=None,
other_bibtasklet_arguments=None,
replytoaddr="",
bccaddr="",
):
"""
Like send_email, but send an email via the bibsched
infrastructure.
@param fromaddr: sender
@type fromaddr: string
@param toaddr: list of receivers
@type toaddr: string (comma separated) or list of strings
@param subject: the subject
@param content: the body of the message
@param header: optional header, otherwise default is used
@param footer: optional footer, otherwise default is used
@param copy_to_admin: set to 1 in order to send email the admins
@param attempt_times: try at least n times before giving up sending
@param attempt_sleeptime: number of seconds to sleep between two attempts
    @param user: the user name to use when scheduling the bibtasklet. If
None, the sender will be used
@param other_bibtasklet_arguments: other arguments to append to the list
of arguments to the call of task_low_level_submission
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param bccaddr: [string or list-of-strings] to be used for BCC header
of the email
(if string, then receivers are separated by ',')
@return: the scheduled bibtasklet
"""
from invenio.legacy.bibsched.bibtask import task_low_level_submission
if not isinstance(toaddr, (unicode, str)):
toaddr = ','.join(toaddr)
if not isinstance(replytoaddr, (unicode, str)):
replytoaddr = ','.join(replytoaddr)
toaddr = remove_temporary_emails(toaddr)
if user is None:
user = fromaddr
if other_bibtasklet_arguments is None:
other_bibtasklet_arguments = []
else:
other_bibtasklet_arguments = list(other_bibtasklet_arguments)
if not header is None:
other_bibtasklet_arguments.extend(("-a", "header=%s" % header))
if not footer is None:
other_bibtasklet_arguments.extend(("-a", "footer=%s" % footer))
return task_low_level_submission(
"bibtasklet", user, "-T", "bst_send_email",
"-a", "fromaddr=%s" % fromaddr,
"-a", "toaddr=%s" % toaddr,
"-a", "replytoaddr=%s" % replytoaddr,
"-a", "subject=%s" % subject,
"-a", "content=%s" % content,
"-a", "copy_to_admin=%s" % copy_to_admin,
"-a", "attempt_times=%s" % attempt_times,
"-a", "attempt_sleeptime=%s" % attempt_sleeptime,
"-a", "bccaddr=%s" % bccaddr,
*other_bibtasklet_arguments)
def send_email(fromaddr,
toaddr,
subject="",
content="",
html_content='',
html_images=None,
header=None,
footer=None,
html_header=None,
html_footer=None,
copy_to_admin=0,
attempt_times=1,
attempt_sleeptime=10,
debug_level=0,
ln=None,
charset=None,
replytoaddr="",
attachments=None,
bccaddr="",
forward_failures_to_admin=True,
):
"""Send a forged email to TOADDR from FROMADDR with message created from subjet, content and possibly
header and footer.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
        receivers are separated by ','). BEWARE: If more than one recipient is given,
the receivers are put in BCC and To will be "Undisclosed.Recipients:".
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] header to add, None for the Default
@param footer: [string] footer to add, None for the Default
@param html_header: [string] header to add to the html part, None for the Default
@param html_footer: [string] footer to add to the html part, None for the Default
@param copy_to_admin: [int] if 1 add CFG_SITE_ADMIN_EMAIL in receivers
@param attempt_times: [int] number of tries
@param attempt_sleeptime: [int] seconds in between tries
@param debug_level: [int] debug level
@param ln: [string] invenio language
    @param charset: [string] the content charset. By default it is None, which means
to try to encode the email as ascii, then latin1 then utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@param forward_failures_to_admin: [bool] prevents infinite recursion
in case of admin reporting,
when the problem is not in
the e-mail address format,
but rather in the network
If sending fails, try to send it ATTEMPT_TIMES, and wait for
ATTEMPT_SLEEPTIME seconds in between tries.
e.g.:
    send_email('[email protected]', '[email protected]', 'Let\'s try!', 'check 1234', '<strong>check</strong> <em>1234</em><img src="cid:image1">', {'image1': '/tmp/quantum.jpg'})
@return: [bool]: True if email was sent okay, False if it was not.
"""
from invenio.ext.logging import register_exception
ln = default_ln(ln)
if html_images is None:
html_images = {}
if type(toaddr) is not list:
toaddr = toaddr.strip().split(',')
toaddr = remove_temporary_emails(toaddr)
usebcc = len(toaddr) > 1 # More than one address, let's use Bcc in place of To
if copy_to_admin:
if cfg['CFG_SITE_ADMIN_EMAIL'] not in toaddr:
toaddr.append(cfg['CFG_SITE_ADMIN_EMAIL'])
if type(bccaddr) is not list:
bccaddr = bccaddr.strip().split(',')
msg = forge_email(fromaddr, toaddr, subject, content, html_content,
html_images, usebcc, header, footer, html_header,
html_footer, ln, charset, replytoaddr, attachments,
bccaddr)
if attempt_times < 1 or not toaddr:
try:
raise EmailError(_(
'The system is not attempting to send an email from %(x_from)s'
', to %(x_to)s, with body %(x_body)s.', x_from=fromaddr,
x_to=toaddr, x_body=content))
except EmailError:
register_exception()
return False
sent = False
failure_reason = ''
while not sent and attempt_times > 0:
try:
sent = msg.send()
except Exception as e:
failure_reason = str(e)
register_exception()
if debug_level > 1:
try:
                    raise EmailError(_('Error in sending message. \
Waiting %(sec)s seconds. Exception is %(exc)s, \
while sending email from %(sender)s to %(recipient)s \
with body %(email_body)s.', \
                    sec = attempt_sleeptime, \
                    exc = sys.exc_info()[0], \
                    sender = fromaddr, \
                    recipient = toaddr, \
                    email_body = content))
except EmailError:
register_exception()
if not sent:
attempt_times -= 1
if attempt_times > 0: # sleep only if we shall retry again
sleep(attempt_sleeptime)
if not sent:
# report failure to the admin with the intended message, its
# sender and recipients
if forward_failures_to_admin:
# prepend '> ' to every line of the original message
quoted_body = '> ' + '> '.join(content.splitlines(True))
# define and fill in the report template
admin_report_subject = _('Error while sending an email: %(x_subject)s',
x_subject=subject)
admin_report_body = _(
"\nError while sending an email.\n"
"Reason: %(x_reason)s\n"
"Sender: \"%(x_sender)s\"\n"
"Recipient(s): \"%(x_recipient)s\"\n\n"
"The content of the mail was as follows:\n"
"%(x_body)s",
x_reason=failure_reason,
x_sender=fromaddr,
x_recipient=', '.join(toaddr),
x_body=quoted_body)
send_email(cfg['CFG_SITE_ADMIN_EMAIL'], cfg['CFG_SITE_ADMIN_EMAIL'],
admin_report_subject, admin_report_body,
forward_failures_to_admin=False)
try:
raise EmailError(_(
'Error in sending email from %(x_from)s to %(x_to)s with body'
'%(x_body)s.', x_from=fromaddr, x_to=toaddr, x_body=content))
except EmailError:
register_exception()
return sent
def attach_embed_image(email, image_id, image_path):
"""
Attach an image to the email.
"""
with open(image_path, 'rb') as image_data:
img = MIMEImage(image_data.read())
img.add_header('Content-ID', '<%s>' % image_id)
img.add_header('Content-Disposition', 'attachment', filename=os.path.split(image_path)[1])
email.attach(img)
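# Sketch: embedding a logo the HTML part can reference as <img src="cid:logo">
# (container and path are illustrative):
#   related = MIMEMultipart('related')
#   attach_embed_image(related, 'logo', '/path/to/logo.png')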
def forge_email(fromaddr, toaddr, subject, content, html_content='',
html_images=None, usebcc=False, header=None, footer=None,
html_header=None, html_footer=None, ln=None,
charset=None, replytoaddr="", attachments=None, bccaddr=""):
"""Prepare email. Add header and footer if needed.
@param fromaddr: [string] sender
@param toaddr: [string or list-of-strings] list of receivers (if string, then
receivers are separated by ',')
@param usebcc: [bool] True for using Bcc in place of To
@param subject: [string] subject of the email
@param content: [string] content of the email
@param html_content: [string] html version of the email
@param html_images: [dict] dictionary of image id, image path
@param header: [string] None for the default header
@param footer: [string] None for the default footer
@param ln: language
    @param charset: [string] the content charset. By default it is None, which means
to try to encode the email as ascii, then latin1 then utf-8.
@param replytoaddr: [string or list-of-strings] to be used for the
reply-to header of the email (if string, then
receivers are separated by ',')
@param attachments: list of paths of files to be attached. Alternatively,
every element of the list could be a tuple: (filename, mimetype)
@param bccaddr: [string or list-of-strings] to be used for BCC header of the email
(if string, then receivers are separated by ',')
@return: forged email as an EmailMessage object"""
ln = default_ln(ln)
if html_images is None:
html_images = {}
content = render_template_to_string('mail_text.tpl',
content=unicodifier(content),
header=unicodifier(header),
footer=unicodifier(footer)
).encode('utf8')
if type(toaddr) is list:
toaddr = ','.join(toaddr)
if type(bccaddr) is list:
bccaddr = ','.join(bccaddr)
if type(replytoaddr) is list:
replytoaddr = ','.join(replytoaddr)
toaddr = remove_temporary_emails(toaddr)
headers = {}
kwargs = {'to': [], 'cc': [], 'bcc': []}
if replytoaddr:
headers['Reply-To'] = replytoaddr
if usebcc:
headers['Bcc'] = bccaddr
kwargs['bcc'] = toaddr.split(',') + bccaddr.split(',')
kwargs['to'] = ['Undisclosed.Recipients:']
else:
kwargs['to'] = toaddr.split(',')
headers['From'] = fromaddr
headers['Date'] = formatdate(localtime=True)
headers['User-Agent'] = 'Invenio %s at %s' % (cfg['CFG_VERSION'],
cfg['CFG_SITE_URL'])
if html_content:
html_content = render_template_to_string(
'mail_html.tpl',
content=unicodifier(html_content),
header=unicodifier(html_header),
footer=unicodifier(html_footer)
).encode('utf8')
msg_root = EmailMultiAlternatives(subject=subject, body=content,
from_email=fromaddr,
headers=headers, **kwargs)
msg_root.attach_alternative(html_content, "text/html")
#if not html_images:
# # No image? Attach the HTML to the root
# msg_root.attach(msg_text)
#else:
if html_images:
# Image(s)? Attach the HTML and image(s) as children of a
# "related" block
msg_related = MIMEMultipart('related')
#msg_related.attach(msg_text)
for image_id, image_path in iteritems(html_images):
attach_embed_image(msg_related, image_id, image_path)
msg_root.attach(msg_related)
else:
msg_root = EmailMessage(subject=subject, body=content,
from_email=fromaddr, headers=headers, **kwargs)
if attachments:
from invenio.legacy.bibdocfile.api import _mimes, guess_format_from_url
#old_msg_root = msg_root
#msg_root = MIMEMultipart()
#msg_root.attach(old_msg_root)
for attachment in attachments:
try:
mime = None
if type(attachment) in (list, tuple):
attachment, mime = attachment
if mime is None:
## Automatic guessing of mimetype
mime = _mimes.guess_type(attachment)[0]
if mime is None:
ext = guess_format_from_url(attachment)
mime = _mimes.guess_type("foo" + ext)[0]
if not mime:
mime = 'application/octet-stream'
part = MIMEBase(*mime.split('/', 1))
                with open(attachment, 'rb') as attachment_file:
                    part.set_payload(attachment_file.read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(attachment))
msg_root.attach(part)
            except Exception:
from invenio.ext.logging import register_exception
register_exception(alert_admin=True, prefix="Can't attach %s" % attachment)
return msg_root
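# Illustrative sketch only: one plausible way forge_email might be called to
# build a multipart message with an inline image and an attachment. The
# addresses and file paths below are hypothetical placeholders.
#
#     msg = forge_email(
#         fromaddr='noreply@example.org',
#         toaddr=['alice@example.org', 'bob@example.org'],
#         subject='Weekly report',
#         content='See the attached chart.',
#         html_content='<p>Chart: <img src="cid:chart1"/></p>',
#         html_images={'chart1': '/tmp/chart.png'},
#         attachments=['/tmp/report.pdf'],
#     )
#     msg.send()  # assumes the mail backend is configured elsewhere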
RE_NEWLINES = re.compile(r'<br\s*/?>|</p>', re.I)
RE_SPACES = re.compile(r'\s+')
RE_HTML_TAGS = re.compile(r'<.+?>')
def email_strip_html(html_content):
"""Strip html tags from html_content, trying to respect formatting."""
html_content = RE_SPACES.sub(' ', html_content)
html_content = RE_NEWLINES.sub('\n', html_content)
html_content = RE_HTML_TAGS.sub('', html_content)
html_content = html_content.split('\n')
out = StringIO()
out_format = AbstractFormatter(DumbWriter(out))
for row in html_content:
out_format.add_flowing_data(row)
out_format.end_paragraph(1)
return out.getvalue()
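# Hedged example of the intended transformation (output shape approximate,
# since DumbWriter re-wraps long lines):
#
#     email_strip_html('<p>Hello <b>world</b></p><p>Bye</p>')
#     # -> 'Hello world\n\nBye\n'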
def remove_temporary_emails(emails):
"""
    Remove temporary emails (constructed randomly when a user logs in through
    an external authentication provider that doesn't supply an email address)
    from an email list.
@param emails: email list (if string, then receivers are separated by ',')
@type emails: [str]|str
@rtype: list|str
"""
from invenio_access.local_config import CFG_TEMP_EMAIL_ADDRESS
_RE_TEMPORARY_EMAIL = re.compile(CFG_TEMP_EMAIL_ADDRESS % r'.+?', re.I)
if type(emails) in (str, unicode):
emails = [email.strip() for email in emails.split(',') if email.strip()]
emails = [email for email in emails if not _RE_TEMPORARY_EMAIL.match(email)]
return ','.join(emails)
else:
return [email for email in emails if not _RE_TEMPORARY_EMAIL.match(email)]
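# Hedged usage sketch (addresses hypothetical): a comma-separated string goes
# in and a comma-separated string comes out; a list yields a filtered list.
#
#     remove_temporary_emails('alice@example.org, bob@example.org')
#     # -> 'alice@example.org,bob@example.org'  (nothing temporary to drop)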
def get_mail_header(value):
"""
Return a MIME-compliant header-string. Will join lists of strings
into one string with comma (,) as separator.
"""
if not isinstance(value, basestring):
value = ','.join(value)
try:
value = value.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
value = Header(value, 'utf-8')
return value
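# Hedged examples (illustrative only):
#
#     get_mail_header(['a@example.org', 'b@example.org'])
#     # -> 'a@example.org,b@example.org'
#     get_mail_header(u'Zs\u00f3fia <z@example.org>')
#     # -> Header(..., 'utf-8') instance, since ascii encoding fails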
| gpl-2.0 |
sonicxml/tuna-feather-kernel | tools/perf/scripts/python/syscall-counts.py | 11181 | 1522 | # system call counts
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts.py [comm]\n";
for_comm = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if for_comm is not None:
if common_comm != for_comm:
return
try:
syscalls[id] += 1
except TypeError:
syscalls[id] = 1
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
reverse = True):
print "%-40s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
Jmainguy/ansible-modules-extras | cloud/amazon/ec2_vpc_dhcp_options.py | 65 | 14933 | #!/usr/bin/python
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = """
---
module: ec2_vpc_dhcp_options
short_description: Manages DHCP Options, and can ensure the DHCP options for the given VPC match what's
requested
description:
- This module removes, or creates DHCP option sets, and can associate them to a VPC.
Optionally, a new DHCP Options set can be created that converges a VPC's existing
DHCP option set with values provided.
When dhcp_options_id is provided, the module will
    1. remove it (with state='absent'),
    2. ensure tags are applied (if state='present' and tags are provided), or
    3. attach it to a VPC (if state='present' and a vpc_id is provided).
    If any of the optional values are missing, they will be treated
    as a no-op (i.e., inherit what already exists for the VPC).
To remove existing options while inheriting, supply an empty value
(e.g. set ntp_servers to [] if you want to remove them from the VPC's options)
Most of the options should be self-explanatory.
author: "Joel Thompson (@joelthompson)"
version_added: 2.1
options:
domain_name:
description:
- The domain name to set in the DHCP option sets
required: false
default: None
dns_servers:
description:
- A list of hosts to set the DNS servers for the VPC to. (Should be a
list of IP addresses rather than host names.)
required: false
default: None
ntp_servers:
description:
- List of hosts to advertise as NTP servers for the VPC.
required: false
default: None
netbios_name_servers:
description:
- List of hosts to advertise as NetBIOS servers.
required: false
default: None
netbios_node_type:
description:
- NetBIOS node type to advertise in the DHCP options.
The AWS recommendation is to use 2 (when using netbios name services)
http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html
required: false
default: None
vpc_id:
description:
- VPC ID to associate with the requested DHCP option set.
If no vpc id is provided, and no matching option set is found then a new
DHCP option set is created.
required: false
default: None
delete_old:
description:
- Whether to delete the old VPC DHCP option set when associating a new one.
This is primarily useful for debugging/development purposes when you
want to quickly roll back to the old option set. Note that this setting
will be ignored, and the old DHCP option set will be preserved, if it
is in use by any other VPC. (Otherwise, AWS will return an error.)
required: false
default: true
inherit_existing:
description:
- For any DHCP options not specified in these parameters, whether to
inherit them from the options set already applied to vpc_id, or to
reset them to be empty.
required: false
default: false
tags:
description:
- Tags to be applied to a VPC options set if a new one is created, or
if the resource_id is provided. (options must match)
required: False
default: None
aliases: [ 'resource_tags']
version_added: "2.1"
dhcp_options_id:
description:
- The resource_id of an existing DHCP options set.
If this is specified, then it will override other settings, except tags
(which will be updated to match)
required: False
default: None
version_added: "2.1"
state:
description:
- create/assign or remove the DHCP options.
If state is set to absent, then a DHCP options set matched either
by id, or tags and options will be removed if possible.
required: False
default: present
choices: [ 'absent', 'present' ]
version_added: "2.1"
extends_documentation_fragment: aws
requirements:
- boto
"""
RETURN = """
new_options:
description: The DHCP options created, associated or found
returned: when appropriate
type: dict
sample:
domain-name-servers:
- 10.0.0.1
- 10.0.1.1
    netbios-name-servers:
- 10.0.0.1
- 10.0.1.1
netbios-node-type: 2
domain-name: "my.example.com"
dhcp_options_id:
  description: The AWS resource id of the primary DHCP options set created, found or removed
type: string
returned: when available
changed:
description: Whether the dhcp options were changed
type: bool
returned: always
"""
EXAMPLES = """
# Completely overrides the VPC DHCP options associated with VPC vpc-123456 and deletes any existing
# DHCP option set that may have been attached to that VPC.
- ec2_vpc_dhcp_options:
domain_name: "foo.example.com"
region: us-east-1
dns_servers:
- 10.0.0.1
- 10.0.1.1
ntp_servers:
- 10.0.0.2
- 10.0.1.2
netbios_name_servers:
- 10.0.0.1
- 10.0.1.1
netbios_node_type: 2
vpc_id: vpc-123456
delete_old: True
inherit_existing: False
# Ensure the DHCP option set for the VPC has 10.0.0.4 and 10.0.1.4 as the specified DNS servers, but
# keep any other existing settings. Also, keep the old DHCP option set around.
- ec2_vpc_dhcp_options:
region: us-east-1
dns_servers:
- "{{groups['dns-primary']}}"
- "{{groups['dns-secondary']}}"
vpc_id: vpc-123456
inherit_existing: True
delete_old: False
## Create a DHCP option set with 4.4.4.4 and 8.8.8.8 as the specified DNS servers, with tags
## but do not assign to a VPC
- ec2_vpc_dhcp_options:
region: us-east-1
dns_servers:
- 4.4.4.4
- 8.8.8.8
tags:
Name: google servers
Environment: Test
## Delete a DHCP options set that matches the tags and options specified
- ec2_vpc_dhcp_options:
region: us-east-1
dns_servers:
- 4.4.4.4
- 8.8.8.8
tags:
Name: google servers
Environment: Test
state: absent
## Associate a DHCP options set with a VPC by ID
- ec2_vpc_dhcp_options:
region: us-east-1
dhcp_options_id: dopt-12345678
vpc_id: vpc-123456
"""
import boto.vpc
import boto.ec2
from boto.exception import EC2ResponseError
import socket
import collections
def get_resource_tags(vpc_conn, resource_id):
return dict((t.name, t.value) for t in vpc_conn.get_all_tags(filters={'resource-id': resource_id}))
def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
try:
cur_tags = get_resource_tags(vpc_conn, resource_id)
if tags == cur_tags:
return {'changed': False, 'tags': cur_tags}
to_delete = dict((k, cur_tags[k]) for k in cur_tags if k not in tags)
if to_delete and not add_only:
vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)
to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
if to_add:
vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)
latest_tags = get_resource_tags(vpc_conn, resource_id)
return {'changed': True, 'tags': latest_tags}
except EC2ResponseError as e:
module.fail_json(msg=get_error_message(e.args[2]))
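# Hedged note: ensure_tags reconciles desired vs. current tags in two passes
# (delete extras unless add_only, then add missing ones) and honors check_mode
# through boto's dry_run flag.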
def fetch_dhcp_options_for_vpc(vpc_conn, vpc_id):
"""
Returns the DHCP options object currently associated with the requested VPC ID using the VPC
connection variable.
"""
vpcs = vpc_conn.get_all_vpcs(vpc_ids=[vpc_id])
if len(vpcs) != 1 or vpcs[0].dhcp_options_id == "default":
return None
dhcp_options = vpc_conn.get_all_dhcp_options(dhcp_options_ids=[vpcs[0].dhcp_options_id])
if len(dhcp_options) != 1:
return None
return dhcp_options[0]
def match_dhcp_options(vpc_conn, tags=None, options=None):
"""
Finds a DHCP Options object that optionally matches the tags and options provided
"""
dhcp_options = vpc_conn.get_all_dhcp_options()
for dopts in dhcp_options:
if (not tags) or get_resource_tags(vpc_conn, dopts.id) == tags:
if (not options) or dopts.options == options:
return(True, dopts)
return(False, None)
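# Hedged usage sketch (connection and tag values hypothetical): callers
# typically unpack the (found, options) pair, e.g.
#
#     found, dopts = match_dhcp_options(vpc_conn,
#                                       tags={'Name': 'google servers'},
#                                       options=new_options)
#     if found:
#         vpc_conn.associate_dhcp_options(dopts.id, vpc_id)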
def remove_dhcp_options_by_id(vpc_conn, dhcp_options_id):
associations = vpc_conn.get_all_vpcs(filters={'dhcpOptionsId': dhcp_options_id})
if len(associations) > 0:
return False
else:
vpc_conn.delete_dhcp_options(dhcp_options_id)
return True
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
dhcp_options_id=dict(type='str', default=None),
domain_name=dict(type='str', default=None),
dns_servers=dict(type='list', default=None),
ntp_servers=dict(type='list', default=None),
netbios_name_servers=dict(type='list', default=None),
netbios_node_type=dict(type='int', default=None),
vpc_id=dict(type='str', default=None),
delete_old=dict(type='bool', default=True),
inherit_existing=dict(type='bool', default=False),
tags=dict(type='dict', default=None, aliases=['resource_tags']),
state=dict(type='str', default='present', choices=['present', 'absent'])
)
)
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
params = module.params
found = False
changed = False
new_options = collections.defaultdict(lambda: None)
region, ec2_url, boto_params = get_aws_connection_info(module)
connection = connect_to_aws(boto.vpc, region, **boto_params)
existing_options = None
# First check if we were given a dhcp_options_id
if not params['dhcp_options_id']:
# No, so create new_options from the parameters
        if params['dns_servers'] is not None:
            new_options['domain-name-servers'] = params['dns_servers']
        if params['netbios_name_servers'] is not None:
            new_options['netbios-name-servers'] = params['netbios_name_servers']
        if params['ntp_servers'] is not None:
            new_options['ntp-servers'] = params['ntp_servers']
        if params['domain_name'] is not None:
            # needs to be a list for comparison with boto objects later
            new_options['domain-name'] = [params['domain_name']]
        if params['netbios_node_type'] is not None:
            # needs to be a list for comparison with boto objects later
            new_options['netbios-node-type'] = [str(params['netbios_node_type'])]
# If we were given a vpc_id then we need to look at the options on that
if params['vpc_id']:
existing_options = fetch_dhcp_options_for_vpc(connection, params['vpc_id'])
# if we've been asked to inherit existing options, do that now
if params['inherit_existing']:
if existing_options:
for option in [ 'domain-name-servers', 'netbios-name-servers', 'ntp-servers', 'domain-name', 'netbios-node-type']:
if existing_options.options.get(option) and new_options[option] != [] and (not new_options[option] or [''] == new_options[option]):
new_options[option] = existing_options.options.get(option)
# Do the vpc's dhcp options already match what we're asked for? if so we are done
if existing_options and new_options == existing_options.options:
module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=existing_options.id)
# If no vpc_id was given, or the options don't match then look for an existing set using tags
found, dhcp_option = match_dhcp_options(connection, params['tags'], new_options)
# Now let's cover the case where there are existing options that we were told about by id
# If a dhcp_options_id was supplied we don't look at options inside, just set tags (if given)
else:
supplied_options = connection.get_all_dhcp_options(filters={'dhcp-options-id':params['dhcp_options_id']})
if len(supplied_options) != 1:
if params['state'] != 'absent':
module.fail_json(msg=" a dhcp_options_id was supplied, but does not exist")
else:
found = True
dhcp_option = supplied_options[0]
if params['state'] != 'absent' and params['tags']:
ensure_tags(connection, dhcp_option.id, params['tags'], False, module.check_mode)
# Now we have the dhcp options set, let's do the necessary
# if we found options we were asked to remove then try to do so
if params['state'] == 'absent':
if not module.check_mode:
if found:
changed = remove_dhcp_options_by_id(connection, dhcp_option.id)
module.exit_json(changed=changed, new_options={})
# otherwise if we haven't found the required options we have something to do
elif not module.check_mode and not found:
# create some dhcp options if we weren't able to use existing ones
if not found:
# Convert netbios-node-type and domain-name back to strings
if new_options['netbios-node-type']:
new_options['netbios-node-type'] = new_options['netbios-node-type'][0]
if new_options['domain-name']:
new_options['domain-name'] = new_options['domain-name'][0]
# create the new dhcp options set requested
dhcp_option = connection.create_dhcp_options(
new_options['domain-name'],
new_options['domain-name-servers'],
new_options['ntp-servers'],
new_options['netbios-name-servers'],
new_options['netbios-node-type'])
changed = True
if params['tags']:
ensure_tags(connection, dhcp_option.id, params['tags'], False, module.check_mode)
# If we were given a vpc_id, then attach the options we now have to that before we finish
if params['vpc_id'] and not module.check_mode:
changed = True
connection.associate_dhcp_options(dhcp_option.id, params['vpc_id'])
# and remove old ones if that was requested
if params['delete_old'] and existing_options:
remove_dhcp_options_by_id(connection, existing_options.id)
module.exit_json(changed=changed, new_options=new_options, dhcp_options_id=dhcp_option.id)
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
if __name__ == "__main__":
main()
| gpl-3.0 |
mm112287/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/site-packages/docs.py | 624 | 1503 | from browser import document as doc
from browser import window, html, markdown
import highlight
import time
def run(ev):
# run the code in the elt after the button
ix = ev.target.parent.children.index(ev.target)
elt = ev.target.parent.children[ix+1]
exec(elt.text)
elt.focus()
def load(url,target):
# fake query string to bypass browser cache
qs = '?foo=%s' %time.time()
try:
        mk, scripts = markdown.mark(open(url + qs).read())
except IOError:
doc[target].html = "Page %s not found" %url
return False
doc[target].html = mk
for script in scripts:
exec(script)
for elt in doc[target].get(selector='.exec'):
# Python code executed when user clicks on a button
elt.contentEditable = True
src = elt.text.strip()
h = highlight.highlight(src)
h.className = "pycode"
elt.clear()
elt <= h
elt.focus()
btn = html.BUTTON('▶')
btn.bind('click', run)
elt.parent.insertBefore(btn, elt)
for elt in doc[target].get(selector='.exec_on_load'):
# Python code executed on page load
src = elt.text.strip()
h = highlight.highlight(src)
h.className = "pycode"
elt.clear()
elt <= h
exec(src)
for elt in doc[target].get(selector='.python'):
src = elt.text.strip()
h = highlight.highlight(src)
h.className = "pycode"
elt.clear()
elt <= h
return False
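# Hedged usage sketch: in a Brython docs page, load() is typically wired to
# navigation links, e.g. (element ids and filenames hypothetical):
#
#     doc['nav_home'].bind('click', lambda ev: load('home.md', 'content'))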
| gpl-3.0 |
jcoady9/python-for-android | python3-alpha/python3-src/Lib/codeop.py | 187 | 5994 | r"""Utilities to compile possibly incomplete Python source code.
This module provides two interfaces, broadly similar to the builtin
function compile(), which take program text, a filename and a 'mode'
and:
- Return code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
Approach:
First, check if the source consists entirely of blank lines and
comments; if so, replace it with 'pass', because the built-in
parser doesn't always do the right thing for these.
Compile three times: as is, with \n, and with \n\n appended. If it
compiles as is, it's complete. If it compiles with one \n appended,
we expect more. If it doesn't compile either way, we compare the
error we get when compiling with \n or \n\n appended. If the errors
are the same, the code is broken. But if the errors are different, we
expect more. Not intuitive; not even guaranteed to hold in future
releases; but this matches the compiler's behavior from Python 1.4
through 2.2, at least.
Caveat:
It is possible (but not likely) that the parser stops parsing with a
successful outcome before reaching the end of the source; in this
case, trailing symbols may be ignored instead of causing an error.
For example, a backslash followed by two newlines may be followed by
arbitrary garbage. This will be fixed once the API for the parser is
better.
The two interfaces are:
compile_command(source, filename, symbol):
Compiles a single command in the manner described above.
CommandCompiler():
Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force.
The module also provides another class:
Compile():
Instances of this class act like the built-in function compile,
but with 'memory' in the sense described above.
"""
import __future__
_features = [getattr(__future__, fname)
for fname in __future__.all_feature_names]
__all__ = ["compile_command", "Compile", "CommandCompiler"]
PyCF_DONT_IMPLY_DEDENT = 0x200 # Matches pythonrun.h
def _maybe_compile(compiler, source, filename, symbol):
# Check for source consisting of only blank lines and comments
for line in source.split("\n"):
line = line.strip()
if line and line[0] != '#':
break # Leave it alone
else:
if symbol != "eval":
source = "pass" # Replace it with a 'pass' statement
err = err1 = err2 = None
code = code1 = code2 = None
try:
code = compiler(source, filename, symbol)
except SyntaxError as err:
pass
try:
code1 = compiler(source + "\n", filename, symbol)
except SyntaxError as e:
err1 = e
try:
code2 = compiler(source + "\n\n", filename, symbol)
except SyntaxError as e:
err2 = e
if code:
return code
if not code1 and repr(err1) == repr(err2):
raise err1
def _compile(source, filename, symbol):
return compile(source, filename, symbol, PyCF_DONT_IMPLY_DEDENT)
def compile_command(source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read; default
"<input>"
symbol -- optional grammar start symbol; "single" (default) or "eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(_compile, source, filename, symbol)
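# A minimal, hedged sketch of the three-way contract described above
# (inputs are illustrative):
#
#     compile_command("x = 1")   # complete   -> returns a code object
#     compile_command("if x:")   # incomplete -> returns None (more expected)
#     compile_command("x = (")   # incomplete -> returns None (more expected)
#     compile_command("x +")     # broken     -> raises SyntaxError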
class Compile:
"""Instances of this class behave much like the built-in compile
function, but if one is used to compile text containing a future
statement, it "remembers" and compiles all subsequent program texts
with the statement in force."""
def __init__(self):
self.flags = PyCF_DONT_IMPLY_DEDENT
def __call__(self, source, filename, symbol):
codeob = compile(source, filename, symbol, self.flags, 1)
for feature in _features:
if codeob.co_flags & feature.compiler_flag:
self.flags |= feature.compiler_flag
return codeob
class CommandCompiler:
"""Instances of this class have __call__ methods identical in
signature to compile_command; the difference is that if the
instance compiles program text containing a __future__ statement,
the instance 'remembers' and compiles all subsequent program texts
with the statement in force."""
def __init__(self,):
self.compiler = Compile()
def __call__(self, source, filename="<input>", symbol="single"):
r"""Compile a command and determine whether it is incomplete.
Arguments:
source -- the source string; may contain \n characters
filename -- optional filename from which source was read;
default "<input>"
symbol -- optional grammar start symbol; "single" (default) or
"eval"
Return value / exceptions raised:
- Return a code object if the command is complete and valid
- Return None if the command is incomplete
- Raise SyntaxError, ValueError or OverflowError if the command is a
syntax error (OverflowError and ValueError can be produced by
malformed literals).
"""
return _maybe_compile(self.compiler, source, filename, symbol)
| apache-2.0 |
XMelancholy/android_kernel_snda_u8500 | tools/perf/scripts/python/sched-migration.py | 11215 | 11670 | #!/usr/bin/python
#
# Cpu task migration overview toy
#
# Copyright (C) 2010 Frederic Weisbecker <[email protected]>
#
# perf script event handlers have been generated by perf script -g python
#
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import os
import sys
from collections import defaultdict
from UserList import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
sys.path.append('scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from SchedGui import *
threads = { 0 : "idle"}
def thread_name(pid):
return "%s:%d" % (threads[pid], pid)
class RunqueueEventUnknown:
@staticmethod
def color():
return None
def __repr__(self):
return "unknown"
class RunqueueEventSleep:
@staticmethod
def color():
return (0, 0, 0xff)
def __init__(self, sleeper):
self.sleeper = sleeper
def __repr__(self):
return "%s gone to sleep" % thread_name(self.sleeper)
class RunqueueEventWakeup:
@staticmethod
def color():
return (0xff, 0xff, 0)
def __init__(self, wakee):
self.wakee = wakee
def __repr__(self):
return "%s woke up" % thread_name(self.wakee)
class RunqueueEventFork:
@staticmethod
def color():
return (0, 0xff, 0)
def __init__(self, child):
self.child = child
def __repr__(self):
return "new forked task %s" % thread_name(self.child)
class RunqueueMigrateIn:
@staticmethod
def color():
return (0, 0xf0, 0xff)
def __init__(self, new):
self.new = new
def __repr__(self):
return "task migrated in %s" % thread_name(self.new)
class RunqueueMigrateOut:
@staticmethod
def color():
return (0xff, 0, 0xff)
def __init__(self, old):
self.old = old
def __repr__(self):
return "task migrated out %s" % thread_name(self.old)
class RunqueueSnapshot:
def __init__(self, tasks = [0], event = RunqueueEventUnknown()):
self.tasks = tuple(tasks)
self.event = event
def sched_switch(self, prev, prev_state, next):
event = RunqueueEventUnknown()
if taskState(prev_state) == "R" and next in self.tasks \
and prev in self.tasks:
return self
if taskState(prev_state) != "R":
event = RunqueueEventSleep(prev)
next_tasks = list(self.tasks[:])
if prev in self.tasks:
if taskState(prev_state) != "R":
next_tasks.remove(prev)
elif taskState(prev_state) == "R":
next_tasks.append(prev)
if next not in next_tasks:
next_tasks.append(next)
return RunqueueSnapshot(next_tasks, event)
def migrate_out(self, old):
if old not in self.tasks:
return self
next_tasks = [task for task in self.tasks if task != old]
return RunqueueSnapshot(next_tasks, RunqueueMigrateOut(old))
def __migrate_in(self, new, event):
if new in self.tasks:
self.event = event
return self
next_tasks = self.tasks[:] + tuple([new])
return RunqueueSnapshot(next_tasks, event)
def migrate_in(self, new):
return self.__migrate_in(new, RunqueueMigrateIn(new))
def wake_up(self, new):
return self.__migrate_in(new, RunqueueEventWakeup(new))
def wake_up_new(self, new):
return self.__migrate_in(new, RunqueueEventFork(new))
def load(self):
""" Provide the number of tasks on the runqueue.
Don't count idle"""
return len(self.tasks) - 1
def __repr__(self):
ret = self.tasks.__repr__()
ret += self.origin_tostring()
return ret
class TimeSlice:
def __init__(self, start, prev):
self.start = start
self.prev = prev
self.end = start
# cpus that triggered the event
self.event_cpus = []
if prev is not None:
self.total_load = prev.total_load
self.rqs = prev.rqs.copy()
else:
self.rqs = defaultdict(RunqueueSnapshot)
self.total_load = 0
def __update_total_load(self, old_rq, new_rq):
diff = new_rq.load() - old_rq.load()
self.total_load += diff
def sched_switch(self, ts_list, prev, prev_state, next, cpu):
old_rq = self.prev.rqs[cpu]
new_rq = old_rq.sched_switch(prev, prev_state, next)
if old_rq is new_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def migrate(self, ts_list, new, old_cpu, new_cpu):
if old_cpu == new_cpu:
return
old_rq = self.prev.rqs[old_cpu]
out_rq = old_rq.migrate_out(new)
self.rqs[old_cpu] = out_rq
self.__update_total_load(old_rq, out_rq)
new_rq = self.prev.rqs[new_cpu]
in_rq = new_rq.migrate_in(new)
self.rqs[new_cpu] = in_rq
self.__update_total_load(new_rq, in_rq)
ts_list.append(self)
if old_rq is not out_rq:
self.event_cpus.append(old_cpu)
self.event_cpus.append(new_cpu)
def wake_up(self, ts_list, pid, cpu, fork):
old_rq = self.prev.rqs[cpu]
if fork:
new_rq = old_rq.wake_up_new(pid)
else:
new_rq = old_rq.wake_up(pid)
if new_rq is old_rq:
return
self.rqs[cpu] = new_rq
self.__update_total_load(old_rq, new_rq)
ts_list.append(self)
self.event_cpus = [cpu]
def next(self, t):
self.end = t
return TimeSlice(t, self)
class TimeSliceList(UserList):
def __init__(self, arg = []):
self.data = arg
def get_time_slice(self, ts):
if len(self.data) == 0:
slice = TimeSlice(ts, TimeSlice(-1, None))
else:
slice = self.data[-1].next(ts)
return slice
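	# Note: bisection over time slices ordered by start time; returns the index
	# of the slice containing ts, or -1 if ts falls outside every recorded slice.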
def find_time_slice(self, ts):
start = 0
end = len(self.data)
found = -1
searching = True
while searching:
if start == end or start == end - 1:
searching = False
i = (end + start) / 2
if self.data[i].start <= ts and self.data[i].end >= ts:
found = i
end = i
continue
if self.data[i].end < ts:
start = i
elif self.data[i].start > ts:
end = i
return found
def set_root_win(self, win):
self.root_win = win
def mouse_down(self, cpu, t):
idx = self.find_time_slice(t)
if idx == -1:
return
ts = self[idx]
rq = ts.rqs[cpu]
raw = "CPU: %d\n" % cpu
raw += "Last event : %s\n" % rq.event.__repr__()
raw += "Timestamp : %d.%06d\n" % (ts.start / (10 ** 9), (ts.start % (10 ** 9)) / 1000)
raw += "Duration : %6d us\n" % ((ts.end - ts.start) / (10 ** 6))
raw += "Load = %d\n" % rq.load()
for t in rq.tasks:
raw += "%s \n" % thread_name(t)
self.root_win.update_summary(raw)
def update_rectangle_cpu(self, slice, cpu):
rq = slice.rqs[cpu]
if slice.total_load != 0:
load_rate = rq.load() / float(slice.total_load)
else:
load_rate = 0
red_power = int(0xff - (0xff * load_rate))
color = (0xff, red_power, red_power)
top_color = None
if cpu in slice.event_cpus:
top_color = rq.event.color()
self.root_win.paint_rectangle_zone(cpu, color, top_color, slice.start, slice.end)
def fill_zone(self, start, end):
i = self.find_time_slice(start)
if i == -1:
return
for i in xrange(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
for cpu in timeslice.rqs:
self.update_rectangle_cpu(timeslice, cpu)
def interval(self):
if len(self.data) == 0:
return (0, 0)
return (self.data[0].start, self.data[-1].end)
def nr_rectangles(self):
last_ts = self.data[-1]
max_cpu = 0
for cpu in last_ts.rqs:
if cpu > max_cpu:
max_cpu = cpu
return max_cpu
class SchedEventProxy:
def __init__(self):
self.current_tsk = defaultdict(lambda : -1)
self.timeslices = TimeSliceList()
def sched_switch(self, headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
""" Ensure the task we sched out this cpu is really the one
we logged. Otherwise we may have missed traces """
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
(headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
self.current_tsk[headers.cpu] = next_pid
ts = self.timeslices.get_time_slice(headers.ts())
ts.sched_switch(self.timeslices, prev_pid, prev_state, next_pid, headers.cpu)
def migrate(self, headers, pid, prio, orig_cpu, dest_cpu):
ts = self.timeslices.get_time_slice(headers.ts())
ts.migrate(self.timeslices, pid, orig_cpu, dest_cpu)
def wake_up(self, headers, comm, pid, success, target_cpu, fork):
if success == 0:
return
ts = self.timeslices.get_time_slice(headers.ts())
ts.wake_up(self.timeslices, pid, target_cpu, fork)
def trace_begin():
global parser
parser = SchedEventProxy()
def trace_end():
app = wx.App(False)
timeslices = parser.timeslices
frame = RootFrame(timeslices, "Migration")
app.MainLoop()
def sched__sched_stat_runtime(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, runtime, vruntime):
pass
def sched__sched_stat_iowait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_sleep(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_stat_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, delay):
pass
def sched__sched_process_fork(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
parent_comm, parent_pid, child_comm, child_pid):
pass
def sched__sched_process_wait(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_exit(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_process_free(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_migrate_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, orig_cpu,
dest_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.migrate(headers, pid, prio, orig_cpu, dest_cpu)
def sched__sched_switch(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.sched_switch(headers, prev_comm, prev_pid, prev_prio, prev_state,
next_comm, next_pid, next_prio)
def sched__sched_wakeup_new(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 1)
def sched__sched_wakeup(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio, success,
target_cpu):
headers = EventHeaders(common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
parser.wake_up(headers, comm, pid, success, target_cpu, 0)
def sched__sched_wait_task(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid, prio):
pass
def sched__sched_kthread_stop_ret(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
ret):
pass
def sched__sched_kthread_stop(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
comm, pid):
pass
def trace_unhandled(event_name, context, common_cpu, common_secs, common_nsecs,
common_pid, common_comm):
pass
| gpl-2.0 |
bratsche/Neutron-Drive | google_appengine/lib/webapp2/tests/extras_config_test.py | 24 | 11910 | # -*- coding: utf-8 -*-
import webapp2
from webapp2_extras import config as app_config
import test_base
class TestConfig(test_base.BaseTestCase):
def tearDown(self):
pass
def test_get(self):
config = app_config.Config({'foo': {
'bar': 'baz',
'doo': 'ding',
}})
self.assertEqual(config.get('foo'), {
'bar': 'baz',
'doo': 'ding',
})
self.assertEqual(config.get('bar'), {})
def test_get_existing_keys(self):
config = app_config.Config({'foo': {
'bar': 'baz',
'doo': 'ding',
}})
self.assertEqual(config.get_config('foo', 'bar'), 'baz')
self.assertEqual(config.get_config('foo', 'doo'), 'ding')
def test_get_existing_keys_from_default(self):
config = app_config.Config({}, {'foo': {
'bar': 'baz',
'doo': 'ding',
}})
self.assertEqual(config.get_config('foo', 'bar'), 'baz')
self.assertEqual(config.get_config('foo', 'doo'), 'ding')
def test_get_non_existing_keys(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'foo', 'bar')
def test_get_dict_existing_keys(self):
config = app_config.Config({'foo': {
'bar': 'baz',
'doo': 'ding',
}})
self.assertEqual(config.get_config('foo'), {
'bar': 'baz',
'doo': 'ding',
})
def test_get_dict_non_existing_keys(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'bar')
def test_get_with_default(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'foo', 'bar', 'ooops')
self.assertRaises(KeyError, config.get_config, 'foo', 'doo', 'wooo')
def test_get_with_default_and_none(self):
config = app_config.Config({'foo': {
'bar': None,
}})
self.assertEqual(config.get_config('foo', 'bar', 'ooops'), None)
def test_update(self):
config = app_config.Config({'foo': {
'bar': 'baz',
'doo': 'ding',
}})
self.assertEqual(config.get_config('foo', 'bar'), 'baz')
self.assertEqual(config.get_config('foo', 'doo'), 'ding')
config.update('foo', {'bar': 'other'})
self.assertEqual(config.get_config('foo', 'bar'), 'other')
self.assertEqual(config.get_config('foo', 'doo'), 'ding')
def test_setdefault(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'foo')
config.setdefault('foo', {
'bar': 'baz',
'doo': 'ding',
})
self.assertEqual(config.get_config('foo', 'bar'), 'baz')
self.assertEqual(config.get_config('foo', 'doo'), 'ding')
def test_setdefault2(self):
config = app_config.Config({'foo': {
'bar': 'baz',
}})
self.assertEqual(config.get_config('foo'), {
'bar': 'baz',
})
config.setdefault('foo', {
'bar': 'wooo',
'doo': 'ding',
})
self.assertEqual(config.get_config('foo', 'bar'), 'baz')
self.assertEqual(config.get_config('foo', 'doo'), 'ding')
def test_setitem(self):
config = app_config.Config()
config['foo'] = {'bar': 'baz'}
self.assertEqual(config, {'foo': {'bar': 'baz'}})
self.assertEqual(config['foo'], {'bar': 'baz'})
def test_init_no_dict_values(self):
self.assertRaises(AssertionError, app_config.Config, {'foo': 'bar'})
self.assertRaises(AssertionError, app_config.Config, {'foo': None})
self.assertRaises(AssertionError, app_config.Config, 'foo')
def test_init_no_dict_default(self):
self.assertRaises(AssertionError, app_config.Config, {}, {'foo': 'bar'})
self.assertRaises(AssertionError, app_config.Config, {}, {'foo': None})
self.assertRaises(AssertionError, app_config.Config, {}, 'foo')
def test_update_no_dict_values(self):
config = app_config.Config()
self.assertRaises(AssertionError, config.update, {'foo': 'bar'}, 'baz')
self.assertRaises(AssertionError, config.update, {'foo': None}, 'baz')
self.assertRaises(AssertionError, config.update, 'foo', 'bar')
def test_setdefault_no_dict_values(self):
config = app_config.Config()
self.assertRaises(AssertionError, config.setdefault, 'foo', 'bar')
self.assertRaises(AssertionError, config.setdefault, 'foo', None)
def test_setitem_no_dict_values(self):
config = app_config.Config()
def setitem(key, value):
config[key] = value
return config
self.assertRaises(AssertionError, setitem, 'foo', 'bar')
self.assertRaises(AssertionError, setitem, 'foo', None)
class TestLoadConfig(test_base.BaseTestCase):
def tearDown(self):
pass
def test_default_config(self):
config = app_config.Config()
from resources.template import default_config as template_config
from resources.i18n import default_config as i18n_config
self.assertEqual(config.get_config('resources.template', 'templates_dir'), template_config['templates_dir'])
self.assertEqual(config.get_config('resources.i18n', 'locale'), i18n_config['locale'])
self.assertEqual(config.get_config('resources.i18n', 'timezone'), i18n_config['timezone'])
def test_default_config_with_non_existing_key(self):
config = app_config.Config()
from resources.i18n import default_config as i18n_config
# In the first time the module config will be loaded normally.
self.assertEqual(config.get_config('resources.i18n', 'locale'), i18n_config['locale'])
# In the second time it won't be loaded, but won't find the value and then use the default.
self.assertEqual(config.get_config('resources.i18n', 'i_dont_exist', 'foo'), 'foo')
def test_override_config(self):
config = app_config.Config({
'resources.template': {
'templates_dir': 'apps/templates'
},
'resources.i18n': {
'locale': 'pt_BR',
'timezone': 'America/Sao_Paulo',
},
})
self.assertEqual(config.get_config('resources.template', 'templates_dir'), 'apps/templates')
self.assertEqual(config.get_config('resources.i18n', 'locale'), 'pt_BR')
self.assertEqual(config.get_config('resources.i18n', 'timezone'), 'America/Sao_Paulo')
def test_override_config2(self):
config = app_config.Config({
'resources.i18n': {
'timezone': 'America/Sao_Paulo',
},
})
self.assertEqual(config.get_config('resources.i18n', 'locale'), 'en_US')
self.assertEqual(config.get_config('resources.i18n', 'timezone'), 'America/Sao_Paulo')
def test_get(self):
config = app_config.Config({'foo': {
'bar': 'baz',
}})
self.assertEqual(config.get_config('foo', 'bar'), 'baz')
def test_get_with_default(self):
config = app_config.Config()
self.assertEqual(config.get_config('resources.i18n', 'bar', 'baz'), 'baz')
def test_get_with_default_and_none(self):
config = app_config.Config({'foo': {
'bar': None,
}})
self.assertEqual(config.get_config('foo', 'bar', 'ooops'), None)
def test_get_with_default_and_module_load(self):
config = app_config.Config()
self.assertEqual(config.get_config('resources.i18n', 'locale'), 'en_US')
self.assertEqual(config.get_config('resources.i18n', 'locale', 'foo'), 'en_US')
def test_required_config(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'resources.i18n', 'foo')
def test_missing_module(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'i_dont_exist', 'i_dont_exist')
def test_missing_module2(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'i_dont_exist')
def test_missing_key(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'resources.i18n', 'i_dont_exist')
def test_missing_default_config(self):
config = app_config.Config()
self.assertRaises(KeyError, config.get_config, 'tipfy', 'foo')
class TestLoadConfigGetItem(test_base.BaseTestCase):
def tearDown(self):
pass
def test_default_config(self):
config = app_config.Config()
from resources.template import default_config as template_config
from resources.i18n import default_config as i18n_config
self.assertEqual(config['resources.template']['templates_dir'], template_config['templates_dir'])
self.assertEqual(config['resources.i18n']['locale'], i18n_config['locale'])
self.assertEqual(config['resources.i18n']['timezone'], i18n_config['timezone'])
def test_default_config_with_non_existing_key(self):
config = app_config.Config()
from resources.i18n import default_config as i18n_config
# In the first time the module config will be loaded normally.
self.assertEqual(config['resources.i18n']['locale'], i18n_config['locale'])
# In the second time it won't be loaded, but won't find the value and then use the default.
self.assertEqual(config['resources.i18n'].get('i_dont_exist', 'foo'), 'foo')
def test_override_config(self):
config = app_config.Config({
'resources.template': {
'templates_dir': 'apps/templates'
},
'resources.i18n': {
'locale': 'pt_BR',
'timezone': 'America/Sao_Paulo',
},
})
self.assertEqual(config['resources.template']['templates_dir'], 'apps/templates')
self.assertEqual(config['resources.i18n']['locale'], 'pt_BR')
self.assertEqual(config['resources.i18n']['timezone'], 'America/Sao_Paulo')
def test_override_config2(self):
config = app_config.Config({
'resources.i18n': {
'timezone': 'America/Sao_Paulo',
},
})
self.assertEqual(config['resources.i18n']['locale'], 'en_US')
self.assertEqual(config['resources.i18n']['timezone'], 'America/Sao_Paulo')
def test_get(self):
config = app_config.Config({'foo': {
'bar': 'baz',
}})
self.assertEqual(config['foo']['bar'], 'baz')
def test_get_with_default(self):
config = app_config.Config()
self.assertEqual(config['resources.i18n'].get('bar', 'baz'), 'baz')
def test_get_with_default_and_none(self):
config = app_config.Config({'foo': {
'bar': None,
}})
self.assertEqual(config['foo'].get('bar', 'ooops'), None)
def test_get_with_default_and_module_load(self):
config = app_config.Config()
self.assertEqual(config['resources.i18n']['locale'], 'en_US')
self.assertEqual(config['resources.i18n'].get('locale', 'foo'), 'en_US')
def test_required_config(self):
config = app_config.Config()
self.assertRaises(KeyError, config['resources.i18n'].__getitem__, 'foo')
self.assertRaises(KeyError, config['resources.i18n'].__getitem__, 'required')
def test_missing_module(self):
config = app_config.Config()
self.assertRaises(KeyError, config.__getitem__, 'i_dont_exist')
def test_missing_key(self):
config = app_config.Config()
self.assertRaises(KeyError, config['resources.i18n'].__getitem__, 'i_dont_exist')
if __name__ == '__main__':
test_base.main()
| bsd-3-clause |
dex4er/django-pyc | docs/conf.py | 1 | 12008 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# flake8: noqa
#
# django-pyc documentation build configuration file
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
import django_pyc
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'alabaster',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'django-pyc'
copyright = '2014, 2019, Piotr Roszatycki'
author = 'Piotr Roszatycki'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = django_pyc.__version__
# The full version, including alpha/beta/rc tags.
release = django_pyc.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
#'logo': 'logo.png',
'github_user': 'dex4er',
'github_repo': 'django-pyc',
'show_powered_by': 'false',
}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = 'django-pyc v0.0.1'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'django-pyc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': """
\\usepackage{fontspec}
\setsansfont{Liberation Sans}
\setromanfont{Liberation Serif}
\setmonofont{FreeMono}
""",
'inputenc': '',
'utf8extra': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'django-pyc.tex', 'django-pyc documentation',
'Piotr Roszatycki', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
latex_show_urls = 'inline'
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'django-pyc', 'django-pyc documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'django-pyc', 'django-pyc documentation',
author, 'django-pyc', 'Allows to remove or recompile all files in a project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
#epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to save
# visual space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
# -- Options for Apple Help output ----------------------------------------
applehelp_bundle_id = project
applehelp_disable_external_tools = True
| lgpl-3.0 |
morpheby/levelup-by | lms/djangoapps/open_ended_grading/open_ended_notifications.py | 2 | 8047 | from django.conf import settings
from xmodule.open_ended_grading_classes import peer_grading_service
from .staff_grading_service import StaffGradingService
from xmodule.open_ended_grading_classes.controller_query_service import ControllerQueryService
import json
from student.models import unique_id_for_user
from courseware.models import StudentModule
import logging
from courseware.access import has_access
from util.cache import cache
import datetime
from xmodule.x_module import ModuleSystem
from mitxmako.shortcuts import render_to_string
from xblock.field_data import DictFieldData
log = logging.getLogger(__name__)
NOTIFICATION_CACHE_TIME = 300
KEY_PREFIX = "open_ended_"
NOTIFICATION_TYPES = (
('student_needs_to_peer_grade', 'peer_grading', 'Peer Grading'),
('staff_needs_to_grade', 'staff_grading', 'Staff Grading'),
('new_student_grading_to_view', 'open_ended_problems', 'Problems you have submitted'),
('flagged_submissions_exist', 'open_ended_flagged_problems', 'Flagged Submissions')
)
def staff_grading_notifications(course, user):
staff_gs = StaffGradingService(settings.OPEN_ENDED_GRADING_INTERFACE)
pending_grading = False
img_path = ""
course_id = course.id
student_id = unique_id_for_user(user)
notification_type = "staff"
success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
if success:
return notification_dict
try:
notifications = json.loads(staff_gs.get_notifications(course_id))
if notifications['success']:
if notifications['staff_needs_to_grade']:
pending_grading = True
except:
#Non catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.info(
"Problem with getting notifications from staff grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications}
set_value_in_cache(student_id, course_id, notification_type, notification_dict)
return notification_dict
def peer_grading_notifications(course, user):
system = ModuleSystem(
ajax_url=None,
track_function=None,
        get_module=None,
render_template=render_to_string,
replace_urls=None,
xmodule_field_data=DictFieldData({}),
)
peer_gs = peer_grading_service.PeerGradingService(settings.OPEN_ENDED_GRADING_INTERFACE, system)
pending_grading = False
img_path = ""
course_id = course.id
student_id = unique_id_for_user(user)
notification_type = "peer"
success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
if success:
return notification_dict
try:
notifications = json.loads(peer_gs.get_notifications(course_id, student_id))
if notifications['success']:
if notifications['student_needs_to_peer_grade']:
pending_grading = True
except:
#Non catastrophic error, so no real action
notifications = {}
#This is a dev_facing_error
log.info(
"Problem with getting notifications from peer grading service for course {0} user {1}.".format(course_id,
student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications}
set_value_in_cache(student_id, course_id, notification_type, notification_dict)
return notification_dict
def combined_notifications(course, user):
"""
Show notifications to a given user for a given course. Get notifications from the cache if possible,
or from the grading controller server if not.
@param course: The course object for which we are getting notifications
@param user: The user object for which we are getting notifications
@return: A dictionary with boolean pending_grading (true if there is pending grading), img_path (for notification
image), and response (actual response from grading controller server).
"""
#Set up return values so that we can return them for error cases
pending_grading = False
img_path = ""
    notifications = {}
notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications}
#We don't want to show anonymous users anything.
if not user.is_authenticated():
return notification_dict
#Define a mock modulesystem
system = ModuleSystem(
ajax_url=None,
track_function=None,
        get_module=None,
render_template=render_to_string,
replace_urls=None,
xmodule_field_data=DictFieldData({})
)
#Initialize controller query service using our mock system
controller_qs = ControllerQueryService(settings.OPEN_ENDED_GRADING_INTERFACE, system)
student_id = unique_id_for_user(user)
user_is_staff = has_access(user, course, 'staff')
course_id = course.id
notification_type = "combined"
#See if we have a stored value in the cache
success, notification_dict = get_value_from_cache(student_id, course_id, notification_type)
if success:
return notification_dict
#Get the time of the last login of the user
last_login = user.last_login
last_time_viewed = last_login - datetime.timedelta(seconds=(NOTIFICATION_CACHE_TIME + 60))
try:
#Get the notifications from the grading controller
controller_response = controller_qs.check_combined_notifications(course.id, student_id, user_is_staff,
last_time_viewed)
notifications = json.loads(controller_response)
if notifications.get('success'):
if (notifications.get('staff_needs_to_grade') or
notifications.get('student_needs_to_peer_grade')):
pending_grading = True
except:
#Non catastrophic error, so no real action
#This is a dev_facing_error
log.exception(
"Problem with getting notifications from controller query service for course {0} user {1}.".format(
course_id, student_id))
if pending_grading:
img_path = "/static/images/grading_notification.png"
notification_dict = {'pending_grading': pending_grading, 'img_path': img_path, 'response': notifications}
#Store the notifications in the cache
set_value_in_cache(student_id, course_id, notification_type, notification_dict)
return notification_dict
def get_value_from_cache(student_id, course_id, notification_type):
key_name = create_key_name(student_id, course_id, notification_type)
success, value = _get_value_from_cache(key_name)
return success, value
def set_value_in_cache(student_id, course_id, notification_type, value):
key_name = create_key_name(student_id, course_id, notification_type)
_set_value_in_cache(key_name, value)
def create_key_name(student_id, course_id, notification_type):
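    # e.g. with KEY_PREFIX = "open_ended_" this yields keys like
    # "open_ended_combined_<course_id>_<student_id>"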
key_name = "{prefix}{type}_{course}_{student}".format(prefix=KEY_PREFIX, type=notification_type, course=course_id,
student=student_id)
return key_name
def _get_value_from_cache(key_name):
value = cache.get(key_name)
success = False
if value is None:
return success, value
try:
value = json.loads(value)
success = True
except:
pass
return success, value
def _set_value_in_cache(key_name, value):
cache.set(key_name, json.dumps(value), NOTIFICATION_CACHE_TIME)
| agpl-3.0 |
nttks/edx-platform | common/lib/capa/capa/correctmap.py | 87 | 7080 | #-----------------------------------------------------------------------------
# class used to store graded responses to CAPA questions
#
# Used by responsetypes and capa_problem
class CorrectMap(object):
"""
Stores map between answer_id and response evaluation result for each question
in a capa problem. The response evaluation result for each answer_id includes
(correctness, npoints, msg, hint, hintmode).
- correctness : 'correct', 'incorrect', or 'partially-correct'
- npoints : None, or integer specifying number of points awarded for this answer_id
- msg : string (may have HTML) giving extra message response
(displayed below textline or textbox)
- hint : string (may have HTML) giving optional hint
(displayed below textline or textbox, above msg)
- hintmode : one of (None,'on_request','always') criteria for displaying hint
- queuestate : Dict {key:'', time:''} where key is a secret string, and time is a string dump
of a DateTime object in the format '%Y%m%d%H%M%S'. Is None when not queued
Behaves as a dict.
"""
def __init__(self, *args, **kwargs):
# start with empty dict
self.cmap = dict()
self.items = self.cmap.items
self.keys = self.cmap.keys
self.overall_message = ""
self.set(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
return self.cmap.__getitem__(*args, **kwargs)
def __iter__(self):
return self.cmap.__iter__()
# See the documentation for 'set_dict' for the use of kwargs
def set(
self,
answer_id=None,
correctness=None,
npoints=None,
msg='',
hint='',
hintmode=None,
queuestate=None,
answervariable=None, # pylint: disable=C0330
**kwargs
):
if answer_id is not None:
self.cmap[answer_id] = {
'correctness': correctness,
'npoints': npoints,
'msg': msg,
'hint': hint,
'hintmode': hintmode,
'queuestate': queuestate,
'answervariable': answervariable,
}
def __repr__(self):
return repr(self.cmap)
def get_dict(self):
"""
return dict version of self
"""
return self.cmap
def set_dict(self, correct_map):
"""
Set internal dict of CorrectMap to provided correct_map dict
correct_map is saved by LMS as a plaintext JSON dump of the correctmap dict. This
means that when the definition of CorrectMap (e.g. its properties) are altered,
an existing correct_map dict will not coincide with the newest CorrectMap format as
defined by self.set.
For graceful migration, feed the contents of each correct map to self.set, rather than
making a direct copy of the given correct_map dict. This way, the common keys between
the incoming correct_map dict and the new CorrectMap instance will be written, while
mismatched keys will be gracefully ignored.
Special migration case:
If correct_map is a one-level dict, then convert it to the new dict of dicts format.
"""
# empty current dict
self.__init__()
# create new dict entries
if correct_map and not isinstance(correct_map.values()[0], dict):
# special migration
for k in correct_map:
self.set(k, correctness=correct_map[k])
else:
for k in correct_map:
self.set(k, **correct_map[k])
def is_correct(self, answer_id):
"""
Takes an answer_id
Returns true if the problem is correct OR partially correct.
"""
if answer_id in self.cmap:
return self.cmap[answer_id]['correctness'] in ['correct', 'partially-correct']
return None
def is_partially_correct(self, answer_id):
"""
Takes an answer_id
Returns true if the problem is partially correct.
"""
if answer_id in self.cmap:
return self.cmap[answer_id]['correctness'] == 'partially-correct'
return None
def is_queued(self, answer_id):
return answer_id in self.cmap and self.cmap[answer_id]['queuestate'] is not None
def is_right_queuekey(self, answer_id, test_key):
return self.is_queued(answer_id) and self.cmap[answer_id]['queuestate']['key'] == test_key
def get_queuetime_str(self, answer_id):
if self.cmap[answer_id]['queuestate']:
return self.cmap[answer_id]['queuestate']['time']
else:
return None
def get_npoints(self, answer_id):
"""Return the number of points for an answer, used for partial credit."""
npoints = self.get_property(answer_id, 'npoints')
if npoints is not None:
return npoints
elif self.is_correct(answer_id):
return 1
# if not correct and no points have been assigned, return 0
return 0
def set_property(self, answer_id, property, value):
if answer_id in self.cmap:
self.cmap[answer_id][property] = value
else:
self.cmap[answer_id] = {property: value}
def get_property(self, answer_id, property, default=None):
if answer_id in self.cmap:
return self.cmap[answer_id].get(property, default)
return default
def get_correctness(self, answer_id):
return self.get_property(answer_id, 'correctness')
def get_msg(self, answer_id):
return self.get_property(answer_id, 'msg', '')
def get_hint(self, answer_id):
return self.get_property(answer_id, 'hint', '')
def get_hintmode(self, answer_id):
return self.get_property(answer_id, 'hintmode', None)
def set_hint_and_mode(self, answer_id, hint, hintmode):
"""
- hint : (string) HTML text for hint
- hintmode : (string) mode for hint display ('always' or 'on_request')
"""
self.set_property(answer_id, 'hint', hint)
self.set_property(answer_id, 'hintmode', hintmode)
def update(self, other_cmap):
"""
Update this CorrectMap with the contents of another CorrectMap
"""
if not isinstance(other_cmap, CorrectMap):
raise Exception('CorrectMap.update called with invalid argument %s' % other_cmap)
self.cmap.update(other_cmap.get_dict())
self.set_overall_message(other_cmap.get_overall_message())
def set_overall_message(self, message_str):
""" Set a message that applies to the question as a whole,
rather than to individual inputs. """
self.overall_message = str(message_str) if message_str else ""
def get_overall_message(self):
""" Retrieve a message that applies to the question as a whole.
If no message is available, returns the empty string """
return self.overall_message
| agpl-3.0 |
fevxie/odoo | addons/hr_attendance/__openerp__.py | 260 | 2073 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Attendances',
'version': '1.1',
'category': 'Human Resources',
'description': """
This module aims to manage employees' attendances.
==================================================
Keeps track of employees' attendances based on the
actions (Sign in/Sign out) performed by them.
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/employees',
'depends': ['hr', 'report'],
'data': [
'security/ir_rule.xml',
'security/ir.model.access.csv',
'hr_attendance_view.xml',
'hr_attendance_report.xml',
'wizard/hr_attendance_error_view.xml',
'res_config_view.xml',
'views/report_attendanceerrors.xml',
'views/hr_attendance.xml',
],
'demo': ['hr_attendance_demo.xml'],
'test': [
'test/attendance_process.yml',
'test/hr_attendance_report.yml',
],
'installable': True,
'auto_install': False,
#web
'qweb': ["static/src/xml/attendance.xml"],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
fbrglez/gitBed | xBed/pLib/B/coord.py | 1 | 8879 | from itertools import imap
import time
import sys
import os
import pwd
import platform
import random
from math import log
def neighborhood(coordPivot = [0, 1, 0, 1, 0, 1, 0]):
thisCmd = "B.coord.neighborhood"
ABOUT = """
Procedure {} takes a binary coordinate such as 0101010 (here of
size L = 7) and returns a set of all ** adjacent coordinates **, i.e. the
coordinates with the Hamming distance of 1 from the input coordinate.
The size of this set is L.
""".format(thisCmd)
if coordPivot == "?":
print("Valid query is '" + thisCmd + " ??'")
return
if coordPivot == "??":
print ABOUT
return
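    # e.g. neighborhood([0, 1, 0]) returns [[1, 1, 0], [0, 0, 0], [0, 1, 1]]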
    L = len(coordPivot)
    coordNeighbors = []
    for i in range(L):
        # flip bit i to get the adjacent coordinate, then restore the pivot
        bit = coordPivot[i]
        if bit:
            coordPivot[i] = 0
        else:
            coordPivot[i] = 1
        coordNeighbors.append(list(coordPivot))
        coordPivot[i] = bit
    print("coordPivot\n" + str(coordPivot) + "\ncoordNeighbors\n" + str(coordNeighbors))
    return coordNeighbors
def distance(bstrL = [0, 1, 0, 1], bstrR = [1, 0, 0, 1]):
thisCmd = "B.coord.distance"
ABOUT = """
Procedure {} takes two binary strings and returns
the value of the Hamming distance between the strings.
""".format(thisCmd)
if bstrL == "?":
print("Valid query is '" + thisCmd + " ??'")
return
if bstrL == "??":
print ABOUT
return
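    # e.g. distance([0, 1, 0, 1], [1, 0, 0, 1]) -> 2 (the first two positions differ)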
L = len(bstrL)
dist = 0
if L != len(bstrR):
print("ERROR ... unequal length strings: " + str(len(bstrL)) + " vs " + str(len(bstrR)))
return
for j in range(L):
bL = bstrL[j]
bR = bstrR[j]
if bL != bR:
dist += 1
return dist
def from_int(val = 31, maxBits = 5):
thisCmd = "B.coord.from_int"
ABOUT = """
This procedure takes an integer and the length of the binary string that
can represent this integer and returns a binary string that actually
represents this integer.
""".format(thisCmd)
if val == "?":
print("Valid query is '" + thisCmd + " ??'")
return
if val == "??":
print ABOUT
return
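    # e.g. from_int(5, 4) -> [0, 1, 0, 1] and from_int(31, 5) -> [1, 1, 1, 1, 1]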
    intMax = int(pow(2, maxBits)) - 1
    bstr = []
    if val > intMax:
        print(("\nERROR from {} ... "
               "maxBits={} cannot represent an integer={}\n").format(thisCmd, maxBits, val))
        return
    elif val < 0:
        print("\nERROR from {} ... negative input value, val = {}\n".format(thisCmd, val))
        return
    if val > 0:
        nBits = int(log(val, 2.0))
        remainder = val
        for i in range(int(nBits), -1, -1):
            base = pow(2, i)
            quotient = remainder / base
            remainder = remainder % int(base)
            bstr.append(quotient)
    # left-pad with zeros so the result always has maxBits entries
    numZeros = maxBits - len(bstr)
    zeros = [0] * numZeros
    return zeros + bstr
def rand(L = 41, weightFactor = None):
thisCmd = "B.coord.rand"
ABOUT = """
This proc takes an integer L, and optionally a weightFactor > 0 and <= 1.
By default, weightFactor = NA, and an unbiased binary coordinate of length L
is returned. For weightFactor=0.5, a biased random coordinate of length L
is returned: it will have a random distribution of exactly L/2 'ones'
for L even and (L+1)/2 'ones' for L odd.
""".format(thisCmd)
if L == "?":
print("Valid query is '" + thisCmd + " ??'")
return
if L == "??":
print ABOUT
return
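    # e.g. rand(8) -> an unbiased random 0/1 list of length 8; with the
    # weighted branch below (a sketch), rand(8, 0.5) has exactly four 'ones'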
    coord = []
    if weightFactor is None:
        for i in range(L):
            coord.append(int(.5 + random.random()))
    else:
        # weighted branch, a sketch following the docstring: place a fixed
        # number of 'ones' (weightFactor*L, rounded) at random positions
        numOnes = int(round(weightFactor * L))
        coord = [1] * numOnes + [0] * (L - numOnes)
        random.shuffle(coord)
    return coord
def rank(bstr = [0, 0, 0, 1, 1, 0, 1]):
thisCmd = "B.coord.rank"
ABOUT = """
This proc takes a binary coordinate as a string such as '010101' and
returns its weight number as the number of 'ones', which can also be
interpreted as the distance from '000000' or as 'the rank' of the
coordinate in the Hasse graph with respect to its 'bottom' coordinate
of all 'zeros'.
""".format(thisCmd)
if bstr == "?":
print("Valid query is '" + thisCmd + " ??'")
return
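    # e.g. rank([0, 0, 0, 1, 1, 0, 1]) -> 3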
if bstr == "??":
print ABOUT
return bstr.count(1)
def string_to_list(coord = "1000"):
thisCmd = "B.coord.string_to_list"
ABOUT = """
This proc converts an input string such as '010101' and
returns its list form [0, 1, 0, 1, 0, 1]. If it's not an input string like above,
it will return the same object.
""".format(thisCmd)
if bstr == "?":
print("Valid query is '" + thisCmd + " ??'")
return
if bstr == "??":
print ABOUT
if isinstance(coord, basestring):
coord = map(int, coord)
return coord
def string_vs_list(L = 32, Lpoints = 3, sampleSize = 1000, seedInit = 1215):
thisCmd = "B.coord.string_vs_list"
ABOUT = """
Example: {} L Lpoints sampleSize seedInit
{} 32 7 2000 1066
The command {} implements an asympototic experiment to test
runtime costs of decoding binary coordinates represented either as a binary
string or a binary list. There are 4 input parameters:
the length of a binary coordinate L,
the value of Lpoints (points in the asymptotic experiments)
the value of sampleSize, and
the value of seedInit.
The experiment proceeds as follows:
(1) creates a refererence coordinate list of alternating 0's and 1's.
(2) creates two coordinate samples as random permutations of coordRefList;
one sample as a list of binary strings; the other as a list of binary lists.
(3) decodes commponent values of each coordinate sample.
(4) measures the total runtime of the two decoding operations for each L.
""".format(thisCmd, thisCmd, thisCmd)
if L == "??":
print ABOUT
return
if L == "?":
print "Valid query is " + thisCmd + "(\"?\")"
return
if L % 2:
print "\nERROR from " + thisCmd + ":\nthe value of L=" + str(L) + " is expected to be even!\n"
if seedInit == "":
# initialize the RNG with a random seed
seedInit = 1e9 * random.random()
random.seed(seedInit)
elif isinstance(seedInit, int):
# initialize the RNG with a user-selected seed
random.seed(seedInit)
else:
print "ERROR from " + thisCmd + ":\n.. only seedInit={} or seedInit=<int> are valid assignments, not -seedInit " + str(seedInit)
L_list = []
for points in range(1, Lpoints + 1):
L_list.append(L*pow(2, points - 1))
tableFile = thisCmd + "-" + str(sampleSize) + "-" + str(seedInit) + "-" + "asympTest.txt"
tableLines = """
# file = {} (an R-compatible file of labeled columns
# commandLine = {}({}, {}, {}, {})
# invoked on {}
# hostID = {}@{}-{}-{}
# compiler = python-{}
#
# seedInit = {}
# sampleSize = {}
#
# coordAsString\t\tcoordAsList
# coordSize\truntimeString\t\truntimeList\t\truntimeRatio
coordSize\truntimeString\t\truntimeList\t\truntimeRatio
""".format(tableFile, thisCmd, L, Lpoints, sampleSize, seedInit, time.strftime("%a %b %d %H:%M:%S %Z %Y"), pwd.getpwuid(os.getuid())[0],
os.uname()[1], platform.system(), os.uname()[2], ".".join(imap(str,sys.version_info[:3])), seedInit, sampleSize)
for L in L_list:
coordRefList = []
for i in range(L):
if i % 2:
coordRefList.append(1)
else:
coordRefList.append(0)
#print str(L) + "/" + str(coordRefList)
runtimeList = 0.0
runtimeString = 0.0
for sample in range(1, sampleSize + 1):
random.shuffle(coordRefList) #NOTE: In comparison to tcl version, this line actually shuffles the list
coordString = ''.join(map(str, coordRefList))
rankList = 0
rankString = 0
microSecs = time.time()
for item in coordRefList:
if item:
rankList += 1
runtimeList = runtimeList + (time.time() - microSecs)
microSecs = time.time()
for i in range(L):
item = int(coordString[i])
if item:
rankString += 1
runtimeString = runtimeString + (time.time() - microSecs)
if rankList != rankString:
print "ERROR from " + thisCmd + ":\n.. rank mismatch:rankList=" + str(rankList) + ", rankString=" + str(rankString) + "\n"
runtimeRatio = runtimeString/runtimeList
tableLines += (str(L) + "\t\t" + format(runtimeString, ".18f") + "\t" + format(runtimeList, ".18f") + "\t" + format(runtimeRatio, ".18f") + "\n")
print "\n" + tableLines
file_write(tableFile, tableLines)
print ".. created file " + tableFile
return
def file_write(fileName, data):
try:
        with open(fileName, 'a') as f:
            f.write(data)
except IOError as e:
sys.stderr.write("Error: {}\n".format(e.strerror))
sys.exit(1)
| gpl-2.0 |